Dataset schema:
repo_name: string (length 6-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
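For reference, a minimal sketch of how a record with this schema could be loaded and inspected. The serialization (JSON Lines) and the file name are assumptions not stated by the dump; the field names and their parallel structure are taken from the schema above and the sample rows below.

```python
import json

# Hypothetical file name; the actual storage layout is not specified in this dump.
DATA_PATH = "samples.jsonl"

with open(DATA_PATH, "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "RickyMexx/ML-weather"
        # file_path, code, and apis appear to be parallel sequences,
        # one entry per source file in the repository.
        for path, code, apis in zip(record["file_path"],
                                    record["code"],
                                    record["apis"]):
            print(f"{repo}:{path} -> {len(apis)} tracked API calls, "
                  f"{len(code)} chars of code")
        # possible_versions lists, per library, the versions consistent
        # with the API calls found in the code.
        for versions in record["possible_versions"]:
            print({lib: v for lib, v in versions.items() if v})
```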
RickyMexx/ML-weather
[ "6d0a1b718b7b946ebb0c5df6ae990de9724ca5a0" ]
[ "weather_class.py" ]
[ "# *************** SETTINGS *************** #\nMODEL_NAME = 'VGG16-TL'\nBATCH_SIZE = 6\n\nEPOCHS = 100\nEXIF_FLAG = 0 # Set on 1 if the program is running for the first time with a dataset D, 0 otherwise.\n\nMODELS_DIR = 'models/'\ntrainingset = \"Train_New/\"\n\ntestset1 = \"Test_New/\"\ntestset2 = \"Weather_Testset/\"\nblindtest = \"BlindTest/\"\nmytestset = \"MyTestSet/\"\n\nimgstype = \"*/*.jpg\"\n\ncsv_file = '1743168.csv'\n\nimg_w = 136\nimg_h = 136\n# **************************************** #\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport os, glob\nfrom PIL import Image\nimport csv\nimport scikitplot as skplt\n\n# Clear info and warnings, showing errors\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# Fix for TensorFlow2 + CUDA 10.1 + CUDNN 7.6.5 + Python 3.7.5\ngpu_options = tf.compat.v1.GPUOptions(allow_growth=True)\nsession = tf.compat.v1.InteractiveSession(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential, load_model, Model\nfrom tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D, Input\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras import optimizers, applications, callbacks\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\nfrom tensorflow.keras.optimizers import SGD, RMSprop, Adam\n\nimport numpy as np\nfrom sklearn.metrics import classification_report, confusion_matrix\n\ndef savemodel(model, problem):\n filename = os.path.join(MODELS_DIR, '%s.h5' % problem)\n model.save(filename)\n print(\"\\nModel saved successfully on file %s\\n\" % filename)\n\ndef loadmodel(problem):\n filename = os.path.join(MODELS_DIR, '%s.h5' % problem)\n try:\n model = load_model(filename)\n print(\"\\nModel loaded successfully from file %s\\n\" % filename)\n except OSError:\n print(\"\\nModel file %s not found!!!\\n\" % filename)\n model = None\n return model\n\n# batch_size = 64\ndef RickyNet(input_shape, num_classes):\n model = Sequential()\n\n model.add(AveragePooling2D(pool_size=(3, 3), strides=(3, 3), input_shape=input_shape))\n\n model.add(Conv2D(32, kernel_size=(1, 1), activation=\"relu\", padding=\"valid\"))\n model.add(Conv2D(64, kernel_size=(1, 1), activation=\"relu\", padding=\"valid\"))\n model.add(Conv2D(128, kernel_size=(2, 2), activation=\"relu\", padding=\"valid\"))\n\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n model.add(Flatten())\n\n model.add(Dropout(0.45))\n model.add(Dense(512, activation=\"relu\"))\n\n model.add(Dropout(0.35))\n model.add(Dense(128, activation=\"relu\"))\n\n model.add(Dense(num_classes, activation=\"softmax\"))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\ndef delCorruptEXIF(files):\n for f in files:\n if(os.stat(f).st_size):\n print(files.index(f),\" ---- \", f)\n image = Image.open(f).convert('RGB')\n data = list(image.getdata())\n image_without_exif = Image.new(image.mode, image.size)\n image_without_exif.putdata(data)\n image_without_exif.save(f)\n else:\n os.remove(f)\n\ndef processData(batch_size):\n trd = ImageDataGenerator(\n rescale=1. 
/ 255,\n zoom_range=0.3,\n rotation_range=7,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n vertical_flip=False,\n validation_split=0.1,\n fill_mode=\"reflect\")\n\n trg = trd.flow_from_directory(\n directory=trainingset,\n target_size=(img_h, img_w),\n color_mode=\"rgb\",\n batch_size=batch_size,\n class_mode=\"categorical\",\n shuffle=True,\n subset='training'\n )\n\n teg = trd.flow_from_directory(\n directory=trainingset,\n target_size=(img_h, img_w),\n color_mode=\"rgb\",\n batch_size=batch_size,\n class_mode=\"categorical\",\n shuffle=False,\n subset='validation'\n )\n\n return trg, teg\n\ndef processTest(batch_size, test_dir):\n ted = ImageDataGenerator(\n rescale=1. / 255)\n\n teg = ted.flow_from_directory(\n directory=test_dir,\n target_size=(img_h, img_w),\n color_mode=\"rgb\",\n batch_size=batch_size,\n class_mode=\"categorical\",\n shuffle=False\n )\n\n return teg\n\ndef plot_history(history, name):\n # Accuracy\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title(name+' accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n # Loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title(name+' loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\ndef printSinglePredictions(model, teg3, classnames):\n x, y = teg3.next()\n for i in range(len(x)):\n pred = classnames[model.predict(x)[i].argmax()]\n print(\"Class of the image:\", classnames[y[i].argmax()], \"\\t RickyNet prediction:\", pred)\n plt.imshow(x[i])\n plt.show()\n\ndef savePredictions(model, teg3, classnames):\n i = 0\n\n while i <= teg3.batch_index and i < 1:\n data= teg3.next()\n pred = model.predict(data)\n\n for k in range(BATCH_SIZE):\n plt.imshow(data[0][k])\n plt.title(classnames[pred[k].argmax()])\n plt.savefig('PREDICTIONS/img'+str(k)+'.png')\n i+=1\n\ndef solveBlind(model, teg3, classnames):\n i = 0\n lines = [None] * teg3.n\n while i <= teg3.batch_index:\n data = teg3.next()\n pred = model.predict(data)\n\n for k in range(BATCH_SIZE):\n idx = BATCH_SIZE*i + k\n pred_label = classnames[pred[k].argmax()]\n lines[idx] = [pred_label]\n # print(str(int((idx/teg3.n)*100)) + \"%\")\n i += 1\n\n with open(csv_file, \"w\") as fw:\n wr = csv.writer(fw)\n wr.writerows(lines)\n\ndef load_backbone_net(input_shape):\n # define input tensor\n input0 = Input(shape=input_shape)\n\n #model = applications.VGG19(weights=\"imagenet\", include_top=False, input_tensor=input0)\n #model= applications.InceptionV3(weights=\"imagenet\", include_top=False, input_tensor=input0)\n model = applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=input0)\n\n feature_extractor = Model(inputs=input0, outputs=model.output)\n optimizer = 'adam' # alternative 'SGD'\n\n feature_extractor.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n return feature_extractor\n return model\n\ndef transferNet(feature_extractor, num_classes, output_layer_name, trainable_layers):\n # get the original input layer tensor\n input_t = feature_extractor.get_layer(index=0).input\n\n # set the feature extractor layers as non-trainable\n for idx, layer in enumerate(feature_extractor.layers):\n if layer.name in trainable_layers:\n layer.trainable = True\n else:\n layer.trainable = False\n\n # get the output tensor from a layer of the feature extractor\n output_extractor = 
feature_extractor.get_layer(name=output_layer_name).output\n\n #output_extractor = MaxPooling2D(pool_size=(4,4))(output_extractor)\n output_extractor = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(output_extractor)\n\n # flat the output of a Conv layer\n flatten = Flatten()(output_extractor)\n flatten_norm = BatchNormalization()(flatten)\n\n # add a Dense layer\n dense = Dropout(0.4)(flatten_norm)\n dense = Dense(200, activation='relu')(dense)\n dense = BatchNormalization()(dense)\n\n # add a Dense layer\n dense = Dropout(0.4)(dense)\n dense = Dense(100, activation='relu')(dense)\n dense = BatchNormalization()(dense)\n\n # add the final output layer\n dense = BatchNormalization()(dense)\n dense = Dense(num_classes, activation='softmax')(dense)\n\n model = Model(inputs=input_t, outputs=dense, name=\"transferNet\")\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n return model\n\n\nif __name__ == \"__main__\":\n\n # Removing corrupt EXIF data\n if(EXIF_FLAG):\n delCorruptEXIF(glob.glob(trainingset + imgstype))\n\n batch_size = BATCH_SIZE\n train_generator, test_generator = processData(batch_size)\n\n num_samples = train_generator.n\n num_classes = train_generator.num_classes\n input_shape = train_generator.image_shape\n\n classnames = [k for k, v in train_generator.class_indices.items()]\n\n print(\"Image input %s\" % str(input_shape))\n print(\"Classes: %r\" % classnames)\n\n stopping = callbacks.EarlyStopping(monitor='val_accuracy', patience=6)\n\n # ------------------------------- Train the model ----------------------------------- #\n print(\"Generating the model\")\n model = RickyNet(input_shape, num_classes)\n\n print(model.summary())\n\n steps_per_epoch = train_generator.n // train_generator.batch_size\n val_steps = test_generator.n // test_generator.batch_size + 1\n\n try:\n history = model.fit_generator(train_generator, epochs=EPOCHS, verbose=1, callbacks=[stopping],\n steps_per_epoch=steps_per_epoch,\n validation_data=test_generator,\n validation_steps=val_steps)\n except KeyboardInterrupt:\n pass\n\n\n\n savemodel(model, MODEL_NAME)\n \n plot_history(history, MODEL_NAME)\n # ---------------------------------------------------------------------------------- #\n\n\n # -------------------------------- Load a model ------------------------------------ #\n # model = loadmodel(MODEL_NAME)\n # ---------------------------------------------------------------------------------- #\n\n\n # ------------------------------ Evaluation tests ---------------------------------- #\n print(\"\\nAcc and Loss on Test_New:\")\n teg1 = processTest(batch_size, testset1)\n val_steps1 = teg1.n // teg1.batch_size + 1\n # Accuracy + Loss\n loss, acc = model.evaluate_generator(teg1, verbose=1, steps=val_steps1)\n print('Test loss: %f' % loss)\n print('Test accuracy: %f' % acc)\n\n print(\"\\nAcc and Loss on Weather_Test:\")\n teg2 = processTest(batch_size, testset2)\n val_steps2 = teg2.n // teg2.batch_size + 1\n # Accuracy + Loss\n loss, acc = model.evaluate_generator(teg2, verbose=1, steps=val_steps2)\n print('Test loss: %f' % loss)\n print('Test accuracy: %f' % acc)\n\n # Confusion Matrix\n Y_pred = model.predict_generator(teg2, val_steps2)\n y_pred = np.argmax(Y_pred, axis=1)\n skplt.metrics.plot_confusion_matrix(teg2.classes, y_pred, normalize=True, title=\"RickyNet\")\n plt.ylim([3.5, -.5])\n plt.tight_layout()\n plt.show()\n\n # Precision Recall Curve\n skplt.metrics.plot_precision_recall_curve(teg2.classes, Y_pred, title=\"RickyNet\")\n plt.show()\n \n # 
Precision + Recall + f1-score\n preds = model.predict_generator(teg2, verbose=1, steps=val_steps2)\n Ypred = np.argmax(preds, axis=1)\n Ytest = teg2.classes\n print(classification_report(Ytest, Ypred, labels=None, target_names=classnames, digits=3))\n # ---------------------------------------------------------------------------------- #\n\n\n\n\n # ------------------------------ Transfer learning + Fine tuning ---------------------------------- #\n # load the pre-trained model\n feature_extractor = load_backbone_net(input_shape)\n feature_extractor.summary()\n\n # VGG16\n name_output_extractor = \"block5_pool\"\n trainable_layers = [\"block5_conv3\"]\n\n # build the transfer model\n transfer_model = transferNet(feature_extractor, num_classes, name_output_extractor, trainable_layers)\n transfer_model.summary()\n\n\n steps_per_epoch = train_generator.n // train_generator.batch_size\n val_steps = test_generator.n // test_generator.batch_size + 1\n\n try:\n history_transfer = transfer_model.fit_generator(train_generator, epochs=EPOCHS, verbose=1, callbacks=[stopping], \\\n steps_per_epoch=steps_per_epoch, \\\n validation_data=test_generator, \\\n validation_steps=val_steps)\n except KeyboardInterrupt:\n pass\n\n savemodel(transfer_model, MODEL_NAME)\n plot_history(history_transfer, MODEL_NAME)\n # ------------------------------------------------------------------------------------------------- #\n\n # -------------------------------- Load a TL model ------------------------------------ #\n # transfer_model = loadmodel(MODEL_NAME)\n # ------------------------------------------------------------------------------------- #\n\n # ------------------------------ TF Evaluation tests ---------------------------------- #\n print(\"\\nAcc and Loss on Test_New:\")\n teg1 = processTest(batch_size, testset1)\n val_steps1 = teg1.n // teg1.batch_size + 1\n # Accuracy + Loss\n loss, acc = transfer_model.evaluate_generator(teg1, verbose=1, steps=val_steps1)\n print('Test loss: %f' % loss)\n print('Test accuracy: %f' % acc)\n\n print(\"\\nAcc and Loss on Weather_Test:\")\n teg2 = processTest(batch_size, testset2)\n val_steps2 = teg2.n // teg2.batch_size + 1\n # Accuracy + Loss\n loss, acc = transfer_model.evaluate_generator(teg2, verbose=1, steps=val_steps2)\n print('Test loss: %f' % loss)\n print('Test accuracy: %f' % acc)\n\n # Precision + Recall + f1-score\n preds = transfer_model.predict_generator(teg2, verbose=1, steps=val_steps2)\n Ypred = np.argmax(preds, axis=1)\n Ytest = teg2.classes\n print(classification_report(Ytest, Ypred[:teg2.n], labels=None, target_names=classnames, digits=3))\n\n # Confusion Matrix\n Y_pred = transfer_model.predict_generator(teg2, val_steps2)\n y_pred = np.argmax(Y_pred, axis=1)\n skplt.metrics.plot_confusion_matrix(teg2.classes, y_pred[:teg2.n], normalize=True, title=MODEL_NAME)\n plt.ylim([3.5, -.5])\n plt.tight_layout()\n plt.show()\n\n # Precision Recall Curve\n skplt.metrics.plot_precision_recall_curve(teg2.classes, Y_pred[:teg2.n], title=MODEL_NAME)\n plt.show()\n # ------------------------------------------------------------------------------------ #\n\n # ---------------------------------- Blind test -------------------------------------- #\n tegb = processTest(batch_size, blindtest)\n solveBlind(transfer_model, tegb, classnames)\n # ------------------------------------------------------------------------------------ #\n\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.keras.models.load_model", "matplotlib.pyplot.imshow", "matplotlib.pyplot.plot", "sklearn.metrics.classification_report", "tensorflow.keras.layers.Dropout", "matplotlib.pyplot.tight_layout", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "tensorflow.keras.layers.Conv2D", "numpy.argmax", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.AveragePooling2D", "tensorflow.compat.v1.GPUOptions", "matplotlib.pyplot.title", "tensorflow.keras.models.Model", "matplotlib.pyplot.ylim", "tensorflow.keras.layers.Dense", "tensorflow.keras.applications.vgg16.VGG16", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "tensorflow.compat.v1.ConfigProto", "tensorflow.keras.layers.BatchNormalization", "matplotlib.pyplot.xlabel", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
pkozakowski/trax
[ "31215c378017347e0b66ba51c37cd3cbedf60b17" ]
[ "trax/supervised/trainer_lib_test.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for trax.supervised.trainer_lib.\"\"\"\n\nimport functools\nimport os\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom jax import test_util # pylint: disable=unused-import\nfrom jax.config import config\nfrom jax.lib import xla_bridge\nimport tensorflow.compat.v2 as tf\nfrom trax import fastmath\nfrom trax import layers as tl\nfrom trax import models\nfrom trax import optimizers as trax_opt\nfrom trax import shapes as trax_shapes\nfrom trax import test_utils\nfrom trax.data import inputs as inputs_lib\nfrom trax.fastmath import numpy as jnp\nfrom trax.supervised import lr_schedules as lr\nfrom trax.supervised import trainer_lib\nfrom trax.tf_numpy import extensions as npe\nfrom trax.tf_numpy import numpy as tf_np\n\n\n\ndef _test_inputs(n_classes, with_weights=False, input_shape=(6, 6, 3)):\n \"\"\"Make trainer_lib.inputs.Inputs.\"\"\"\n batch_size = 2 * xla_bridge.device_count()\n\n def input_stream(n_devices):\n del n_devices\n key = fastmath.random.get_prng(0)\n while True:\n keys = fastmath.random.split(key, 4)\n key = keys[0]\n inputs = fastmath.random.uniform(\n keys[1], [batch_size] + list(input_shape))\n targets = fastmath.random.randint(\n keys[2], [batch_size], dtype=jnp.int32, minval=0, maxval=n_classes)\n weights = fastmath.random.uniform(keys[3], [batch_size])\n if with_weights:\n yield inputs, targets, weights\n else:\n yield inputs, targets\n\n def input_stream_masked(n_devices):\n return inputs_lib.add_loss_weights(input_stream(n_devices))\n\n return inputs_lib.Inputs(input_stream_masked)\n\n\ndef _test_inputs_lm(vocab_size, seq_len, per_device_batch_size=2):\n \"\"\"Make trainer_lib.inputs.Inputs for language model.\"\"\"\n batch_size = per_device_batch_size * xla_bridge.device_count()\n\n def input_stream(_):\n def make_batch(key):\n return fastmath.random.randint(\n key, [batch_size, seq_len], dtype=jnp.int32, minval=0,\n maxval=vocab_size)\n key = fastmath.random.get_prng(0)\n while True:\n keys = fastmath.random.split(key, 3)\n key = keys[0]\n inputs = make_batch(keys[1])\n targets = make_batch(keys[2])\n yield inputs, targets\n\n def input_stream_masked(n_devices):\n return inputs_lib.add_loss_weights(input_stream(n_devices))\n\n return inputs_lib.Inputs(input_stream_masked)\n\n\n\nBACKENDS = [fastmath.Backend.JAX, fastmath.Backend.TFNP]\n\n\ndef short_name(b):\n if b == fastmath.Backend.JAX:\n return 'jax'\n else:\n return 'tf'\n\n\ndef opt_name(opt):\n if opt is None:\n return 'None'\n return opt.__name__\n\n\ndef _pure_lsh_self_attention_fn(n_chunks_after=0):\n return functools.partial(\n tl.PureLSHSelfAttentionWrapper,\n attention_dropout=0.1,\n chunk_len=16,\n n_buckets=[32, 32],\n n_chunks_after=n_chunks_after,\n n_chunks_before=1,\n n_hashes=2,\n n_parallel_heads=1,\n max_length_for_buckets=1024,\n predict_drop_len=128,\n predict_mem_len=1024,\n num_weights=2,\n bias=False,\n 
pure_lsh_implementation=tl.PureLSHSelfAttention,\n )\n\n\ndef _mixed_lsh_self_attention_fn(n_chunks_after=0):\n return functools.partial(\n tl.PureLSHSelfAttentionWrapper,\n attention_dropout=0.1,\n chunk_len=16,\n n_buckets=[32, 32],\n n_chunks_after=n_chunks_after,\n n_chunks_before=1,\n n_hashes=2,\n n_parallel_heads=1,\n max_length_for_buckets=1024,\n predict_drop_len=128,\n predict_mem_len=1024,\n num_weights=2,\n bias=False,\n pure_lsh_implementation=tl.MixedLSHSelfAttention,\n )\n\n\nclass TraxTest(parameterized.TestCase):\n\n def __init__(self, methodName='runTest'): # pylint: disable=invalid-name\n super().__init__(methodName)\n if npe.tpu_devices():\n # Initialize TPU for TF\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='local')\n tf.tpu.experimental.initialize_tpu_system(resolver)\n\n def setUp(self):\n super().setUp()\n test_utils.ensure_flag('test_tmpdir')\n self._old_is_allow_float64 = tf_np.is_allow_float64()\n tf_np.set_allow_float64(False)\n\n def tearDown(self):\n tf_np.set_allow_float64(self._old_is_allow_float64)\n super().tearDown()\n\n def _test_train_eval_predict(self, backend, model_name='Simple',\n optimizer=None):\n with fastmath.use_backend(backend):\n # Prepare model and inputs\n steps = 2\n eval_steps = 2\n\n if model_name == 'Simple':\n n_classes = 4\n # Adds Dropout and BatchNorm to test state handling.\n def model_fn(mode='train'):\n return tl.Serial(\n tl.Dropout(mode=mode, rate=0.1),\n tl.BatchNorm(mode=mode),\n models.MLP(layer_widths=(16, 16, n_classes), mode=mode))\n inputs = _test_inputs(n_classes)\n n_in = 1\n elif model_name == 'Resnet50':\n n_classes = 4\n model_fn = models.Resnet50\n inputs = _test_inputs(n_classes, input_shape=(224, 224, 3))\n n_in = 1\n elif model_name == 'Transformer':\n vocab_size = 32\n seq_len = 16\n inputs = _test_inputs_lm(vocab_size, seq_len)\n model_fn = functools.partial(\n models.Transformer,\n input_vocab_size=vocab_size)\n n_in = 2\n else:\n raise ValueError('Unrecognized model name: ' + model_name)\n\n kwargs = {}\n if optimizer is not None:\n kwargs['optimizer'] = optimizer\n\n # Train and evaluate\n output_dir = self.create_tempdir().full_path\n loop = trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps,\n eval_frequency=1, # eval at every step.\n **kwargs)\n\n # Assert total train steps\n self.assertEqual(steps, loop.step)\n\n inputs = inputs.train_stream(1)\n\n # Predict with final weights\n model = model_fn()\n weights = loop.model.weights\n state = loop.model.state\n model(next(inputs)[:n_in], weights=weights, state=state)\n\n # Predict with weights loaded from file.\n model = model_fn()\n model.init_from_file(os.path.join(output_dir, 'model.pkl.gz'))\n model(next(inputs)[:n_in])\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (short_name(backend), model_name, opt_name(opt)), # pylint: disable=g-complex-comprehension\n backend, model_name, opt)\n for backend, configs in [\n (fastmath.Backend.JAX, [('Simple', None)]),\n (fastmath.Backend.TFNP, [('Simple', None),\n ('Resnet50', trax_opt.Momentum),\n ('Transformer', trax_opt.Adam)])]\n for model_name, opt in configs)\n def test_train_eval_predict(self, backend, model_name, opt):\n self._test_train_eval_predict(backend, model_name, opt)\n\n @parameterized.parameters(BACKENDS)\n def test_train_eval_predict_sm3(self, backend):\n self._test_train_eval_predict(backend, 'Simple', trax_opt.SM3)\n\n @parameterized.parameters(BACKENDS)\n def test_train_restart(self, backend):\n with 
fastmath.use_backend(backend):\n # Prepare model and inputs\n n_classes = 4\n steps = 2\n eval_steps = 2\n model_fn = functools.partial(models.MLP,\n layer_widths=(16, 16, n_classes))\n inputs = _test_inputs(n_classes)\n\n # Train and evaluate\n output_dir = self.create_tempdir().full_path\n trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps,\n eval_frequency=1)\n\n # Restart training\n loop = trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=(2 * steps),\n eval_steps=eval_steps,\n eval_frequency=1)\n\n # Assert total train steps\n self.assertEqual(loop.step, 2 * steps)\n\n @parameterized.parameters(BACKENDS)\n def test_train_permanent_checkpoints(self, backend):\n with fastmath.use_backend(backend):\n # Prepare model and inputs\n n_classes = 4\n steps = 5\n eval_steps = 2\n model_fn = functools.partial(models.MLP,\n layer_widths=(16, 16, n_classes))\n inputs = _test_inputs(n_classes)\n\n # Train and evaluate\n output_dir = self.create_tempdir().full_path\n\n # Steps 1 -> 5\n loop = trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps,\n eval_frequency=1,\n permanent_checkpoint_frequency=2)\n\n # Steps 6 -> 10\n loop = trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=(2 * steps),\n eval_steps=eval_steps,\n eval_frequency=1,\n permanent_checkpoints_at=[7, 8, 10])\n\n path = os.path.join(output_dir, 'model.pkl.gz')\n self.assertTrue(tf.io.gfile.exists(path))\n\n for step in range(11):\n filename = 'model_{}.pkl.gz'.format(step)\n path = os.path.join(output_dir, filename)\n if step in [1, 2, 4, 7, 8, 10]:\n self.assertTrue(tf.io.gfile.exists(path),\n msg='No model for step: {} in dir {}.'.format(\n step, tf.io.gfile.listdir(output_dir)))\n else:\n self.assertFalse(tf.io.gfile.exists(path),\n msg='Model for step: {} in dir {}.'.format(\n step, tf.io.gfile.listdir(output_dir)))\n\n # Assert total train steps\n self.assertEqual(loop.step, 10)\n\n @parameterized.parameters(BACKENDS)\n def test_train_restart_with_same_steps(self, backend):\n with fastmath.use_backend(backend):\n # Prepare model and inputs\n n_classes = 4\n steps = 2\n eval_steps = 2\n model_fn = functools.partial(models.MLP,\n layer_widths=(16, 16, n_classes))\n inputs = _test_inputs(n_classes)\n\n # Train and evaluate\n output_dir = self.create_tempdir().full_path\n trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps,\n eval_frequency=1)\n\n # Restart training\n loop = trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps,\n eval_frequency=1)\n\n # Assert total train steps\n self.assertEqual(loop.step, steps)\n\n def test_train_with_pure_lsh_attention(self, backend=fastmath.Backend.JAX):\n with fastmath.use_backend(backend):\n # Prepare model and inputs\n def model(mode='train'):\n return models.Reformer2(\n mode=mode,\n d_model=16,\n d_ff=16,\n n_heads=2,\n dropout=0.05,\n n_decoder_layers=1,\n n_encoder_layers=1,\n input_vocab_size=256,\n encoder_attention_type=_pure_lsh_self_attention_fn(),\n encoder_decoder_attention_type=_pure_lsh_self_attention_fn(),\n )\n\n max_len = 128\n inputs = _test_inputs_lm(vocab_size=256, seq_len=max_len)\n\n steps = 1\n eval_steps = 1\n\n # Train and evaluate\n output_dir = self.create_tempdir().full_path\n trainer_lib.train(\n output_dir,\n model=model,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps,\n 
eval_frequency=1)\n\n # Read checkpoint\n model_file = os.path.join(output_dir, 'model.pkl.gz')\n\n shape11 = trax_shapes.ShapeDtype((1, 1), dtype=jnp.int32)\n shape1l = trax_shapes.ShapeDtype((1, max_len), dtype=jnp.int32)\n\n model_predict = model(mode='predict')\n model_predict.init_from_file(\n model_file, weights_only=True, input_signature=(shape1l, shape11))\n\n def test_train_with_mixed_lsh_attention(self, backend=fastmath.Backend.JAX):\n with fastmath.use_backend(backend):\n # Prepare model and inputs\n\n def model(mode='train'):\n return models.Reformer2(\n mode=mode,\n d_model=16,\n d_ff=16,\n n_heads=2,\n dropout=0.05,\n n_decoder_layers=1,\n n_encoder_layers=1,\n input_vocab_size=256,\n encoder_attention_type=_mixed_lsh_self_attention_fn(),\n encoder_decoder_attention_type=_mixed_lsh_self_attention_fn(),\n )\n\n max_len = 128\n inputs = _test_inputs_lm(vocab_size=256, seq_len=max_len)\n\n steps = 1\n eval_steps = 1\n\n # Train and evaluate\n output_dir = self.create_tempdir().full_path\n trainer_lib.train(\n output_dir,\n model=model,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps,\n eval_frequency=1)\n\n # Read checkpoint\n model_file = os.path.join(output_dir, 'model.pkl.gz')\n\n shape11 = trax_shapes.ShapeDtype((1, 1), dtype=jnp.int32)\n shape1l = trax_shapes.ShapeDtype((1, max_len), dtype=jnp.int32)\n\n model_predict = model(mode='predict')\n model_predict.init_from_file(model_file, weights_only=True,\n input_signature=(shape1l, shape11))\n\n @parameterized.parameters(BACKENDS)\n def test_train_fills_in_missing_eval_metrics(self, backend):\n with fastmath.use_backend(backend):\n # Prepare model and inputs\n n_classes = 4\n steps = 2\n eval_steps = 2\n model_fn = functools.partial(models.MLP, layer_widths=(16, 16, n_classes))\n inputs = _test_inputs(n_classes)\n additional_eval_stream = trainer_lib.NamedStream(\n # deliberately duplicating eval data\n stream=inputs.eval_stream(1),\n name='additional_eval_task')\n\n # Train and evaluate\n output_dir = self.create_tempdir().full_path\n loop = trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps,\n eval_frequency=1,\n additional_eval_streams=[additional_eval_stream])\n\n self.assertLen(loop.eval_tasks, 2)\n eval_task_1, eval_task_2 = loop.eval_tasks\n self.assertCountEqual(eval_task_1.metrics, eval_task_2.metrics)\n self.assertCountEqual(eval_task_1.metric_names, eval_task_2.metric_names)\n\n @parameterized.named_parameters(\n ('_%s' % short_name(backend), backend)\n for backend in BACKENDS)\n def test_train_with_weights(self, backend):\n with fastmath.use_backend(backend):\n # Prepare model and inputs\n n_classes = 4\n steps = 2\n eval_steps = 2\n model_fn = functools.partial(models.MLP,\n layer_widths=(16, 16, n_classes))\n inputs = _test_inputs(n_classes, with_weights=True)\n\n # Train and evaluate\n output_dir = self.create_tempdir().full_path\n state = trainer_lib.train(\n output_dir,\n model=model_fn,\n inputs=inputs,\n steps=steps,\n eval_steps=eval_steps)\n\n # Assert total train steps\n self.assertEqual(state.step, steps)\n\n @parameterized.parameters(BACKENDS)\n def test_reset_twice(self, backend):\n with fastmath.use_backend(backend):\n n_classes = 4\n model_fn = functools.partial(models.MLP,\n layer_widths=(16, 16, n_classes))\n inputs = _test_inputs(n_classes)\n\n trainer = trainer_lib.Trainer(\n model=model_fn,\n loss_fn=tl.WeightedCategoryCrossEntropy(),\n optimizer=trax_opt.SM3,\n lr_schedule=lr.multifactor(),\n inputs=inputs,\n )\n\n output_dir1 = 
self.create_tempdir(name='output_dir1').full_path\n trainer.reset(output_dir1)\n trainer.evaluate(1)\n output_dir2 = self.create_tempdir(name='output_dir2').full_path\n trainer.reset(output_dir2)\n trainer.evaluate(1)\n\n def test_tf_xla_forced_compile(self):\n # TODO(wangpeng): re-enable this test\n self.skipTest('Needs --config=cuda to pass this test')\n old_flag = fastmath.tf.tf_xla_forced_compile_enabled()\n fastmath.tf.set_tf_xla_forced_compile(True)\n self._test_train_eval_predict('tf')\n fastmath.tf.set_tf_xla_forced_compile(old_flag)\n\n def test_no_int32_or_uint32_returned(self):\n \"\"\"Tests that Trainer._jit_update_fn doesn't return int32 or uint32.\n\n TF pins int32/uint32 tensors to CPU, which will cause XLA-forced-compiled\n computation to copy int32/uint32 outputs to CPU. This test makes sure that\n won't happen.\n \"\"\"\n with fastmath.use_backend(fastmath.Backend.TFNP):\n n_classes = 1001\n model_fn = functools.partial(models.Resnet50,\n n_output_classes=n_classes)\n inputs = _test_inputs(n_classes, input_shape=(224, 224, 3))\n trainer = trainer_lib.Trainer(\n model=model_fn,\n loss_fn=tl.WeightedCategoryCrossEntropy(),\n optimizer=trax_opt.SM3,\n lr_schedule=lr.multifactor(),\n inputs=inputs,\n )\n output_dir = self.create_tempdir().full_path\n trainer.reset(output_dir)\n trainer.train_epoch(1, 0)\n # Those are the things returned by Trainer._jit_update_fn\n arrays = (trainer._opt_state.weights, trainer._opt_state.slots,\n trainer._model_state, trainer._rngs)\n arrays = tf.nest.flatten(arrays)\n for x in arrays:\n if isinstance(x, jnp.ndarray) and (x.dtype == jnp.int32 or\n x.dtype == jnp.uint32):\n raise ValueError('Found an array of int32 or uint32: %s' % x)\n\n\n\nclass EpochsTest(absltest.TestCase):\n\n def test_cuts_epoch_when_total_steps_reached(self):\n epoch_steps = trainer_lib.epochs(\n total_steps=5, steps_to_skip=0, epoch_steps=[1, 2, 3])\n self.assertEqual(list(epoch_steps), [1, 2, 2])\n\n def test_skips_full_epoch(self):\n epoch_steps = trainer_lib.epochs(\n total_steps=4, steps_to_skip=2, epoch_steps=[2, 2])\n self.assertEqual(list(epoch_steps), [2])\n\n def test_skips_part_of_epoch(self):\n epoch_steps = trainer_lib.epochs(\n total_steps=4, steps_to_skip=1, epoch_steps=[2, 2])\n self.assertEqual(list(epoch_steps), [1, 2])\n\n\nif __name__ == '__main__':\n config.config_with_absl()\n tf.compat.v1.enable_eager_execution()\n absltest.main()\n" ]
[ [ "tensorflow.compat.v2.distribute.cluster_resolver.TPUClusterResolver", "tensorflow.compat.v2.compat.v1.enable_eager_execution", "tensorflow.compat.v2.io.gfile.exists", "tensorflow.compat.v2.tpu.experimental.initialize_tpu_system", "tensorflow.compat.v2.io.gfile.listdir", "tensorflow.compat.v2.nest.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liyongsheng-tech/pkuseg
[ "e5bd8a4f7e2589a3c132c291433abd3be5c69dba" ]
[ "libs/networks/builder.py" ]
[ "import importlib\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom heads import FCNHead\n\n\nclass ModelBuilder(nn.Module):\n def __init__(self, net_config, aux_config=None):\n super(ModelBuilder, self).__init__()\n\n num_classes = net_config['num_classes']\n out_planes = int(net_config['out_planes'])\n self.use_aux = aux_config is not None\n\n if self.use_aux:\n in_planes = int(aux_config['in_planes'])\n out_planes = int(aux_config['out_planes'])\n self.aux = FCNHead(in_planes, out_planes)\n self.aux_clsf = nn.Conv2d(out_planes, num_classes)\n\n self.encoder = self._build_module(net_config, 'encoder')\n self.seg_head = self._build_module(net_config, 'seg_head')\n self.decoder = self._build_module(net_config, 'decoder')\n assert self.encoder is not None, 'There must be an encoder!'\n\n if out_planes >= 256:\n self.clsf = nn.Sequential(\n nn.Dropout2d(0.1),\n nn.Conv2d(out_planes, num_classes, 1))\n else:\n self.clsf = nn.Conv2d(out_planes, num_classes)\n\n\n def _build_module(self, net_config, key):\n cls_config = net_config.get(key, None)\n if cls_config is None:\n return None\n\n cls_type = cls_config['type']\n mod_name, cls_name = cls_type.rsplit('.', 1)\n mod = importlib.import_module(mod_name)\n cls = getattr(mod, cls_name)\n return cls(**cls_config.get('args', dict()))\n\n def forward(self, x):\n h, w = x.size()[-2:]\n xs = self.encoder(x)\n\n if self.seg_head is not None:\n xs[-1] = self.seg_head(xs[-1])\n\n if self.decoder is not None:\n x = self.decoder(xs)\n else:\n x = xs[-1]\n\n pred = self.clsf(x)\n pred = F.interpolate(pred, size=(h, w), mode='bilinear', \n align_corners=True)\n\n if not self.use_aux:\n return [pred, None]\n\n aux = self.aux(xs[-2])\n aux = self.aux_clsf(aux)\n aux = F.interpolate(aux, size=(h, w), mode='bilinear', \n align_corners=True)\n\n return [pred, aux]\n\n\nif __name__ == '__main__':\n net_config = {\n 'num_classes': 19,\n 'out_planes': 512,\n 'encoder': {\n 'type': 'resnet.resnet50',\n },\n 'seg_head': {\n 'type': 'heads.ASPP',\n 'args': {\n 'in_planes': 2048,\n }\n },\n }\n model = ModelBuilder(net_config)\n print(model)\n input = torch.Tensor(2, 3, 513, 513)\n outputs = model(input)\n for output in outputs:\n if output is not None:\n print(output.size())\n\n" ]
[ [ "torch.nn.Conv2d", "torch.nn.Dropout2d", "torch.Tensor", "torch.nn.functional.interpolate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Eccsx/CVRP-Genetic-Tournament
[ "8b428ec8ef976489bd701e2d11253249fe46ff88" ]
[ "src/population.py" ]
[ "from math import floor\nfrom numpy import delete, fliplr\nfrom itertools import chain\nfrom random import random, sample\nfrom copy import deepcopy\nfrom chromosome import fitness, pmx, obx\nfrom utils import create_shuffle_array\nfrom genetic import POPULATION_SIZE, PROBABILITY_ELITISM, \\\n NUMBER_TOURNAMENT_SELECTION, CROSSOVER_METHOD, PROBABILITY_MUTATION, \\\n MUTATION_METHOD\n\n\ndef create_population(\n vehicles_capacity,\n customers\n):\n # Initialization\n chromosomes = [\n create_shuffle_array(len(customers)) for _ in range(POPULATION_SIZE)\n ]\n\n # Chromosomes validity\n chromosomes = separate_by_capacity(\n chromosomes,\n vehicles_capacity,\n customers\n )\n\n return chromosomes\n\n\ndef separate_by_capacity(\n chromosomes,\n vehicles_capacity,\n customers\n):\n new_chromosomes = []\n\n for chromosome in chromosomes:\n new_chromosome = []\n gene = []\n total_demand = 0\n\n for node in chromosome:\n demand = customers[node][2]\n\n # We only care about the chromosome parts respecting\n # the capacity constraint\n if (total_demand + demand) > vehicles_capacity:\n # Add new gene to the new chromosome\n new_chromosome.append(gene.copy())\n\n # Reset total demand\n total_demand = 0\n\n # Clear gene\n del gene[:]\n\n # Update total demand\n total_demand += demand\n\n # Add node to gene\n gene.append(node)\n\n # Append new gene to new chromosome\n new_chromosome.append(gene.copy())\n\n new_chromosomes.append(new_chromosome)\n\n return new_chromosomes\n\n\ndef elistism(\n chromosomes,\n customers,\n COORDINATES_DEPOT\n):\n elites = []\n\n while len(chromosomes) < floor(PROBABILITY_ELITISM * len(chromosomes)) + 1:\n best = chromosomes[0]\n pos = 0\n for i in range(len(chromosomes)):\n if fitness(chromosomes[i], customers, COORDINATES_DEPOT) \\\n < fitness(best, customers, COORDINATES_DEPOT):\n pos = i\n best = chromosomes[i]\n elites.append(\n list(chain.from_iterable(best.copy()))\n )\n chromosomes = delete(chromosomes, [pos], axis=0)\n\n return elites\n\n\ndef crossover(\n chromosomes,\n customers,\n vehicles_capacity,\n COORDINATES_DEPOT\n):\n crossed_chromosomes = []\n chromosomes_copy = chromosomes.copy()\n\n # Elitism\n crossed_chromosomes = elistism(chromosomes, customers, COORDINATES_DEPOT)\n\n # Number of chromosomes to create\n num_chromosomes = len(chromosomes)\n\n if num_chromosomes % 2 == 0:\n num_chromosomes = int(num_chromosomes / 2)\n else:\n crossed_chromosomes.append((list(chain.from_iterable(chromosomes[0]))))\n delete(chromosomes, 0)\n num_chromosomes = int((num_chromosomes - 1) / 2)\n\n # Breeding process\n for _ in range(num_chromosomes):\n positions = sample(range(0, len(chromosomes_copy)), 2)\n\n # Crossover\n if CROSSOVER_METHOD == 'RANDOM':\n if random() >= 0.5:\n aa, bb = pmx(\n deepcopy(chromosomes_copy[positions[0]]),\n deepcopy(chromosomes_copy[positions[1]]),\n len(customers)\n )\n else:\n aa, bb = obx(\n deepcopy(chromosomes_copy[positions[0]]),\n deepcopy(chromosomes_copy[positions[1]]),\n len(customers)\n )\n\n elif CROSSOVER_METHOD == 'PMX':\n aa, bb = pmx(\n deepcopy(chromosomes_copy[positions[0]]),\n deepcopy(chromosomes_copy[positions[1]]),\n len(customers)\n )\n elif CROSSOVER_METHOD == 'OBX':\n aa, bb = obx(\n deepcopy(chromosomes_copy[positions[0]]),\n deepcopy(chromosomes_copy[positions[1]]),\n len(customers)\n )\n\n # append selected\n crossed_chromosomes.append(aa.copy())\n crossed_chromosomes.append(bb.copy())\n\n # return\n return separate_by_capacity(\n crossed_chromosomes,\n vehicles_capacity,\n customers\n )\n\n\ndef 
tournament_selection(\n chromosomes,\n customers,\n COORDINATES_DEPOT\n):\n selection = []\n\n # Select by elitism\n while len(selection) < floor(PROBABILITY_ELITISM * len(chromosomes)) + 1:\n # Take first chromosome as reference\n best = chromosomes[0]\n index = 0\n\n # Compare all chromosomes to keep the best one\n for i in range(len(chromosomes)):\n if fitness(chromosomes[i], customers, COORDINATES_DEPOT) \\\n < fitness(best, customers, COORDINATES_DEPOT):\n index = i\n best = chromosomes[i]\n\n selection.append(best.copy())\n chromosomes = delete(chromosomes, [index], axis=0)\n\n # Gene recombination\n for _ in range(0, len(chromosomes)):\n\n # Pick random positions\n positions = sample(\n range(0, len(chromosomes)),\n NUMBER_TOURNAMENT_SELECTION\n )\n\n # Define genes to compare\n compare = chromosomes[positions[0]]\n\n # Keep best genes\n for position in positions:\n if fitness(chromosomes[position], customers, COORDINATES_DEPOT) \\\n < fitness(compare, customers, COORDINATES_DEPOT):\n compare = chromosomes[position]\n\n selection.append(compare)\n\n return selection\n\n\ndef mutation(\n chromosomes,\n customers,\n vehicles_capacity,\n COORDINATES_DEPOT\n):\n mutated_chromosomes = []\n\n # Elitism\n while len(mutated_chromosomes) \\\n < floor(PROBABILITY_ELITISM * len(chromosomes)) + 1:\n best = chromosomes[0]\n pos = 0\n for i in range(len(chromosomes)):\n if fitness(chromosomes[i], customers, COORDINATES_DEPOT) \\\n < fitness(best, customers, COORDINATES_DEPOT):\n pos = i\n best = chromosomes[i]\n\n mutated_chromosomes.append(list(chain.from_iterable(best.copy())))\n chromosomes = delete(chromosomes, [pos], axis=0)\n\n # Number of chromosomes to create\n num_chromosomes = len(chromosomes)\n\n # Mutate chromosomes\n for a in range(num_chromosomes):\n if random() <= PROBABILITY_MUTATION:\n if MUTATION_METHOD == 'RANDOM':\n if random() <= 0.5:\n pass\n else:\n pass\n elif MUTATION_METHOD == 'EXC':\n # get cut positions\n positions = sample(range(0, len(customers)), 2)\n\n # sort postions\n positions.sort()\n\n # concatenate\n concatenated_chromosome = \\\n list(chain.from_iterable(chromosomes[a]))\n\n # get and reverse range\n aux = concatenated_chromosome[positions[0]]\n\n # set reversed interval\n concatenated_chromosome[positions[0]] \\\n = concatenated_chromosome[positions[1]]\n\n # set seconda value\n concatenated_chromosome[positions[1]] = aux\n\n # set new value\n mutated_chromosomes.append(concatenated_chromosome)\n\n # if reverse\n elif MUTATION_METHOD == 'INV':\n # get cut positions\n positions = sample(range(1, len(customers) - 1), 2)\n\n # sort postions\n positions.sort()\n\n # concatenate\n concatenated_chromosome = \\\n list(chain.from_iterable(chromosomes[a]))\n\n # get and reverse range\n aux = concatenated_chromosome[positions[0]:(positions[1] + 1)]\n\n # set seconda value\n concatenated_chromosome[positions[0]:(\n positions[1] + 1)], = fliplr([aux])\n\n # set new value\n mutated_chromosomes.append(concatenated_chromosome)\n else:\n mutated_chromosomes.append(\n list(chain.from_iterable(chromosomes[a]))\n )\n\n # return\n return separate_by_capacity(\n mutated_chromosomes,\n vehicles_capacity,\n customers\n )\n" ]
[ [ "numpy.fliplr", "numpy.delete" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
segasai/imf
[ "9a33b9e68b0af677dab86511e343d6099d9ea530", "9a33b9e68b0af677dab86511e343d6099d9ea530" ]
[ "imf/tests/test_distributions.py", "imf/imf.py" ]
[ "import numpy as np\nimport scipy.interpolate\nfrom .. import distributions as D\nnp.random.seed(1)\n\n\ndef sampltest(distr, left=None, right=None, bounds=None):\n\n # check that mean and stddev from the generated sample\n # match what we get from integrating the PDF\n\n def FF1(x):\n return distr.pdf(x) * x\n\n def FF2(x):\n return distr.pdf(x) * x**2\n\n if left is None:\n left = 0\n if right is None:\n right = np.inf\n if bounds is None:\n mom1, _ = scipy.integrate.quad(FF1, left, right)\n mom2, _ = scipy.integrate.quad(FF2, left, right)\n else:\n mom1, mom2 = 0, 0\n for curb in bounds:\n cmom1, _ = scipy.integrate.quad(FF1, curb[0], curb[1])\n cmom2, _ = scipy.integrate.quad(FF2, curb[0], curb[1])\n mom1 += cmom1\n mom2 += cmom2\n\n std = np.sqrt(mom2 - mom1**2)\n assert (mom2 > mom1**2)\n N = int(1e6)\n samps = distr.rvs(N)\n assert ((samps.mean() - mom1) < 5 * std / np.sqrt(N))\n assert ((samps.std() - std) < 20 * std / np.sqrt(2 * (N - 1)))\n\n\ndef ppftest(distr):\n # test that ppf is inverse of cdf\n xs = np.random.uniform(0, 1, size=100)\n eps = 1e-5\n assert (np.all(np.abs(distr.cdf(distr.ppf(xs)) - xs) < eps))\n # test on scalar\n assert (np.abs(distr.cdf(distr.ppf(xs[0])) - xs[0]) < eps)\n assert (np.isnan(distr.ppf(-0.1)))\n assert (np.isnan(distr.ppf(1.1)))\n\n\ndef test_lognorm():\n ln = D.LogNormal(1, 1)\n ln.pdf(1.)\n ln.cdf(1)\n ln.rvs(1000)\n ppftest(ln)\n sampltest(ln)\n\n for i in range(10):\n N = 100000\n mean = np.random.uniform(0.1, 10)\n sig = np.random.uniform(0.1, 10)\n ln2 = D.LogNormal(mean, sig)\n samp = ln2.rvs(N)\n # check that the means and sigmas are correct\n assert (np.abs(np.log(samp).mean() - np.log(mean)) < 0.01 * sig)\n assert (np.abs(np.log(samp).std() - sig) < 0.01 * sig)\n\n\ndef test_broken_plaw():\n ln = D.BrokenPowerLaw([-2, -1.1, -3], [0.1, 1, 2, 100])\n ln.pdf(1.)\n ln.cdf(1)\n ln.rvs(1000)\n ppftest(ln)\n sampltest(ln, 0.05, 120, bounds=[[0.05, 1], [1, 2], [2, 120]])\n # test values in each range\n assert (np.abs(ln.ppf(ln.cdf(0.5)) - 0.5) < 1e-5)\n assert (np.abs(ln.ppf(ln.cdf(1.5)) - 1.5) < 1e-5)\n assert (np.abs(ln.ppf(ln.cdf(2.5)) - 2.5) < 1e-5)\n\n\ndef test_distr():\n ln = D.TruncatedLogNormal(1, 1, 2, 3)\n ln.pdf(1.)\n ln.cdf(1)\n ln.rvs(1000)\n ppftest(ln)\n sampltest(ln, 1, 4)\n ln = D.PowerLaw(-2, 2, 6)\n ln.pdf(1.)\n ln.cdf(1)\n ln.rvs(1000)\n ppftest(ln)\n sampltest(ln, 1, 7)\n\n\ndef test_composite():\n ln = D.CompositeDistribution([\n D.TruncatedLogNormal(1, 1, 2, 3),\n D.PowerLaw(-2, 3, 4),\n D.TruncatedLogNormal(1, 1, 4, 5),\n D.PowerLaw(-3.5, 5, np.inf)\n ])\n ln.pdf(2.5)\n ln.cdf(2.5)\n ln.rvs(1000)\n ppftest(ln)\n # test values in each break\n assert (np.abs(ln.ppf(ln.cdf(2.5)) - 2.5) < 1e-5)\n assert (np.abs(ln.ppf(ln.cdf(3.5)) - 3.5) < 1e-5)\n assert (np.abs(ln.ppf(ln.cdf(4.5)) - 4.5) < 1e-5)\n assert (np.abs(ln.ppf(ln.cdf(5.5)) - 5.5) < 1e-5)\n\n sampltest(ln, 1, np.inf, bounds=[[1, 3], [3, 4], [4, 5], [5, np.inf]])\n\n\ndef test_bounds():\n left, right = 1, 2\n tleft, tright = 0.5, 3\n ln = D.TruncatedLogNormal(1, 1, left, right)\n assert (ln.pdf(tleft) == 0)\n assert (ln.pdf(tright) == 0)\n assert (ln.cdf(tleft) == 0)\n assert (ln.cdf(tright) == 1)\n\n ln = D.PowerLaw(-3, left, right)\n assert (ln.pdf(tleft) == 0)\n assert (ln.pdf(tright) == 0)\n assert (ln.cdf(tleft) == 0)\n assert (ln.cdf(tright) == 1)\n\n ln = D.BrokenPowerLaw(\n [-2, -1.1, -3],\n [left, .6 * left + .3 * right, .3 * left + .6 * right, right])\n assert (ln.pdf(tleft) == 0)\n assert (ln.pdf(tright) == 0)\n assert (ln.cdf(tleft) == 0)\n assert 
(ln.cdf(tright) == 1)\n\n ln = D.CompositeDistribution([\n D.TruncatedLogNormal(1, 1, left, .75 * left + .25 * right),\n D.PowerLaw(-2, .75 * left + .25 * right, .5 * left + .5 * right),\n D.TruncatedLogNormal(1, 1, .5 * left + .5 * right,\n .25 * left + .75 * right),\n D.PowerLaw(-2, .25 * left + .75 * right, right)\n ])\n assert (ln.pdf(tleft) == 0)\n assert (ln.pdf(tright) == 0)\n assert (ln.cdf(tleft) == 0)\n assert (ln.cdf(tright) == 1)\n\n\ndef integralcheck(distr, left, x, val):\n I, EI = scipy.integrate.quad(lambda y: distr.pdf(y), left, x)\n assert (np.abs(val - I) < 1e-6)\n\n\ndef integralcheck_many(distr, left, right):\n integralcheck(distr, left, right, 1)\n N = 100\n xs = np.random.uniform(left, right, size=N)\n for x in xs:\n integralcheck(distr, left, x, distr.cdf(x))\n\n\ndef test_integral():\n # test that the numerically integrated pdf is within 3 sigma of 1\n # for different kind of pdfs\n\n left, right = 2, 3\n distrs = [\n D.TruncatedLogNormal(1, 1, left, right),\n D.PowerLaw(-2, left, right),\n D.BrokenPowerLaw([-2, -1.1, -3],\n [left, .6 * left + .3 * right, .3 * left + .6 * right, right]),\n D.CompositeDistribution([\n D.TruncatedLogNormal(1, 1, left, .75 * left + .25 * right),\n D.PowerLaw(-2, .75 * left + .25 * right, .5 * left + .5 * right),\n D.TruncatedLogNormal(1, 1,\n .5 * left + .5 * right,\n .25 * left + .75 * right),\n D.PowerLaw(-2, .25 * left + .75 * right, right)]\n )\n ]\n for curd in distrs:\n integralcheck_many(curd, left, right)\n", "\"\"\"\nVarious codes to work with the initial mass function\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport types\nimport scipy.integrate\nfrom scipy.special import erf\nfrom six import iteritems\nimport scipy.integrate as integrate\nfrom scipy.integrate import quad\nfrom . 
import distributions\n\n\nclass MassFunction(object):\n \"\"\"\n Generic Mass Function class\n\n (this is mostly meant to be subclassed by other functions, not used itself)\n \"\"\"\n\n def dndm(self, m, **kwargs):\n \"\"\"\n The differential form of the mass function, d N(M) / dM\n \"\"\"\n return self(m, integral_form=False, **kwargs)\n\n def n_of_m(self, m, **kwargs):\n \"\"\"\n The integral form of the mass function, N(M)\n \"\"\"\n return self(m, integral_form=True, **kwargs)\n\n def mass_weighted(self, m, **kwargs):\n return self(m, integral_form=False, **kwargs) * m\n\n def integrate(self, mlow, mhigh, **kwargs):\n \"\"\"\n Integrate the mass function over some range\n \"\"\"\n return scipy.integrate.quad(self, mlow, mhigh)\n\n def m_integrate(self, mlow, mhigh, **kwargs):\n \"\"\"\n Integrate the mass-weighted mass function over some range (this tells\n you the fraction of mass in the specified range)\n \"\"\"\n return scipy.integrate.quad(self.mass_weighted, mlow, mhigh, **kwargs)\n\n def log_integrate(self, mlow, mhigh, **kwargs):\n def logform(x):\n return self(x) / x\n return scipy.integrate.quad(logform, mlow, mhigh, **kwargs)\n\n def normalize(self, mmin=None, mmax=None, log=False, **kwargs):\n \"\"\"\n Set self.normfactor such that the integral of the function over the\n range (mmin, mmax) = 1\n \"\"\"\n if mmin is None:\n mmin = self.mmin\n if mmax is None:\n mmax = self.mmax\n\n self.normfactor = 1\n\n if log:\n integral = self.log_integrate(mmin, mmax, **kwargs)\n else:\n integral = self.integrate(mmin, mmax, **kwargs)\n self.normfactor = 1./integral[0]\n\n assert self.normfactor > 0\n\n\nclass Salpeter(MassFunction):\n\n def __init__(self, alpha=2.35, mmin=0.3, mmax=120):\n \"\"\"\n Create a default Salpeter mass function, i.e. a power-law mass function\n the Salpeter 1955 IMF: dn/dm ~ m^-2.35\n \"\"\"\n self.mmin = mmin\n self.mmax = mmax\n self.alpha = alpha\n self.normfactor = 1\n\n @property\n def distr(self):\n return distributions.PowerLaw(-self.alpha, self.mmin, self.mmax)\n\n def __call__(self, m, integral_form=False):\n if not integral_form:\n return self.distr.pdf(m) * self.normfactor\n else:\n return self.distr.cdf(m) * self.normfactor\n\n\n# three codes for dn/dlog(m)\nsalpeter = Salpeter()\n# kroupa\n# chabrier\n\n\nclass Kroupa(MassFunction):\n # kroupa = BrokenPowerLaw(breaks={0.08: -0.3, 0.5: 1.3, 'last': 2.3}, mmin=0.03, mmax=120)\n def __init__(self, mmin=0.03, mmax=120, p1=0.3, p2=1.3, p3=2.3,\n break1=0.08, break2=0.5):\n \"\"\"\n The Kroupa IMF with two power-law breaks, p1 and p2. 
See __call__ for\n details.\n \"\"\"\n self.p1 = p1\n self.p2 = p2\n self.p3 = p3\n self.break1 = break1\n self.break2 = break2\n self.distr = distributions.BrokenPowerLaw([-p1, -p2, -p3],\n [mmin, break1, break2, mmax])\n self.mmin = mmin\n self.mmax = mmax\n self.normfactor = 1\n\n @property\n def mmin(self):\n return self.distr.m1\n\n @mmin.setter\n def mmin(self, value):\n self.distr.m1 = value\n\n @property\n def mmax(self):\n return self.distr.m2\n\n @mmax.setter\n def mmax(self, value):\n self.distr.m2 = value\n\n def __call__(self, m, integral_form=False):\n \"\"\"\n Kroupa 2001 IMF (http://arxiv.org/abs/astro-ph/0009005,\n http://adsabs.harvard.edu/abs/2001MNRAS.322..231K)\n\n Parameters\n ----------\n m: float array\n The mass at which to evaluate the function (Msun)\n p1, p2, p3: floats\n The power-law slopes of the different segments of the IMF\n break1, break2: floats\n The mass breakpoints at which to use the different power laws\n \"\"\"\n\n\n if integral_form:\n return self.normfactor * self.distr.cdf(m)\n else:\n return self.normfactor * self.distr.pdf(m)\n\n\n def integrate(self, mlow, mhigh, numerical=False):\n \"\"\"\n Integrate the mass function over some range\n \"\"\"\n if mhigh <= mlow:\n raise ValueError(\"Must have mlow < mhigh in integral\")\n if numerical:\n return super(Kroupa, self).integrate(mlow, mhigh)\n\n return (self.distr.cdf(mhigh) - self.distr.cdf(mlow)) * self.normfactor, 0\n\n\n def m_integrate(self, mlow, mhigh, numerical=False, **kwargs):\n \"\"\"\n Integrate the mass function over some range\n \"\"\"\n if mhigh <= mlow:\n raise ValueError(\"Must have mlow < mhigh in integral\")\n\n if numerical:\n return super(Kroupa, self).m_integrate(mlow, mhigh, **kwargs)\n else:\n distr1 = distributions.BrokenPowerLaw([-self.p1+1, -self.p2+1,\n -self.p3+1],\n [self.mmin, self.break1,\n self.break2, self.mmax])\n ratio = distr1.pdf(self.break1)/self.distr.pdf(self.break1)/self.break1\n return ((distr1.cdf(mhigh)-distr1.cdf(mlow))/ratio, 0)\n\n\n\n\nkroupa = Kroupa()\n\n\nclass Chabrier(MassFunction):\n def __init__(self):\n self.mmin = 0.57*np.log(10)\n self.multiplier = 0.86\n\n @property\n def distr(self):\n return distributions.LogNormal(0.22, self.mmin)\n\n def __call__(self, mass, integral_form=False, **kw):\n if integral_form:\n return self.distr.cdf(mass)*self.multiplier\n else:\n return self.distr.pdf(mass)*self.multiplier\n\n\nchabrier = Chabrier()\n\nclass Chabrier2005(MassFunction):\n def __init__(self, lognormal_center=0.2, lognormal_width=0.55*np.log(10),\n mmin=0.033, mmax=np.inf, alpha=2.35, mmid=1):\n # The numbers are from Eqn 3 of\n # https://ui.adsabs.harvard.edu/abs/2005ASSL..327...41C/abstract\n # importantly the lognormal center is the exp(M) where M is the mean of ln(mass)\n # normal distribution\n self.mmin = mmin\n self.mmid = mmid\n self.mmax = mmax\n self.alpha = alpha\n self.lognormal_width = lognormal_width\n self.lognormal_center = lognormal_center\n\n @property\n def distr(self):\n return distributions.CompositeDistribution(\n [distributions.TruncatedLogNormal(self.lognormal_center,\n self.lognormal_width,\n self.mmin,\n self.mmid),\n distributions.PowerLaw(-self.alpha, self.mmid, self.mmax)])\n\n def __call__(self, x, integral_form=False, **kw):\n if integral_form:\n return self.distr.cdf(x)\n else:\n return self.distr.pdf(x)\n\n\nchabrier2005 = Chabrier2005()\n\n\ndef schechter(m, A=1, beta=2, m0=100, integral=False):\n \"\"\"\n A Schechter function with arbitrary defaults\n (integral may not be correct - exponent hasn't been 
dealt with at all)\n\n $$ A m^{-\\\\beta} e^{-m/m_0} $$\n\n Parameters\n ----------\n m: np.ndarray\n List of masses for which to compute the Schechter function\n A: float\n Arbitrary amplitude of the Schechter function\n beta: float\n Power law exponent\n m0: float\n Characteristic mass (mass at which exponential decay takes over)\n\n Returns\n -------\n p(m) - the (unnormalized) probability of an object of a given mass\n as a function of that object's mass\n (though you could interpret mass as anything, it's just a number)\n\n \"\"\"\n if integral:\n beta -= 1\n return A*m**-beta * np.exp(-m/m0)\n\ndef modified_schechter(m, m1, **kwargs):\n \"\"\"\n A Schechter function with a low-level exponential cutoff\n \"\n Parameters\n ----------\n m: np.ndarray\n List of masses for which to compute the Schechter function\n m1: float\n Characteristic minimum mass (exponential decay below this mass)\n ** See schecter for other parameters **\n\n Returns\n -------\n p(m) - the (unnormalized) probability of an object of a given mass\n as a function of that object's mass\n (though you could interpret mass as anything, it's just a number)\n \"\"\"\n return schechter(m, **kwargs) * np.exp(-m1/m)\n\ntry:\n import scipy\n def schechter_cdf(m, A=1, beta=2, m0=100, mmin=10, mmax=None, npts=1e4):\n \"\"\"\n Return the CDF value of a given mass for a set mmin, mmax\n mmax will default to 10 m0 if not specified\n\n Analytic integral of the Schechter function:\n http://www.wolframalpha.com/input/?i=integral%28x^-a+exp%28-x%2Fm%29+dx%29\n \"\"\"\n if mmax is None:\n mmax = 10*m0\n\n # integrate the CDF from the minimum to maximum\n posint = -mmax**(1-beta) * scipy.special.expn(beta, mmax/m0)\n negint = -mmin**(1-beta) * scipy.special.expn(beta, mmin/m0)\n tot = posint-negint\n\n # normalize by the integral\n ret = (-m**(1-beta) * scipy.special.expn(beta, m/m0) - negint) / tot\n\n return ret\n\n def sh_cdf_func(**kwargs):\n return lambda x: schechter_cdf(x, **kwargs)\nexcept ImportError:\n pass\n\ndef m_integrate(fn=kroupa, bins=np.logspace(-2, 2, 500)):\n xax = (bins[:-1]+bins[1:])/2.\n integral = xax*(bins[1:]-bins[:-1]) * (fn(bins[:-1])+fn(bins[1:])) / 2.\n\n return xax, integral\n\ndef cumint(fn=kroupa, bins=np.logspace(-2, 2, 500)):\n xax, integral = integrate(fn, bins)\n return integral.cumsum() / integral.sum()\n\ndef m_cumint(fn=kroupa, bins=np.logspace(-2, 2, 500)):\n xax, integral = m_integrate(fn, bins)\n return integral.cumsum() / integral.sum()\n\nmassfunctions = {'kroupa': kroupa, 'salpeter': salpeter, 'chabrier': chabrier,\n 'schechter': schechter, 'modified_schechter': modified_schechter}\nreverse_mf_dict = {v: k for k, v in iteritems(massfunctions)}\n# salpeter and schechter selections are arbitrary\nmostcommonmass = {'kroupa': 0.08, 'salpeter': 0.01, 'chabrier': 0.23,\n 'schecter': 0.01, 'modified_schechter': 0.01}\nexpectedmass_cache = {}\n\ndef get_massfunc(massfunc):\n if isinstance(massfunc, types.FunctionType) or hasattr(massfunc, '__call__'):\n return massfunc\n elif type(massfunc) is str:\n return massfunctions[massfunc]\n else:\n raise ValueError(\"massfunc must either be a string in the set %s or a function\"\n % (\", \".join(massfunctions.keys())))\n\ndef get_massfunc_name(massfunc):\n if massfunc in reverse_mf_dict:\n return reverse_mf_dict[massfunc]\n elif type(massfunc) is str:\n return massfunc\n elif hasattr(massfunc, '__name__'):\n return massfunc.__name__\n else:\n raise ValueError(\"invalid mass function\")\n\ndef inverse_imf(p, nbins=1000, mmin=None, mmax=None, 
massfunc='kroupa',\n **kwargs):\n \"\"\"\n Inverse mass function. Creates a cumulative distribution function from the\n mass function and samples it using the given randomly distributed values\n ``p``.\n\n\n Parameters\n ----------\n p: np.array\n An array of floats in the range [0, 1). These should be uniformly random\n numbers.\n nbins: int\n The number of bins in the cumulative distribution function to sample\n over. More bins results in (marginally) higher precision.\n mmin: float\n mmax: float\n Minimum and maximum stellar mass in the distribution\n massfunc: string or function\n massfunc can be 'kroupa', 'chabrier', 'salpeter', 'schechter', or a\n function\n \"\"\"\n\n mfc = get_massfunc(massfunc)\n\n if mmin is not None and hasattr(mfc, 'mmin') and mmin != mfc.mmin:\n orig_mmin = mfc.mmin\n mfc.mmin = mmin\n if mmax is not None and hasattr(mfc, 'mmax') and mmax != mfc.mmax:\n orig_mmax = mfc.mmax\n mfc.mmax = mmax\n\n mmin = mfc.mmin\n mmax = mfc.mmax\n\n ends = np.logspace(np.log10(mmin), np.log10(mmax), nbins)\n masses = (ends[1:] + ends[:-1])/2.\n dm = np.diff(ends)\n\n\n # the full probability distribution function N(M) dm\n mf = mfc(masses, **kwargs)\n\n # integrate by taking the cumulative sum of x dx\n mfcum = (mf*dm).cumsum()\n\n # normalize to sum (this turns into a cdf)\n mfcum /= mfcum.max()\n\n result = np.interp(p, mfcum, masses)\n\n if 'orig_mmin' in locals():\n mfc.mmin = orig_mmin\n if 'orig_mmax' in locals():\n mfc.mmax = orig_mmax\n\n return result\n\n\ndef make_cluster(mcluster, massfunc='kroupa', verbose=False, silent=False,\n tolerance=0.0, stop_criterion='nearest', mmax=120, mmin=None):\n \"\"\"\n Sample from an IMF to make a cluster. Returns the masses of all stars in the cluster\n\n massfunc must be a string\n tolerance is how close the cluster mass must be to the requested mass.\n If the last star is greater than this tolerance, the total mass will not be within\n tolerance of the requested\n\n stop criteria can be: 'nearest', 'before', 'after', 'sorted'\n\n \"\"\"\n\n # use most common mass to guess needed number of samples\n # nsamp = mcluster / mostcommonmass[get_massfunc_name(massfunc)]\n # masses = inverse_imf(np.random.random(int(nsamp)), massfunc=massfunc, **kwargs)\n\n # mtot = masses.sum()\n # if verbose:\n # print((\"%i samples yielded a cluster mass of %g (%g requested)\" %\n # (nsamp, mtot, mcluster)))\n\n mfc = get_massfunc(massfunc)\n if mmin is not None and hasattr(mfc, 'mmin') and mfc.mmin != mmin:\n orig_mmin = mfc.mmin\n mfc.mmin = mmin\n if mmax is not None and hasattr(mfc, 'mmax') and mfc.mmax != mmax:\n orig_mmax = mfc.mmax\n mfc.mmax = mmax\n\n if (massfunc, mfc.mmin, mmax) in expectedmass_cache:\n expected_mass = expectedmass_cache[(massfunc, mfc.mmin, mmax)]\n assert expected_mass > 0\n else:\n expected_mass = mfc.m_integrate(mfc.mmin, mmax)[0]\n assert expected_mass > 0\n expectedmass_cache[(massfunc, mfc.mmin, mmax)] = expected_mass\n\n if verbose:\n print(\"Expected mass is {0:0.3f}\".format(expected_mass))\n\n\n mtot = 0\n masses = []\n\n while mtot < mcluster + tolerance:\n # at least 1 sample, but potentially many more\n nsamp = int(np.ceil((mcluster+tolerance-mtot) / expected_mass))\n assert nsamp > 0\n newmasses = mfc.distr.rvs(nsamp)\n masses = np.concatenate([masses, newmasses])\n mtot = masses.sum()\n if verbose:\n print(\"Sampled %i new stars. 
Total is now %g\" % (int(nsamp), mtot))\n\n if mtot > mcluster+tolerance: # don't force exact equality; that would yield infinite loop\n mcum = masses.cumsum()\n if stop_criterion == 'sorted':\n masses = np.sort(masses)\n if np.abs(masses[:-1].sum()-mcluster) < np.abs(masses.sum() - mcluster):\n # if the most massive star makes the cluster a worse fit, reject it\n # (this follows Krumholz+ 2015 appendix A1)\n last_ind = len(masses) - 1\n else:\n last_ind = len(masses)\n else:\n if stop_criterion == 'nearest':\n # find the closest one, and use +1 to include it\n last_ind = np.argmin(np.abs(mcum - mcluster)) + 1\n elif stop_criterion == 'before':\n last_ind = np.argmax(mcum > mcluster)\n elif stop_criterion == 'after':\n last_ind = np.argmax(mcum > mcluster) + 1\n masses = masses[:last_ind]\n mtot = masses.sum()\n if verbose:\n print(\"Selected the first %i out of %i masses to get %g total\"\n % (last_ind, len(mcum), mtot))\n # force the break, because some stopping criteria can push mtot < mcluster\n break\n\n if not silent:\n print(\"Total cluster mass is %g (limit was %g)\" % (mtot, mcluster))\n\n if 'orig_mmin' in locals():\n mfc.mmin = orig_mmin\n if 'orig_mmax' in locals():\n mfc.mmax = orig_mmax\n\n return masses\n\nmass_luminosity_interpolator_cache = {}\n\ndef mass_luminosity_interpolator(name):\n if name in mass_luminosity_interpolator_cache:\n return mass_luminosity_interpolator_cache[name]\n elif name == 'VGS':\n\n # non-extrapolated\n vgsMass = [51.3, 44.2, 41.0, 38.1, 35.5, 33.1, 30.8, 28.8, 26.9, 25.1,\n 23.6, 22.1, 20.8, 19.5, 18.4]\n vgslogL = [6.154, 6.046, 5.991, 5.934, 5.876, 5.817, 5.756, 5.695,\n 5.631, 5.566, 5.499, 5.431, 5.360, 5.287, 5.211]\n vgslogQ = [49.18, 48.99, 48.90, 48.81, 48.72, 48.61, 48.49, 48.34,\n 48.16, 47.92, 47.63, 47.25, 46.77, 46.23, 45.69]\n # mass extrapolated\n vgsMe = np.concatenate([\n np.linspace(0.03, 0.43, 100),\n np.linspace(0.43, 2, 100),\n np.linspace(2, 20, 100),\n vgsMass[::-1],\n np.linspace(50, 150, 100)])\n # log luminosity extrapolated\n vgslogLe = np.concatenate([\n np.log10(0.23*np.linspace(0.03, 0.43, 100)**2.3),\n np.log10(np.linspace(0.43, 2, 100)**4),\n np.log10(1.5*np.linspace(2, 20, 100)**3.5),\n vgslogL[::-1],\n np.polyval(np.polyfit(np.log10(vgsMass[:3], vgslogL[:3], 1),\n np.log10(np.linspace(50, 150, 100))))])\n # log Q (lyman continuum) extrapolated\n vgslogQe = np.concatenate([\n np.zeros(100), # 0.03-0.43 solar mass stars produce 0 LyC photons\n np.zeros(100), # 0.43-2.0 solar mass stars produce 0 LyC photons\n np.polyval(np.polyfit(np.log10(vgsMass[-3:], vgslogQ[-3:], 1),\n np.log10(np.linspace(8, 18.4, 100)))),\n vgslogQ[::-1],\n np.polyval(np.polyfit(np.log10(vgsMass[:3], vgslogQ[:3], 1),\n np.log10(np.linspace(50, 150, 100))))\n ])\n\n mass_luminosity_interpolator_cache[name] = vgsMe, vgslogLe, vgslogQ\n\n return mass_luminosity_interpolator_cache[name]\n elif name == 'Ekstrom':\n from astroquery.vizier import Vizier\n Vizier.ROW_LIMIT = 1e7 # effectively infinite\n\n # this query should cache\n tbl = Vizier.get_catalogs('J/A+A/537/A146/iso')[0]\n\n match = tbl['logAge'] == 6.5\n masses = tbl['Mass'][match]\n lums = tbl['logL'][match]\n mass_0 = 0.033\n lum_0 = np.log10((mass_0/masses[0])**3.5 * 10**lums[0])\n mass_f = 200 # extrapolate to 200 Msun...\n lum_f = np.log10(10**lums[-1] * (mass_f/masses[-1])**1.35)\n\n masses = np.array([mass_0] + masses.tolist() + [mass_f])\n lums = np.array([lum_0] + lums.tolist() + [lum_f])\n\n # TODO: come up with a half-decent approximation here? 
based on logTe?\n logQ = lums - 0.5\n\n mass_luminosity_interpolator_cache[name] = masses, lums, logQ\n\n return mass_luminosity_interpolator_cache[name]\n else:\n raise ValueError(\"Bad grid name {0}\".format(name))\n\ndef lum_of_star(mass, grid='Ekstrom'):\n \"\"\"\n Determine total luminosity of a star given its mass\n\n Two grids:\n (1) VGS:\n Uses the Vacca, Garmany, Shull 1996 Table 5 Log Q and Mspec parameters\n\n returns LogL in solar luminosities\n **WARNING** Extrapolates for M not in [18.4, 50] msun\n\n http://en.wikipedia.org/wiki/Mass%E2%80%93luminosity_relation\n\n (2) Ekstrom 2012:\n Covers 0.8 - 64 Msun, extrapolated out of that\n \"\"\"\n masses, lums, _ = mass_luminosity_interpolator(grid)\n return np.interp(mass, masses, lums)\n\ndef lum_of_cluster(masses, grid='Ekstrom'):\n \"\"\"\n Determine the log of the integrated luminosity of a cluster\n Only M>=8msun count\n\n masses is a list or array of masses.\n \"\"\"\n #if max(masses) < 8: return 0\n logL = lum_of_star(masses, grid=grid) #[masses >= 8])\n logLtot = np.log10( (10**logL).sum() )\n return logLtot\n\ndef lyc_of_star(mass):\n \"\"\"\n Determine lyman continuum luminosity of a star given its mass\n Uses the Vacca, Garmany, Shull 1996 Table 5 Log Q and Mspec parameters\n\n returns LogQ\n \"\"\"\n masses, _, logQ = mass_luminosity_interpolator(grid)\n\n return np.interp(mass, masses, logQ)\n\ndef lyc_of_cluster(masses):\n \"\"\"\n Determine the log of the integrated lyman continuum luminosity of a cluster\n Only M>=8msun count\n\n masses is a list or array of masses.\n \"\"\"\n if max(masses) < 8: return 0\n logq = lyc_of_star(masses[masses >= 8])\n logqtot = np.log10( (10**logq).sum() )\n return logqtot\n\ndef color_from_mass(mass, outtype=float):\n \"\"\"\n Use vendian.org colors:\n 100 O2(V) 150 175 255 #9db4ff\n 50 O5(V) 157 180 255 #9db4ff\n 20 B1(V) 162 185 255 #a2b9ff\n 10 B3(V) 167 188 255 #a7bcff\n 8 B5(V) 170 191 255 #aabfff\n 6 B8(V) 175 195 255 #afc3ff\n 2.2 A1(V) 186 204 255 #baccff\n 2.0 A3(V) 192 209 255 #c0d1ff\n 1.86 A5(V) 202 216 255 #cad8ff\n 1.6 F0(V) 228 232 255 #e4e8ff\n 1.5 F2(V) 237 238 255 #edeeff\n 1.3 F5(V) 251 248 255 #fbf8ff\n 1.2 F8(V) 255 249 249 #fff9f9\n 1 G2(V) 255 245 236 #fff5ec\n 0.95 G5(V) 255 244 232 #fff4e8\n 0.90 G8(V) 255 241 223 #fff1df\n 0.85 K0(V) 255 235 209 #ffebd1\n 0.70 K4(V) 255 215 174 #ffd7ae\n 0.60 K7(V) 255 198 144 #ffc690\n 0.50 M2(V) 255 190 127 #ffbe7f\n 0.40 M4(V) 255 187 123 #ffbb7b\n 0.35 M6(V) 255 187 123 #ffbb7b\n 0.30 M8(V) 255 167 123 #ffbb7b # my addition\n \"\"\"\n\n mcolor = { # noqa: E131\n 100: (150, 175, 255),\n 50: (157, 180, 255),\n 20: (162, 185, 255),\n 10: (167, 188, 255),\n 8: (170, 191, 255),\n 6: (175, 195, 255),\n 2.2: (186, 204, 255),\n 2.0: (192, 209, 255),\n 1.86: (202, 216, 255),\n 1.6: (228, 232, 255),\n 1.5: (237, 238, 255),\n 1.3: (251, 248, 255),\n 1.2: (255, 249, 249),\n 1: (255, 245, 236),\n 0.95: (255, 244, 232),\n 0.90: (255, 241, 223),\n 0.85: (255, 235, 209),\n 0.70: (255, 215, 174),\n 0.60: (255, 198, 144),\n 0.50: (255, 190, 127),\n 0.40: (255, 187, 123),\n 0.35: (255, 187, 123),\n 0.30: (255, 177, 113),\n 0.20: (255, 107, 63),\n 0.10: (155, 57, 33),\n 0.10: (155, 57, 33),\n 0.003: (105, 27, 0),\n }\n\n keys = sorted(mcolor.keys())\n\n reds, greens, blues = zip(*[mcolor[k] for k in keys])\n\n r = np.interp(mass, keys, reds)\n g = np.interp(mass, keys, greens)\n b = np.interp(mass, keys, blues)\n\n if outtype == int:\n return (r, g, b)\n elif outtype == float:\n return (r/255., g/255., b/255.)\n else:\n raise 
NotImplementedError\n\ndef color_of_cluster(cluster, colorfunc=color_from_mass):\n colors = np.array([colorfunc(m) for m in cluster])\n luminosities = 10**np.array([lum_of_star(m) for m in cluster])\n mean_color = (colors*luminosities[:, None]).sum(axis=0)/luminosities.sum()\n return mean_color\n\ndef coolplot(clustermass, massfunc='kroupa', log=True, **kwargs):\n \"\"\"\n \"cool plot\" is just because the plot is kinda neat.\n\n This function creates a cluster using `make_cluster`, assigns each star a\n color based on the vendian.org colors using `color_from_mass`, and assigns\n each star a random Y-value distributed underneath the specified mass\n function's curve.\n\n Parameters\n ----------\n clustermass: float\n The mass of the cluster in solar masses\n massfunc: str\n The name of the mass function to use, determined using the\n `get_massfunc` function.\n log: bool\n Is the Y-axis log-scaled?\n\n Returns\n -------\n cluster: array\n The array of stellar masses that makes up the cluster\n yax: array\n The array of Y-values associated with the stellar masses\n colors: list\n A list of color tuples associated with each star\n \"\"\"\n cluster = make_cluster(clustermass, massfunc=massfunc, mmax=massfunc.mmax,\n **kwargs)\n colors = [color_from_mass(m) for m in cluster]\n massfunc = get_massfunc(massfunc)\n maxmass = cluster.max()\n pmin = massfunc(maxmass)\n if log:\n yax = [np.random.rand()*(np.log10(massfunc(m))-np.log10(pmin))\n + np.log10(pmin) for m in cluster]\n else:\n yax = [np.random.rand()*((massfunc(m))/(pmin)) + (pmin) for m in cluster]\n\n assert all(np.isfinite(yax))\n\n return cluster, yax, colors\n\n # import pylab as pl\n # pl.scatter(cluster, yax, c=colors, s=np.log10(cluster)*5)\n\n\n\nclass KoenConvolvedPowerLaw(MassFunction):\n \"\"\"\n Implementaton of convolved errror power-law described in 2009 Koen, Kondlo\n paper, Fitting power-law distributions to data with measurement errors.\n Equations (3) and (5)\n\n Parameters\n ----------\n m: float\n The mass at which to evaluate the function\n mmin, mmax: floats\n The upper and lower bounds for the power law distribution\n gamma: floats\n The specified gamma for the distribution, slope = -gamma - 1\n sigma: float or None\n specified spread of error, assumes Normal distribution with mean 0 and variance sigma.\n \"\"\"\n\n\n def __init__(self, mmin, mmax, gamma, sigma):\n self.mmin = mmin\n self.mmax = mmax\n self.sigma = sigma\n self.gamma = gamma\n\n def __call__(self, m, integral_form=False):\n m = np.asarray(m)\n if self.mmax<self.mmin:\n raise ValueError(\"mmax must be greater than mmin\")\n\n if integral_form:\n # Returns\n # -------\n # Probability that m < x for the given CDF with specified\n # mmin, mmax, sigma, and gamma\n\n def error(t):\n return np.exp(-(t**2)/2)\n\n error_coeffecient = 1/np.sqrt(2*np.pi)\n\n def error_integral(y):\n error_integral = quad(error, -np.inf, (y-self.mmax)/self.sigma)[0]\n return error_integral\n\n vector_errorintegral = np.vectorize(error_integral)\n phi = vector_errorintegral(m) * error_coeffecient\n\n def integrand(x, y):\n return ((self.mmin**-self.gamma - x**-self.gamma) *\n np.exp((-1/2)*((y-x)/self.sigma)**2))\n\n coef = (1 / (self.sigma*np.sqrt(2*np.pi) *\n (self.mmin**-self.gamma -\n self.mmax**-self.gamma)))\n\n def eval_integral(y):\n integral = quad(integrand, self.mmin, self.mmax, args=(y))[0]\n return integral\n\n vector_integral = np.vectorize(eval_integral)\n probability = phi + coef * vector_integral(m)\n return probability\n\n\n else:\n # Returns\n # ------\n # 
Probability of getting x given the PDF with specified mmin, mmax, sigma, and gamma\n def integrand(x, y):\n return (x**-(self.gamma+1)) * np.exp(-.5*((y-x)/self.sigma)**2)\n\n coef = (self.gamma/((self.sigma*np.sqrt(2*np.pi)) *\n ((self.mmin**-self.gamma) -\n (self.mmax**-self.gamma))))\n\n def Integral(y):\n I = quad(integrand, self.mmin, self.mmax, args=(y))[0]\n return I\n\n vector_I = np.vectorize(Integral)\n return coef * vector_I(m)\n\n\nclass KoenTruePowerLaw(MassFunction):\n \"\"\"\n Implementaton of error free power-law described in 2009 Koen Kondlo paper,\n Fitting power-law distributions to data with measurement errors\n\n This is a power law with truncations on the low and high end.\n\n Equations (2) and (4)\n\n Parameters\n ----------\n m: float\n The mass at which to evaluate the function\n mmin, mmax: floats\n The upper and lower bounds for the power law distribution\n gamma: floats\n The specified gamma for the distribution, related to the slope, alpha = -gamma + 1\n \"\"\"\n\n def __init__(self, mmin, mmax, gamma):\n self.mmin = mmin\n self.mmax = mmax\n self.gamma = gamma\n\n def __call__(self, m, integral_form=False):\n m = np.asarray(m)\n if self.mmax < self.mmin:\n raise ValueError('mmax must be greater than mmin')\n if integral_form:\n # Returns\n # -------\n # Probability that m < x for the given CDF with specified mmin, mmax, sigma, and gamma\n # True for L<=x\n pdf = ((self.mmin**-self.gamma -\n np.power(m, -self.gamma))/self.mmin**-self.gamma -\n self.mmax**-self.gamma)\n return_value = (pdf * ((m > self.mmin) & (m < self.mmax)) + 1.0 *\n (m >= self.mmax) + 0 * (m < self.mmin))\n return return_value\n\n else:\n # Returns\n # ------\n # Probability of getting x given the PDF with specified mmin, mmax, and gamma\n # Answers it gives are true from mmin<=x<=mmax\n cdf = (self.gamma*np.power(m,\n -(self.gamma+1))/(self.mmin**-self.gamma\n -\n self.mmax**-self.gamma))\n return_value = (cdf * ((m > self.mmin) & (m < self.mmax)) + 0 *\n (m > self.mmax) + 0 * (m < self.mmin))\n return return_value\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.random.seed", "numpy.abs", "numpy.random.uniform" ], [ "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.concatenate", "numpy.exp", "numpy.ceil", "numpy.argmax", "numpy.diff", "numpy.interp", "numpy.zeros", "numpy.log", "numpy.power", "numpy.logspace", "scipy.integrate", "numpy.log10", "numpy.random.rand", "scipy.integrate.quad", "numpy.abs", "numpy.isfinite", "scipy.special.expn", "numpy.sort", "numpy.vectorize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Seanny123/ARS
[ "f445a870feac13286fe2d8b14ee508f789c9ef7d" ]
[ "arsrl/ars.py" ]
[ "'''\nParallel implementation of the Augmented Random Search method.\nHoria Mania --- [email protected]\nAurelia Guy\nBenjamin Recht\n'''\n\nimport os\nimport socket\nimport time\n\nimport gym\nimport numpy as np\nimport ray\n\nfrom arsrl import utils, logz, optimizers\nfrom arsrl.policies import LinearPolicy\nfrom arsrl.shared_noise import SharedNoiseTable, create_shared_noise\n\n\[email protected]\nclass Worker(object):\n \"\"\"\n Object class for parallel rollout generation.\n \"\"\"\n\n def __init__(self, env_seed,\n env_name='',\n policy_params=None,\n deltas=None,\n rollout_length=1000,\n delta_std=0.02):\n\n # initialize OpenAI environment for each worker\n self.env = gym.make(env_name)\n self.env.seed(env_seed)\n\n # each worker gets access to the shared noise table\n # with independent random streams for sampling\n # from the shared noise table.\n self.deltas = SharedNoiseTable(deltas, env_seed + 7)\n self.policy_params = policy_params\n if policy_params['type'] == 'linear':\n self.policy = LinearPolicy(policy_params)\n else:\n raise NotImplementedError\n\n self.delta_std = delta_std\n self.rollout_length = rollout_length\n\n def get_weights_plus_stats(self):\n \"\"\"\n Get current policy weights and current statistics of past states.\n \"\"\"\n assert self.policy_params['type'] == 'linear'\n return self.policy.get_weights_plus_stats()\n\n def rollout(self, shift=0., rollout_length=None):\n \"\"\"\n Performs one rollout of maximum length rollout_length.\n At each time-step it substracts shift from the reward.\n \"\"\"\n\n if rollout_length is None:\n rollout_length = self.rollout_length\n\n total_reward = 0.\n steps = 0\n\n ob = self.env.reset()\n for i in range(rollout_length):\n action = self.policy.act(ob)\n ob, reward, done, _ = self.env.step(action)\n steps += 1\n total_reward += (reward - shift)\n if done:\n break\n\n return total_reward, steps\n\n def do_rollouts(self, w_policy, num_rollouts=1, shift=1, evaluate=False):\n \"\"\"\n Generate multiple rollouts with a policy parametrized by w_policy.\n \"\"\"\n\n rollout_rewards, deltas_idx = [], []\n steps = 0\n\n for i in range(num_rollouts):\n\n if evaluate:\n self.policy.update_weights(w_policy)\n deltas_idx.append(-1)\n\n # set to false so that evaluation rollouts are not used for updating state statistics\n self.policy.update_filter = False\n\n # for evaluation we do not shift the rewards (shift = 0) and we use the\n # default rollout length (1000 for the MuJoCo locomotion tasks)\n reward, r_steps = self.rollout(shift=0., rollout_length=self.env.spec.timestep_limit)\n rollout_rewards.append(reward)\n\n else:\n idx, delta = self.deltas.get_delta(w_policy.size)\n\n delta = (self.delta_std * delta).reshape(w_policy.shape)\n deltas_idx.append(idx)\n\n # set to true so that state statistics are updated\n self.policy.update_filter = True\n\n # compute reward and number of timesteps used for positive perturbation rollout\n self.policy.update_weights(w_policy + delta)\n pos_reward, pos_steps = self.rollout(shift=shift)\n\n # compute reward and number of timesteps used for negative pertubation rollout\n self.policy.update_weights(w_policy - delta)\n neg_reward, neg_steps = self.rollout(shift = shift)\n steps += pos_steps + neg_steps\n\n rollout_rewards.append([pos_reward, neg_reward])\n\n return {'deltas_idx': deltas_idx, 'rollout_rewards': rollout_rewards, \"steps\" : steps}\n\n def stats_increment(self):\n self.policy.observation_filter.stats_increment()\n return\n\n def get_weights(self):\n return 
self.policy.get_weights()\n\n def get_filter(self):\n return self.policy.observation_filter\n\n def sync_filter(self, other):\n self.policy.observation_filter.sync(other)\n return\n\n\nclass ARSLearner(object):\n \"\"\"\n Object class implementing the ARS algorithm.\n \"\"\"\n\n def __init__(self, env_name='HalfCheetah-v1',\n policy_params=None,\n num_workers=32,\n num_deltas=320,\n deltas_used=320,\n delta_std=0.02,\n logdir=None,\n rollout_length=1000,\n step_size=0.01,\n shift='constant zero',\n params=None,\n seed=123):\n\n logz.configure_output_dir(logdir)\n logz.save_params(params)\n\n env = gym.make(env_name)\n\n self.timesteps = 0\n self.action_size = env.action_space.shape[0]\n self.ob_size = env.observation_space.shape[0]\n self.num_deltas = num_deltas\n self.deltas_used = deltas_used\n self.rollout_length = rollout_length\n self.step_size = step_size\n self.delta_std = delta_std\n self.logdir = logdir\n self.shift = shift\n self.params = params\n self.max_past_avg_reward = float('-inf')\n self.num_episodes_used = float('inf')\n\n # create shared table for storing noise\n print(\"Creating deltas table.\")\n deltas_id = create_shared_noise.remote()\n self.deltas = SharedNoiseTable(ray.get(deltas_id), seed = seed + 3)\n print('Created deltas table.')\n\n # initialize workers with different random seeds\n print('Initializing workers.')\n self.num_workers = num_workers\n self.workers = [Worker.remote(seed + 7 * i,\n env_name=env_name,\n policy_params=policy_params,\n deltas=deltas_id,\n rollout_length=rollout_length,\n delta_std=delta_std) for i in range(num_workers)]\n\n # initialize policy\n if policy_params['type'] == 'linear':\n self.policy = LinearPolicy(policy_params)\n self.w_policy = self.policy.get_weights()\n else:\n raise NotImplementedError\n\n # initialize optimization algorithm\n self.optimizer = optimizers.SGD(self.w_policy, self.step_size)\n print(\"Initialization of ARS complete.\")\n\n def aggregate_rollouts(self, num_rollouts = None, evaluate = False):\n \"\"\"\n Aggregate update step from rollouts generated in parallel.\n \"\"\"\n\n if num_rollouts is None:\n num_deltas = self.num_deltas\n else:\n num_deltas = num_rollouts\n\n # put policy weights in the object store\n policy_id = ray.put(self.w_policy)\n\n t1 = time.time()\n num_rollouts = int(num_deltas / self.num_workers)\n\n # parallel generation of rollouts\n rollout_ids_one = [worker.do_rollouts.remote(policy_id,\n num_rollouts=num_rollouts,\n shift=self.shift,\n evaluate=evaluate) for worker in self.workers]\n\n rollout_ids_two = [worker.do_rollouts.remote(policy_id,\n num_rollouts=1,\n shift=self.shift,\n evaluate=evaluate) for worker in\n self.workers[:(num_deltas % self.num_workers)]]\n\n # gather results\n results_one = ray.get(rollout_ids_one)\n results_two = ray.get(rollout_ids_two)\n\n rollout_rewards, deltas_idx = [], []\n\n for result in results_one:\n if not evaluate:\n self.timesteps += result[\"steps\"]\n deltas_idx += result['deltas_idx']\n rollout_rewards += result['rollout_rewards']\n\n for result in results_two:\n if not evaluate:\n self.timesteps += result[\"steps\"]\n deltas_idx += result['deltas_idx']\n rollout_rewards += result['rollout_rewards']\n\n deltas_idx = np.array(deltas_idx)\n rollout_rewards = np.array(rollout_rewards, dtype = np.float64)\n\n print('Maximum reward of collected rollouts:', rollout_rewards.max())\n t2 = time.time()\n\n print('Time to generate rollouts:', t2 - t1)\n\n if evaluate:\n return rollout_rewards\n\n # select top performing directions if deltas_used < 
num_deltas\n max_rewards = np.max(rollout_rewards, axis=1)\n if self.deltas_used > self.num_deltas:\n self.deltas_used = self.num_deltas\n\n idx = np.arange(max_rewards.size)[\n max_rewards >= np.percentile(max_rewards, 100 * (1 - (self.deltas_used / self.num_deltas)))]\n deltas_idx = deltas_idx[idx]\n rollout_rewards = rollout_rewards[idx, :]\n\n # normalize rewards by their standard deviation\n rollout_rewards /= np.std(rollout_rewards)\n\n t1 = time.time()\n # aggregate rollouts to form g_hat, the gradient used to compute SGD step\n g_hat, count = utils.batched_weighted_sum(rollout_rewards[:, 0] - rollout_rewards[:, 1],\n (self.deltas.get(idx, self.w_policy.size)\n for idx in deltas_idx),\n batch_size=500)\n g_hat /= deltas_idx.size\n t2 = time.time()\n print('time to aggregate rollouts', t2 - t1)\n return g_hat\n\n def train_step(self):\n \"\"\"\n Perform one update step of the policy weights.\n \"\"\"\n\n g_hat = self.aggregate_rollouts()\n print(\"Euclidean norm of update step:\", np.linalg.norm(g_hat))\n self.w_policy -= self.optimizer._compute_step(g_hat).reshape(self.w_policy.shape)\n return\n\n def train(self, num_iter):\n\n start = time.time()\n for i in range(num_iter):\n\n t1 = time.time()\n self.train_step()\n t2 = time.time()\n print('total time of one step', t2 - t1)\n print('iter ', i, ' done')\n\n # record statistics every 10 iterations\n if (i + 1) % 10 == 0:\n rewards = self.aggregate_rollouts(num_rollouts=100, evaluate=True)\n w = ray.get(self.workers[0].get_weights_plus_stats.remote())\n np.savez(self.logdir + \"/lin_policy_plus\", w)\n\n print(sorted(self.params.items()))\n logz.log_tabular(\"Time\", time.time() - start)\n logz.log_tabular(\"Iteration\", i + 1)\n logz.log_tabular(\"AverageReward\", np.mean(rewards))\n logz.log_tabular(\"StdRewards\", np.std(rewards))\n logz.log_tabular(\"MaxRewardRollout\", np.max(rewards))\n logz.log_tabular(\"MinRewardRollout\", np.min(rewards))\n logz.log_tabular(\"timesteps\", self.timesteps)\n logz.dump_tabular()\n\n t1 = time.time()\n # get statistics from all workers\n for j in range(self.num_workers):\n self.policy.observation_filter.update(ray.get(self.workers[j].get_filter.remote()))\n self.policy.observation_filter.stats_increment()\n\n # make sure master filter buffer is clear\n self.policy.observation_filter.clear_buffer()\n # sync all workers\n filter_id = ray.put(self.policy.observation_filter)\n setting_filters_ids = [worker.sync_filter.remote(filter_id) for worker in self.workers]\n # waiting for sync of all workers\n ray.get(setting_filters_ids)\n\n increment_filters_ids = [worker.stats_increment.remote() for worker in self.workers]\n # waiting for increment of all workers\n ray.get(increment_filters_ids)\n t2 = time.time()\n print('Time to sync statistics:', t2 - t1)\n\n return\n\n\ndef run_ars(params):\n\n dir_path = params['dir_path']\n\n if not(os.path.exists(dir_path)):\n os.makedirs(dir_path)\n logdir = dir_path\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n\n env = gym.make(params['env_name'])\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.shape[0]\n\n # set policy parameters. 
Possible filters: 'MeanStdFilter' for v2, 'NoFilter' for v1.\n policy_params = {'type': 'linear',\n 'ob_filter': params['filter'],\n 'ob_dim': ob_dim,\n 'ac_dim': ac_dim}\n\n ARS = ARSLearner(env_name=params['env_name'],\n policy_params=policy_params,\n num_workers=params['n_workers'],\n num_deltas=params['n_directions'],\n deltas_used=params['deltas_used'],\n step_size=params['step_size'],\n delta_std=params['delta_std'],\n logdir=logdir,\n rollout_length=params['rollout_length'],\n shift=params['shift'],\n params=params,\n seed = params['seed'])\n\n ARS.train(params['n_iter'])\n\n return\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env_name', type=str, default='HalfCheetah-v1')\n parser.add_argument('--n_iter', '-n', type=int, default=1000)\n parser.add_argument('--n_directions', '-nd', type=int, default=8)\n parser.add_argument('--deltas_used', '-du', type=int, default=8)\n parser.add_argument('--step_size', '-s', type=float, default=0.02)\n parser.add_argument('--delta_std', '-std', type=float, default=.03)\n parser.add_argument('--n_workers', '-e', type=int, default=18)\n parser.add_argument('--rollout_length', '-r', type=int, default=1000)\n\n # for Swimmer-v1 and HalfCheetah-v1 use shift = 0\n # for Hopper-v1, Walker2d-v1, and Ant-v1 use shift = 1\n # for Humanoid-v1 used shift = 5\n parser.add_argument('--shift', type=float, default=0)\n parser.add_argument('--seed', type=int, default=237)\n parser.add_argument('--policy_type', type=str, default='linear')\n parser.add_argument('--dir_path', type=str, default='data')\n\n # for ARS V1 use filter = 'NoFilter'\n parser.add_argument('--filter', type=str, default='MeanStdFilter')\n\n local_ip = socket.gethostbyname(socket.gethostname())\n ray.init(redis_address=local_ip + ':6379')\n\n args = parser.parse_args()\n params = vars(args)\n run_ars(params)\n" ]
[ [ "numpy.savez", "numpy.min", "numpy.arange", "numpy.linalg.norm", "numpy.percentile", "numpy.max", "numpy.std", "numpy.mean", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vitek2577/Multi-Tacotron-Voice-Cloning-GST
[ "a47d7f8412add19f7a154d70cc55698e2bb0a6da" ]
[ "synthesizer/audio.py" ]
[ "import librosa\nimport librosa.filters\nimport numpy as np\nimport tensorflow as tf\nfrom scipy import signal\nfrom scipy.io import wavfile\n\n\ndef load_wav(path, sr):\n return librosa.core.load(path, sr=sr)[0]\n\ndef save_wav(wav, path, sr):\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n #proposed by @dsmiller\n wavfile.write(path, sr, wav.astype(np.int16))\n\ndef save_wavenet_wav(wav, path, sr):\n librosa.output.write_wav(path, wav, sr=sr)\n\ndef preemphasis(wav, k, preemphasize=True):\n if preemphasize:\n return signal.lfilter([1, -k], [1], wav)\n return wav\n\ndef inv_preemphasis(wav, k, inv_preemphasize=True):\n if inv_preemphasize:\n return signal.lfilter([1], [1, -k], wav)\n return wav\n\n#From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py\ndef start_and_end_indices(quantized, silence_threshold=2):\n for start in range(quantized.size):\n if abs(quantized[start] - 127) > silence_threshold:\n break\n for end in range(quantized.size - 1, 1, -1):\n if abs(quantized[end] - 127) > silence_threshold:\n break\n \n assert abs(quantized[start] - 127) > silence_threshold\n assert abs(quantized[end] - 127) > silence_threshold\n \n return start, end\n\ndef get_hop_size(hparams):\n hop_size = hparams.hop_size\n if hop_size is None:\n assert hparams.frame_shift_ms is not None\n hop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)\n return hop_size\n\ndef linearspectrogram(wav, hparams):\n D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)\n S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db\n \n if hparams.signal_normalization:\n return _normalize(S, hparams)\n return S\n\ndef melspectrogram(wav, hparams):\n D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)\n S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db\n \n if hparams.signal_normalization:\n return _normalize(S, hparams)\n return S\n\ndef inv_linear_spectrogram(linear_spectrogram, hparams):\n \"\"\"Converts linear spectrogram to waveform using librosa\"\"\"\n if hparams.signal_normalization:\n D = _denormalize(linear_spectrogram, hparams)\n else:\n D = linear_spectrogram\n \n S = _db_to_amp(D + hparams.ref_level_db) #Convert back to linear\n \n if hparams.use_lws:\n processor = _lws_processor(hparams)\n D = processor.run_lws(S.astype(np.float64).T ** hparams.power)\n y = processor.istft(D).astype(np.float32)\n return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)\n else:\n return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)\n\ndef inv_mel_spectrogram(mel_spectrogram, hparams):\n \"\"\"Converts mel spectrogram to waveform using librosa\"\"\"\n if hparams.signal_normalization:\n D = _denormalize(mel_spectrogram, hparams)\n else:\n D = mel_spectrogram\n #print(D)\n S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db), hparams) # Convert back to linear\n #print(S) \n if hparams.use_lws:\n processor = _lws_processor(hparams)\n D = processor.run_lws(S.astype(np.float64).T ** hparams.power)\n y = processor.istft(D).astype(np.float32)\n return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)\n else:\n return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)\n\ndef _lws_processor(hparams):\n import lws\n return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode=\"speech\")\n\ndef _griffin_lim(S, hparams):\n \"\"\"librosa implementation of Griffin-Lim\n 
Based on https://github.com/librosa/librosa/issues/434\n \"\"\"\n angles = np.exp(2j * np.pi * np.random.rand(*S.shape))\n S_complex = np.abs(S).astype(np.complex)\n y = _istft(S_complex * angles, hparams)\n for i in range(hparams.griffin_lim_iters):\n angles = np.exp(1j * np.angle(_stft(y, hparams)))\n y = _istft(S_complex * angles, hparams)\n return y\n\ndef _stft(y, hparams):\n if hparams.use_lws:\n return _lws_processor(hparams).stft(y).T\n else:\n return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams), win_length=hparams.win_size)\n\ndef _istft(y, hparams):\n return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams.win_size)\n\n##########################################################\n#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)\ndef num_frames(length, fsize, fshift):\n \"\"\"Compute number of time frames of spectrogram\n \"\"\"\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M\n\n\ndef pad_lr(x, fsize, fshift):\n \"\"\"Compute left and right padding\n \"\"\"\n M = num_frames(len(x), fsize, fshift)\n pad = (fsize - fshift)\n T = len(x) + 2 * pad\n r = (M - 1) * fshift + fsize - T\n return pad, pad + r\n##########################################################\n#Librosa correct padding\ndef librosa_pad_lr(x, fsize, fshift):\n return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]\n\n# Conversions\n_mel_basis = None\n_inv_mel_basis = None\n\ndef _linear_to_mel(spectogram, hparams):\n global _mel_basis\n if _mel_basis is None:\n _mel_basis = _build_mel_basis(hparams)\n return np.dot(_mel_basis, spectogram)\n\ndef _mel_to_linear(mel_spectrogram, hparams):\n global _inv_mel_basis\n if _inv_mel_basis is None:\n _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))\n return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))\n\ndef _build_mel_basis(hparams):\n assert hparams.fmax <= hparams.sample_rate // 2\n print(hparams.sample_rate, hparams.n_fft, hparams.num_mels, hparams.fmin, hparams.fmax)\n return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels,\n fmin=hparams.fmin, fmax=hparams.fmax)\n\ndef _amp_to_db(x, hparams):\n min_level = np.exp(hparams.min_level_db / 20 * np.log(10))\n return 20 * np.log10(np.maximum(min_level, x))\n\ndef _db_to_amp(x):\n return np.power(10.0, (x) * 0.05)\n\ndef _normalize(S, hparams):\n if hparams.allow_clipping_in_normalization:\n if hparams.symmetric_mels:\n return np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value,\n -hparams.max_abs_value, hparams.max_abs_value)\n else:\n return np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value)\n \n assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0\n if hparams.symmetric_mels:\n return (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value\n else:\n return hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db))\n\ndef _denormalize(D, hparams):\n if hparams.allow_clipping_in_normalization:\n if hparams.symmetric_mels:\n return (((np.clip(D, -hparams.max_abs_value,\n hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value))\n + hparams.min_level_db)\n else:\n return ((np.clip(D, 0, hparams.max_abs_value) * 
-hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)\n \n if hparams.symmetric_mels:\n return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db)\n else:\n return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.maximum", "numpy.abs", "numpy.power", "numpy.clip", "numpy.random.rand", "scipy.signal.lfilter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
tacaswell/pandas
[ "81c57e20da278494dfebc2f1043f5ff361a234f3", "81c57e20da278494dfebc2f1043f5ff361a234f3", "81c57e20da278494dfebc2f1043f5ff361a234f3", "81c57e20da278494dfebc2f1043f5ff361a234f3", "81c57e20da278494dfebc2f1043f5ff361a234f3", "81c57e20da278494dfebc2f1043f5ff361a234f3", "81c57e20da278494dfebc2f1043f5ff361a234f3" ]
[ "pandas/tseries/tools.py", "pandas/tseries/offsets.py", "pandas/tests/io/parser/c_parser_only.py", "pandas/core/config.py", "pandas/tests/indexing/test_datetime.py", "asv_bench/benchmarks/gil.py", "scripts/bench_join.py" ]
[ "from datetime import datetime, timedelta, time\nimport numpy as np\nfrom collections import MutableMapping\n\nimport pandas.lib as lib\nimport pandas.tslib as tslib\n\nfrom pandas.types.common import (_ensure_object,\n is_datetime64_ns_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_integer_dtype,\n is_list_like)\nfrom pandas.types.generic import (ABCIndexClass, ABCSeries,\n ABCDataFrame)\nfrom pandas.types.missing import notnull\n\nimport pandas.compat as compat\n\n_DATEUTIL_LEXER_SPLIT = None\ntry:\n # Since these are private methods from dateutil, it is safely imported\n # here so in case this interface changes, pandas will just fallback\n # to not using the functionality\n from dateutil.parser import _timelex\n\n if hasattr(_timelex, 'split'):\n def _lexer_split_from_str(dt_str):\n # The StringIO(str(_)) is for dateutil 2.2 compatibility\n return _timelex.split(compat.StringIO(str(dt_str)))\n\n _DATEUTIL_LEXER_SPLIT = _lexer_split_from_str\nexcept (ImportError, AttributeError):\n pass\n\n\ndef _infer_tzinfo(start, end):\n def _infer(a, b):\n tz = a.tzinfo\n if b and b.tzinfo:\n if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)):\n raise AssertionError('Inputs must both have the same timezone,'\n ' {0} != {1}'.format(tz, b.tzinfo))\n return tz\n\n tz = None\n if start is not None:\n tz = _infer(start, end)\n elif end is not None:\n tz = _infer(end, start)\n return tz\n\n\ndef _guess_datetime_format(dt_str, dayfirst=False,\n dt_str_parse=compat.parse_date,\n dt_str_split=_DATEUTIL_LEXER_SPLIT):\n \"\"\"\n Guess the datetime format of a given datetime string.\n\n Parameters\n ----------\n dt_str : string, datetime string to guess the format of\n dayfirst : boolean, default False\n If True parses dates with the day first, eg 20/01/2005\n Warning: dayfirst=True is not strict, but will prefer to parse\n with day first (this is a known bug).\n dt_str_parse : function, defaults to `compat.parse_date` (dateutil)\n This function should take in a datetime string and return\n a `datetime.datetime` guess that the datetime string represents\n dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)\n This function should take in a datetime string and return\n a list of strings, the guess of the various specific parts\n e.g. 
'2011/12/30' -> ['2011', '/', '12', '/', '30']\n\n Returns\n -------\n ret : datetime format string (for `strftime` or `strptime`)\n \"\"\"\n if dt_str_parse is None or dt_str_split is None:\n return None\n\n if not isinstance(dt_str, compat.string_types):\n return None\n\n day_attribute_and_format = (('day',), '%d', 2)\n\n # attr name, format, padding (if any)\n datetime_attrs_to_format = [\n (('year', 'month', 'day'), '%Y%m%d', 0),\n (('year',), '%Y', 0),\n (('month',), '%B', 0),\n (('month',), '%b', 0),\n (('month',), '%m', 2),\n day_attribute_and_format,\n (('hour',), '%H', 2),\n (('minute',), '%M', 2),\n (('second',), '%S', 2),\n (('microsecond',), '%f', 6),\n (('second', 'microsecond'), '%S.%f', 0),\n ]\n\n if dayfirst:\n datetime_attrs_to_format.remove(day_attribute_and_format)\n datetime_attrs_to_format.insert(0, day_attribute_and_format)\n\n try:\n parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst)\n except:\n # In case the datetime can't be parsed, its format cannot be guessed\n return None\n\n if parsed_datetime is None:\n return None\n\n try:\n tokens = dt_str_split(dt_str)\n except:\n # In case the datetime string can't be split, its format cannot\n # be guessed\n return None\n\n format_guess = [None] * len(tokens)\n found_attrs = set()\n\n for attrs, attr_format, padding in datetime_attrs_to_format:\n # If a given attribute has been placed in the format string, skip\n # over other formats for that same underlying attribute (IE, month\n # can be represented in multiple different ways)\n if set(attrs) & found_attrs:\n continue\n\n if all(getattr(parsed_datetime, attr) is not None for attr in attrs):\n for i, token_format in enumerate(format_guess):\n token_filled = tokens[i].zfill(padding)\n if (token_format is None and\n token_filled == parsed_datetime.strftime(attr_format)):\n format_guess[i] = attr_format\n tokens[i] = token_filled\n found_attrs.update(attrs)\n break\n\n # Only consider it a valid guess if we have a year, month and day\n if len(set(['year', 'month', 'day']) & found_attrs) != 3:\n return None\n\n output_format = []\n for i, guess in enumerate(format_guess):\n if guess is not None:\n # Either fill in the format placeholder (like %Y)\n output_format.append(guess)\n else:\n # Or just the token separate (IE, the dashes in \"01-01-2013\")\n try:\n # If the token is numeric, then we likely didn't parse it\n # properly, so our guess is wrong\n float(tokens[i])\n return None\n except ValueError:\n pass\n\n output_format.append(tokens[i])\n\n guessed_format = ''.join(output_format)\n\n # rebuild string, capturing any inferred padding\n dt_str = ''.join(tokens)\n if parsed_datetime.strftime(guessed_format) == dt_str:\n return guessed_format\n\n\ndef _guess_datetime_format_for_array(arr, **kwargs):\n # Try to guess the format based on the first non-NaN element\n non_nan_elements = notnull(arr).nonzero()[0]\n if len(non_nan_elements):\n return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)\n\n\ndef to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,\n utc=None, box=True, format=None, exact=True,\n unit=None, infer_datetime_format=False):\n \"\"\"\n Convert argument to datetime.\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n\n .. 
versionadded: 0.18.1\n\n or DataFrame/dict-like\n\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as NaT\n - If 'ignore', then invalid parsing will return the input\n dayfirst : boolean, default False\n Specify a date parse order if `arg` is str or its list-likes.\n If True, parses dates with the day first, eg 10/11/12 is parsed as\n 2012-11-10.\n Warning: dayfirst=True is not strict, but will prefer to parse\n with day first (this is a known bug, based on dateutil behavior).\n yearfirst : boolean, default False\n Specify a date parse order if `arg` is str or its list-likes.\n\n - If True parses dates with the year first, eg 10/11/12 is parsed as\n 2010-11-12.\n - If both dayfirst and yearfirst are True, yearfirst is preceded (same\n as dateutil).\n\n Warning: yearfirst=True is not strict, but will prefer to parse\n with year first (this is a known bug, based on dateutil beahavior).\n\n .. versionadded: 0.16.1\n\n utc : boolean, default None\n Return UTC DatetimeIndex if True (converting any tz-aware\n datetime.datetime objects as well).\n box : boolean, default True\n\n - If True returns a DatetimeIndex\n - If False returns ndarray of values.\n format : string, default None\n strftime to parse time, eg \"%d/%m/%Y\", note that \"%f\" will parse\n all the way up to nanoseconds.\n exact : boolean, True by default\n\n - If True, require an exact format match.\n - If False, allow the format to match anywhere in the target string.\n\n unit : string, default 'ns'\n unit of the arg (D,s,ms,us,ns) denote the unit in epoch\n (e.g. a unix timestamp), which is an integer/float number.\n infer_datetime_format : boolean, default False\n If True and no `format` is given, attempt to infer the format of the\n datetime strings, and if it can be inferred, switch to a faster\n method of parsing them. In some cases this can increase the parsing\n speed by ~5-10x.\n\n Returns\n -------\n ret : datetime if parsing succeeded.\n Return type depends on input:\n\n - list-like: DatetimeIndex\n - Series: Series of datetime64 dtype\n - scalar: Timestamp\n\n In case when it is not possible to return designated types (e.g. when\n any element of input is before Timestamp.min or after Timestamp.max)\n return will have datetime.datetime type (or correspoding array/Series).\n\n Examples\n --------\n\n Assembling a datetime from multiple columns of a DataFrame. 
The keys can be\n common abbreviations like ['year', 'month', 'day', 'minute', 'second',\n 'ms', 'us', 'ns']) or plurals of the same\n\n >>> df = pd.DataFrame({'year': [2015, 2016],\n 'month': [2, 3],\n 'day': [4, 5]})\n >>> pd.to_datetime(df)\n 0 2015-02-04\n 1 2016-03-05\n dtype: datetime64[ns]\n\n If a date does not meet the `timestamp limitations\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html\n #timeseries-timestamp-limits>`_, passing errors='ignore'\n will return the original input instead of raising any exception.\n\n Passing errors='coerce' will force an out-of-bounds date to NaT,\n in addition to forcing non-dates (or non-parseable dates) to NaT.\n\n >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')\n datetime.datetime(1300, 1, 1, 0, 0)\n >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')\n NaT\n\n Passing infer_datetime_format=True can often-times speedup a parsing\n if its not an ISO8601 format exactly, but in a regular format.\n\n >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)\n\n >>> s.head()\n 0 3/11/2000\n 1 3/12/2000\n 2 3/13/2000\n 3 3/11/2000\n 4 3/12/2000\n dtype: object\n\n >>> %timeit pd.to_datetime(s,infer_datetime_format=True)\n 100 loops, best of 3: 10.4 ms per loop\n\n >>> %timeit pd.to_datetime(s,infer_datetime_format=False)\n 1 loop, best of 3: 471 ms per loop\n\n \"\"\"\n\n from pandas.tseries.index import DatetimeIndex\n\n tz = 'utc' if utc else None\n\n def _convert_listlike(arg, box, format, name=None, tz=tz):\n\n if isinstance(arg, (list, tuple)):\n arg = np.array(arg, dtype='O')\n\n # these are shortcutable\n if is_datetime64tz_dtype(arg):\n if not isinstance(arg, DatetimeIndex):\n return DatetimeIndex(arg, tz=tz, name=name)\n if utc:\n arg = arg.tz_convert(None).tz_localize('UTC')\n return arg\n\n elif is_datetime64_ns_dtype(arg):\n if box and not isinstance(arg, DatetimeIndex):\n try:\n return DatetimeIndex(arg, tz=tz, name=name)\n except ValueError:\n pass\n\n return arg\n\n elif unit is not None:\n if format is not None:\n raise ValueError(\"cannot specify both format and unit\")\n arg = getattr(arg, 'values', arg)\n result = tslib.array_with_unit_to_datetime(arg, unit,\n errors=errors)\n if box:\n if errors == 'ignore':\n from pandas import Index\n return Index(result)\n\n return DatetimeIndex(result, tz=tz, name=name)\n return result\n elif getattr(arg, 'ndim', 1) > 1:\n raise TypeError('arg must be a string, datetime, list, tuple, '\n '1-d array, or Series')\n\n arg = _ensure_object(arg)\n require_iso8601 = False\n\n if infer_datetime_format and format is None:\n format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)\n\n if format is not None:\n # There is a special fast-path for iso8601 formatted\n # datetime strings, so in those cases don't use the inferred\n # format because this path makes process slower in this\n # special case\n format_is_iso8601 = _format_is_iso(format)\n if format_is_iso8601:\n require_iso8601 = not infer_datetime_format\n format = None\n\n try:\n result = None\n\n if format is not None:\n # shortcut formatting here\n if format == '%Y%m%d':\n try:\n result = _attempt_YYYYMMDD(arg, errors=errors)\n except:\n raise ValueError(\"cannot convert the input to \"\n \"'%Y%m%d' date format\")\n\n # fallback\n if result is None:\n try:\n result = tslib.array_strptime(arg, format, exact=exact,\n errors=errors)\n except tslib.OutOfBoundsDatetime:\n if errors == 'raise':\n raise\n result = arg\n except ValueError:\n # if format was inferred, try falling back\n # to 
array_to_datetime - terminate here\n # for specified formats\n if not infer_datetime_format:\n if errors == 'raise':\n raise\n result = arg\n\n if result is None and (format is None or infer_datetime_format):\n result = tslib.array_to_datetime(\n arg,\n errors=errors,\n utc=utc,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n require_iso8601=require_iso8601\n )\n\n if is_datetime64_dtype(result) and box:\n result = DatetimeIndex(result, tz=tz, name=name)\n return result\n\n except ValueError as e:\n try:\n values, tz = tslib.datetime_to_datetime64(arg)\n return DatetimeIndex._simple_new(values, name=name, tz=tz)\n except (ValueError, TypeError):\n raise e\n\n if arg is None:\n return arg\n elif isinstance(arg, tslib.Timestamp):\n return arg\n elif isinstance(arg, ABCSeries):\n from pandas import Series\n values = _convert_listlike(arg._values, False, format)\n return Series(values, index=arg.index, name=arg.name)\n elif isinstance(arg, (ABCDataFrame, MutableMapping)):\n return _assemble_from_unit_mappings(arg, errors=errors)\n elif isinstance(arg, ABCIndexClass):\n return _convert_listlike(arg, box, format, name=arg.name)\n elif is_list_like(arg):\n return _convert_listlike(arg, box, format)\n\n return _convert_listlike(np.array([arg]), box, format)[0]\n\n\n# mappings for assembling units\n_unit_map = {'year': 'year',\n 'years': 'year',\n 'month': 'month',\n 'months': 'month',\n 'day': 'day',\n 'days': 'day',\n 'hour': 'h',\n 'hours': 'h',\n 'minute': 'm',\n 'minutes': 'm',\n 'second': 's',\n 'seconds': 's',\n 'ms': 'ms',\n 'millisecond': 'ms',\n 'milliseconds': 'ms',\n 'us': 'us',\n 'microsecond': 'us',\n 'microseconds': 'us',\n 'ns': 'ns',\n 'nanosecond': 'ns',\n 'nanoseconds': 'ns'\n }\n\n\ndef _assemble_from_unit_mappings(arg, errors):\n \"\"\"\n assemble the unit specifed fields from the arg (DataFrame)\n Return a Series for actual parsing\n\n Parameters\n ----------\n arg : DataFrame\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as NaT\n - If 'ignore', then invalid parsing will return the input\n\n Returns\n -------\n Series\n \"\"\"\n from pandas import to_timedelta, to_numeric, DataFrame\n arg = DataFrame(arg)\n if not arg.columns.is_unique:\n raise ValueError(\"cannot assemble with duplicate keys\")\n\n # replace passed unit with _unit_map\n def f(value):\n if value in _unit_map:\n return _unit_map[value]\n\n # m is case significant\n if value.lower() in _unit_map:\n return _unit_map[value.lower()]\n\n return value\n\n unit = {k: f(k) for k in arg.keys()}\n unit_rev = {v: k for k, v in unit.items()}\n\n # we require at least Ymd\n required = ['year', 'month', 'day']\n req = sorted(list(set(required) - set(unit_rev.keys())))\n if len(req):\n raise ValueError(\"to assemble mappings requires at \"\n \"least that [year, month, day] be specified: \"\n \"[{0}] is missing\".format(','.join(req)))\n\n # keys we don't recognize\n excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))\n if len(excess):\n raise ValueError(\"extra keys have been passed \"\n \"to the datetime assemblage: \"\n \"[{0}]\".format(','.join(excess)))\n\n def coerce(values):\n # we allow coercion to if errors allows\n values = to_numeric(values, errors=errors)\n\n # prevent overflow in case of int8 or int16\n if is_integer_dtype(values):\n values = values.astype('int64', copy=False)\n return values\n\n values = (coerce(arg[unit_rev['year']]) * 10000 +\n 
coerce(arg[unit_rev['month']]) * 100 +\n coerce(arg[unit_rev['day']]))\n try:\n values = to_datetime(values, format='%Y%m%d', errors=errors)\n except (TypeError, ValueError) as e:\n raise ValueError(\"cannot assemble the \"\n \"datetimes: {0}\".format(e))\n\n for u in ['h', 'm', 's', 'ms', 'us', 'ns']:\n value = unit_rev.get(u)\n if value is not None and value in arg:\n try:\n values += to_timedelta(coerce(arg[value]),\n unit=u,\n errors=errors)\n except (TypeError, ValueError) as e:\n raise ValueError(\"cannot assemble the datetimes \"\n \"[{0}]: {1}\".format(value, e))\n\n return values\n\n\ndef _attempt_YYYYMMDD(arg, errors):\n \"\"\" try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,\n arg is a passed in as an object dtype, but could really be ints/strings\n with nan-like/or floats (e.g. with nan)\n\n Parameters\n ----------\n arg : passed value\n errors : 'raise','ignore','coerce'\n \"\"\"\n\n def calc(carg):\n # calculate the actual result\n carg = carg.astype(object)\n parsed = lib.try_parse_year_month_day(carg / 10000,\n carg / 100 % 100,\n carg % 100)\n return tslib.array_to_datetime(parsed, errors=errors)\n\n def calc_with_mask(carg, mask):\n result = np.empty(carg.shape, dtype='M8[ns]')\n iresult = result.view('i8')\n iresult[~mask] = tslib.iNaT\n result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)). \\\n astype('M8[ns]')\n return result\n\n # try intlike / strings that are ints\n try:\n return calc(arg.astype(np.int64))\n except:\n pass\n\n # a float with actual np.nan\n try:\n carg = arg.astype(np.float64)\n return calc_with_mask(carg, notnull(carg))\n except:\n pass\n\n # string with NaN-like\n try:\n mask = ~lib.ismember(arg, tslib._nat_strings)\n return calc_with_mask(arg, mask)\n except:\n pass\n\n return None\n\n\ndef _format_is_iso(f):\n \"\"\"\n Does format match the iso8601 set that can be handled by the C parser?\n Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different\n but must be consistent. 
Leading 0s in dates and times are optional.\n \"\"\"\n iso_template = '%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'.format\n excluded_formats = ['%Y%m%d', '%Y%m', '%Y']\n\n for date_sep in [' ', '/', '\\\\', '-', '.', '']:\n for time_sep in [' ', 'T']:\n if (iso_template(date_sep=date_sep,\n time_sep=time_sep\n ).startswith(f) and f not in excluded_formats):\n return True\n return False\n\n\ndef parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):\n \"\"\"\n Try hard to parse datetime string, leveraging dateutil plus some extra\n goodies like quarter recognition.\n\n Parameters\n ----------\n arg : compat.string_types\n freq : str or DateOffset, default None\n Helps with interpreting time string if supplied\n dayfirst : bool, default None\n If None uses default from print_config\n yearfirst : bool, default None\n If None uses default from print_config\n\n Returns\n -------\n datetime, datetime/dateutil.parser._result, str\n \"\"\"\n from pandas.core.config import get_option\n if not isinstance(arg, compat.string_types):\n return arg\n\n from pandas.tseries.offsets import DateOffset\n if isinstance(freq, DateOffset):\n freq = freq.rule_code\n\n if dayfirst is None:\n dayfirst = get_option(\"display.date_dayfirst\")\n if yearfirst is None:\n yearfirst = get_option(\"display.date_yearfirst\")\n\n return tslib.parse_datetime_string_with_reso(arg, freq=freq,\n dayfirst=dayfirst,\n yearfirst=yearfirst)\n\n\nDateParseError = tslib.DateParseError\nnormalize_date = tslib.normalize_date\n\n# Fixed time formats for time parsing\n_time_formats = [\"%H:%M\", \"%H%M\", \"%I:%M%p\", \"%I%M%p\",\n \"%H:%M:%S\", \"%H%M%S\", \"%I:%M:%S%p\", \"%I%M%S%p\"]\n\n\ndef _guess_time_format_for_array(arr):\n # Try to guess the format based on the first non-NaN element\n non_nan_elements = notnull(arr).nonzero()[0]\n if len(non_nan_elements):\n element = arr[non_nan_elements[0]]\n for time_format in _time_formats:\n try:\n datetime.strptime(element, time_format)\n return time_format\n except ValueError:\n pass\n\n return None\n\n\ndef to_time(arg, format=None, infer_time_format=False, errors='raise'):\n \"\"\"\n Parse time strings to time objects using fixed strptime formats (\"%H:%M\",\n \"%H%M\", \"%I:%M%p\", \"%I%M%p\", \"%H:%M:%S\", \"%H%M%S\", \"%I:%M:%S%p\",\n \"%I%M%S%p\")\n\n Use infer_time_format if all the strings are in the same format to speed\n up conversion.\n\n Parameters\n ----------\n arg : string in time format, datetime.time, list, tuple, 1-d array, Series\n format : str, default None\n Format used to convert arg into a time object. If None, fixed formats\n are used.\n infer_time_format: bool, default False\n Infer the time format based on the first non-NaN element. 
If all\n strings are in the same format, this will speed up conversion.\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as None\n - If 'ignore', then invalid parsing will return the input\n\n Returns\n -------\n datetime.time\n \"\"\"\n from pandas.core.series import Series\n\n def _convert_listlike(arg, format):\n\n if isinstance(arg, (list, tuple)):\n arg = np.array(arg, dtype='O')\n\n elif getattr(arg, 'ndim', 1) > 1:\n raise TypeError('arg must be a string, datetime, list, tuple, '\n '1-d array, or Series')\n\n arg = _ensure_object(arg)\n\n if infer_time_format and format is None:\n format = _guess_time_format_for_array(arg)\n\n times = []\n if format is not None:\n for element in arg:\n try:\n times.append(datetime.strptime(element, format).time())\n except (ValueError, TypeError):\n if errors == 'raise':\n raise ValueError(\"Cannot convert %s to a time with \"\n \"given format %s\" % (element, format))\n elif errors == 'ignore':\n return arg\n else:\n times.append(None)\n else:\n formats = _time_formats[:]\n format_found = False\n for element in arg:\n time_object = None\n for time_format in formats:\n try:\n time_object = datetime.strptime(element,\n time_format).time()\n if not format_found:\n # Put the found format in front\n fmt = formats.pop(formats.index(time_format))\n formats.insert(0, fmt)\n format_found = True\n break\n except (ValueError, TypeError):\n continue\n\n if time_object is not None:\n times.append(time_object)\n elif errors == 'raise':\n raise ValueError(\"Cannot convert arg {arg} to \"\n \"a time\".format(arg=arg))\n elif errors == 'ignore':\n return arg\n else:\n times.append(None)\n\n return times\n\n if arg is None:\n return arg\n elif isinstance(arg, time):\n return arg\n elif isinstance(arg, Series):\n values = _convert_listlike(arg._values, format)\n return Series(values, index=arg.index, name=arg.name)\n elif isinstance(arg, ABCIndexClass):\n return _convert_listlike(arg, format)\n elif is_list_like(arg):\n return _convert_listlike(arg, format)\n\n return _convert_listlike(np.array([arg]), format)[0]\n\n\ndef format(dt):\n \"\"\"Returns date in YYYYMMDD format.\"\"\"\n return dt.strftime('%Y%m%d')\n\n\nOLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)\n\n\ndef ole2datetime(oledt):\n \"\"\"function for converting excel date to normal date format\"\"\"\n val = float(oledt)\n\n # Excel has a bug where it thinks the date 2/29/1900 exists\n # we just reject any date before 3/1/1900.\n if val < 61:\n raise ValueError(\"Value is outside of acceptable range: %s \" % val)\n\n return OLE_TIME_ZERO + timedelta(days=val)\n", "from datetime import date, datetime, timedelta\nfrom pandas.compat import range\nfrom pandas import compat\nimport numpy as np\n\nfrom pandas.types.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod\nfrom pandas.tseries.tools import to_datetime, normalize_date\nfrom pandas.core.common import AbstractMethodError\n\n# import after tools, dateutil check\nfrom dateutil.relativedelta import relativedelta, weekday\nfrom dateutil.easter import easter\nimport pandas.tslib as tslib\nfrom pandas.tslib import Timestamp, OutOfBoundsDatetime, Timedelta\n\nimport functools\nimport operator\n\n__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',\n 'CBMonthEnd', 'CBMonthBegin',\n 'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',\n 'SemiMonthEnd', 'SemiMonthBegin',\n 'BusinessHour', 'CustomBusinessHour',\n 'YearBegin', 
'BYearBegin', 'YearEnd', 'BYearEnd',\n 'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',\n 'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',\n 'Week', 'WeekOfMonth', 'Easter',\n 'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',\n 'DateOffset']\n\n# convert to/from datetime/timestamp to allow invalid Timestamp ranges to\n# pass thru\n\n\ndef as_timestamp(obj):\n if isinstance(obj, Timestamp):\n return obj\n try:\n return Timestamp(obj)\n except (OutOfBoundsDatetime):\n pass\n return obj\n\n\ndef as_datetime(obj):\n f = getattr(obj, 'to_pydatetime', None)\n if f is not None:\n obj = f()\n return obj\n\n\ndef apply_wraps(func):\n @functools.wraps(func)\n def wrapper(self, other):\n if other is tslib.NaT:\n return tslib.NaT\n elif isinstance(other, (timedelta, Tick, DateOffset)):\n # timedelta path\n return func(self, other)\n elif isinstance(other, (np.datetime64, datetime, date)):\n other = as_timestamp(other)\n\n tz = getattr(other, 'tzinfo', None)\n nano = getattr(other, 'nanosecond', 0)\n\n try:\n if self._adjust_dst and isinstance(other, Timestamp):\n other = other.tz_localize(None)\n\n result = func(self, other)\n\n if self._adjust_dst:\n result = tslib._localize_pydatetime(result, tz)\n\n result = Timestamp(result)\n if self.normalize:\n result = result.normalize()\n\n # nanosecond may be deleted depending on offset process\n if not self.normalize and nano != 0:\n if not isinstance(self, Nano) and result.nanosecond != nano:\n if result.tz is not None:\n # convert to UTC\n value = tslib.tz_convert_single(\n result.value, 'UTC', result.tz)\n else:\n value = result.value\n result = Timestamp(value + nano)\n\n if tz is not None and result.tzinfo is None:\n result = tslib._localize_pydatetime(result, tz)\n\n except OutOfBoundsDatetime:\n result = func(self, as_datetime(other))\n\n if self.normalize:\n # normalize_date returns normal datetime\n result = normalize_date(result)\n\n if tz is not None and result.tzinfo is None:\n result = tslib._localize_pydatetime(result, tz)\n\n return result\n return wrapper\n\n\ndef apply_index_wraps(func):\n @functools.wraps(func)\n def wrapper(self, other):\n result = func(self, other)\n if self.normalize:\n result = result.to_period('D').to_timestamp()\n return result\n return wrapper\n\n\ndef _is_normalized(dt):\n if (dt.hour != 0 or dt.minute != 0 or dt.second != 0 or\n dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0):\n return False\n return True\n\n# ---------------------------------------------------------------------\n# DateOffset\n\n\nclass ApplyTypeError(TypeError):\n # sentinel class for catching the apply error to return NotImplemented\n pass\n\n\nclass CacheableOffset(object):\n _cacheable = True\n\n\nclass DateOffset(object):\n \"\"\"\n Standard kind of date increment used for a date range.\n\n Works exactly like relativedelta in terms of the keyword args you\n pass in, use of the keyword n is discouraged-- you would be better\n off specifying n in the keywords you use, but regardless it is\n there for you. n is needed for DateOffset subclasses.\n\n DateOffets work as follows. Each offset specify a set of dates\n that conform to the DateOffset. For example, Bday defines this\n set to be the set of dates that are weekdays (M-F). 
To test if a\n date is in the set of a DateOffset dateOffset we can use the\n onOffset method: dateOffset.onOffset(date).\n\n If a date is not on a valid date, the rollback and rollforward\n methods can be used to roll the date to the nearest valid date\n before/after the date.\n\n DateOffsets can be created to move dates forward a given number of\n valid dates. For example, Bday(2) can be added to a date to move\n it two business days forward. If the date does not start on a\n valid date, first it is moved to a valid date. Thus pseudo code\n is:\n\n def __add__(date):\n date = rollback(date) # does nothing if date is valid\n return date + <n number of periods>\n\n When a date offset is created for a negative number of periods,\n the date is first rolled forward. The pseudo code is:\n\n def __add__(date):\n date = rollforward(date) # does nothing is date is valid\n return date + <n number of periods>\n\n Zero presents a problem. Should it roll forward or back? We\n arbitrarily have it rollforward:\n\n date + BDay(0) == BDay.rollforward(date)\n\n Since 0 is a bit weird, we suggest avoiding its use.\n \"\"\"\n _cacheable = False\n _normalize_cache = True\n _kwds_use_relativedelta = (\n 'years', 'months', 'weeks', 'days',\n 'year', 'month', 'week', 'day', 'weekday',\n 'hour', 'minute', 'second', 'microsecond'\n )\n _use_relativedelta = False\n _adjust_dst = False\n\n # default for prior pickles\n normalize = False\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = int(n)\n self.normalize = normalize\n self.kwds = kwds\n self._offset, self._use_relativedelta = self._determine_offset()\n\n def _determine_offset(self):\n # timedelta is used for sub-daily plural offsets and all singular\n # offsets relativedelta is used for plural offsets of daily length or\n # more nanosecond(s) are handled by apply_wraps\n kwds_no_nanos = dict(\n (k, v) for k, v in self.kwds.items()\n if k not in ('nanosecond', 'nanoseconds')\n )\n use_relativedelta = False\n\n if len(kwds_no_nanos) > 0:\n if any(k in self._kwds_use_relativedelta for k in kwds_no_nanos):\n use_relativedelta = True\n offset = relativedelta(**kwds_no_nanos)\n else:\n # sub-daily offset - use timedelta (tz-aware)\n offset = timedelta(**kwds_no_nanos)\n else:\n offset = timedelta(1)\n return offset, use_relativedelta\n\n @apply_wraps\n def apply(self, other):\n if self._use_relativedelta:\n other = as_datetime(other)\n\n if len(self.kwds) > 0:\n tzinfo = getattr(other, 'tzinfo', None)\n if tzinfo is not None and self._use_relativedelta:\n # perform calculation in UTC\n other = other.replace(tzinfo=None)\n\n if self.n > 0:\n for i in range(self.n):\n other = other + self._offset\n else:\n for i in range(-self.n):\n other = other - self._offset\n\n if tzinfo is not None and self._use_relativedelta:\n # bring tz back from UTC calculation\n other = tslib._localize_pydatetime(other, tzinfo)\n\n return as_timestamp(other)\n else:\n return other + timedelta(self.n)\n\n @apply_index_wraps\n def apply_index(self, i):\n \"\"\"\n Vectorized apply of DateOffset to DatetimeIndex,\n raises NotImplentedError for offsets without a\n vectorized implementation\n\n .. 
versionadded:: 0.17.0\n\n Parameters\n ----------\n i : DatetimeIndex\n\n Returns\n -------\n y : DatetimeIndex\n \"\"\"\n\n if not type(self) is DateOffset:\n raise NotImplementedError(\"DateOffset subclass %s \"\n \"does not have a vectorized \"\n \"implementation\"\n % (self.__class__.__name__,))\n relativedelta_fast = set(['years', 'months', 'weeks',\n 'days', 'hours', 'minutes',\n 'seconds', 'microseconds'])\n # relativedelta/_offset path only valid for base DateOffset\n if (self._use_relativedelta and\n set(self.kwds).issubset(relativedelta_fast)):\n\n months = ((self.kwds.get('years', 0) * 12 +\n self.kwds.get('months', 0)) * self.n)\n if months:\n shifted = tslib.shift_months(i.asi8, months)\n i = i._shallow_copy(shifted)\n\n weeks = (self.kwds.get('weeks', 0)) * self.n\n if weeks:\n i = (i.to_period('W') + weeks).to_timestamp() + \\\n i.to_perioddelta('W')\n\n timedelta_kwds = dict((k, v) for k, v in self.kwds.items()\n if k in ['days', 'hours', 'minutes',\n 'seconds', 'microseconds'])\n if timedelta_kwds:\n delta = Timedelta(**timedelta_kwds)\n i = i + (self.n * delta)\n return i\n elif not self._use_relativedelta and hasattr(self, '_offset'):\n # timedelta\n return i + (self._offset * self.n)\n else:\n # relativedelta with other keywords\n raise NotImplementedError(\"DateOffset with relativedelta \"\n \"keyword(s) %s not able to be \"\n \"applied vectorized\" %\n (set(self.kwds) - relativedelta_fast),)\n\n def isAnchored(self):\n return (self.n == 1)\n\n def copy(self):\n return self.__class__(self.n, normalize=self.normalize, **self.kwds)\n\n def _should_cache(self):\n return self.isAnchored() and self._cacheable\n\n def _params(self):\n all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))\n if 'holidays' in all_paras and not all_paras['holidays']:\n all_paras.pop('holidays')\n exclude = ['kwds', 'name', 'normalize', 'calendar']\n attrs = [(k, v) for k, v in all_paras.items()\n if (k not in exclude) and (k[0] != '_')]\n attrs = sorted(set(attrs))\n params = tuple([str(self.__class__)] + attrs)\n return params\n\n def __repr__(self):\n className = getattr(self, '_outputName', type(self).__name__)\n exclude = set(['n', 'inc', 'normalize'])\n attrs = []\n for attr in sorted(self.__dict__):\n if ((attr == 'kwds' and len(self.kwds) == 0) or\n attr.startswith('_')):\n continue\n elif attr == 'kwds':\n kwds_new = {}\n for key in self.kwds:\n if not hasattr(self, key):\n kwds_new[key] = self.kwds[key]\n if len(kwds_new) > 0:\n attrs.append('='.join((attr, repr(kwds_new))))\n else:\n if attr not in exclude:\n attrs.append('='.join((attr, repr(getattr(self, attr)))))\n\n if abs(self.n) != 1:\n plural = 's'\n else:\n plural = ''\n\n n_str = \"\"\n if self.n != 1:\n n_str = \"%s * \" % self.n\n\n out = '<%s' % n_str + className + plural\n if attrs:\n out += ': ' + ', '.join(attrs)\n out += '>'\n return out\n\n @property\n def name(self):\n return self.rule_code\n\n def __eq__(self, other):\n if other is None:\n return False\n\n if isinstance(other, compat.string_types):\n from pandas.tseries.frequencies import to_offset\n\n other = to_offset(other)\n\n if not isinstance(other, DateOffset):\n return False\n\n return self._params() == other._params()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._params())\n\n def __call__(self, other):\n return self.apply(other)\n\n def __add__(self, other):\n if isinstance(other, (ABCDatetimeIndex, ABCSeries)):\n return other + self\n elif isinstance(other, ABCPeriod):\n return other + 
self\n try:\n return self.apply(other)\n except ApplyTypeError:\n return NotImplemented\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __sub__(self, other):\n if isinstance(other, datetime):\n raise TypeError('Cannot subtract datetime from offset.')\n elif type(other) == type(self):\n return self.__class__(self.n - other.n, normalize=self.normalize,\n **self.kwds)\n else: # pragma: no cover\n return NotImplemented\n\n def __rsub__(self, other):\n if isinstance(other, (ABCDatetimeIndex, ABCSeries)):\n return other - self\n return self.__class__(-self.n, normalize=self.normalize,\n **self.kwds) + other\n\n def __mul__(self, someInt):\n return self.__class__(n=someInt * self.n, normalize=self.normalize,\n **self.kwds)\n\n def __rmul__(self, someInt):\n return self.__mul__(someInt)\n\n def __neg__(self):\n return self.__class__(-self.n, normalize=self.normalize, **self.kwds)\n\n def rollback(self, dt):\n \"\"\"Roll provided date backward to next offset only if not on offset\"\"\"\n dt = as_timestamp(dt)\n if not self.onOffset(dt):\n dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)\n return dt\n\n def rollforward(self, dt):\n \"\"\"Roll provided date forward to next offset only if not on offset\"\"\"\n dt = as_timestamp(dt)\n if not self.onOffset(dt):\n dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)\n return dt\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n # XXX, see #1395\n if type(self) == DateOffset or isinstance(self, Tick):\n return True\n\n # Default (slow) method for determining if some date is a member of the\n # date range generated by this offset. Subclasses may have this\n # re-implemented in a nicer way.\n a = dt\n b = ((dt + self) - self)\n return a == b\n\n # helpers for vectorized offsets\n def _beg_apply_index(self, i, freq):\n \"\"\"Offsets index to beginning of Period frequency\"\"\"\n\n off = i.to_perioddelta('D')\n\n from pandas.tseries.frequencies import get_freq_code\n base, mult = get_freq_code(freq)\n base_period = i.to_period(base)\n if self.n <= 0:\n # when subtracting, dates on start roll to prior\n roll = np.where(base_period.to_timestamp() == i - off,\n self.n, self.n + 1)\n else:\n roll = self.n\n\n base = (base_period + roll).to_timestamp()\n return base + off\n\n def _end_apply_index(self, i, freq):\n \"\"\"Offsets index to end of Period frequency\"\"\"\n\n off = i.to_perioddelta('D')\n\n from pandas.tseries.frequencies import get_freq_code\n base, mult = get_freq_code(freq)\n base_period = i.to_period(base)\n if self.n > 0:\n # when adding, dates on end roll to next\n roll = np.where(base_period.to_timestamp(how='end') == i - off,\n self.n, self.n - 1)\n else:\n roll = self.n\n\n base = (base_period + roll).to_timestamp(how='end')\n return base + off\n\n # way to get around weirdness with rule_code\n @property\n def _prefix(self):\n raise NotImplementedError('Prefix not defined')\n\n @property\n def rule_code(self):\n return self._prefix\n\n @property\n def freqstr(self):\n try:\n code = self.rule_code\n except NotImplementedError:\n return repr(self)\n\n if self.n != 1:\n fstr = '%d%s' % (self.n, code)\n else:\n fstr = code\n\n return fstr\n\n @property\n def nanos(self):\n raise ValueError(\"{0} is a non-fixed frequency\".format(self))\n\n\nclass SingleConstructorOffset(DateOffset):\n\n @classmethod\n def _from_name(cls, suffix=None):\n # default _from_name calls cls with no args\n if suffix:\n raise ValueError(\"Bad freq suffix %s\" % suffix)\n return 
cls()\n\n\nclass BusinessMixin(object):\n \"\"\" mixin to business types to provide related functions \"\"\"\n\n # TODO: Combine this with DateOffset by defining a whitelisted set of\n # attributes on each object rather than the existing behavior of iterating\n # over internal ``__dict__``\n def __repr__(self):\n className = getattr(self, '_outputName', self.__class__.__name__)\n\n if abs(self.n) != 1:\n plural = 's'\n else:\n plural = ''\n\n n_str = \"\"\n if self.n != 1:\n n_str = \"%s * \" % self.n\n\n out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'\n return out\n\n def _repr_attrs(self):\n if self.offset:\n attrs = ['offset=%s' % repr(self.offset)]\n else:\n attrs = None\n out = ''\n if attrs:\n out += ': ' + ', '.join(attrs)\n return out\n\n def __getstate__(self):\n \"\"\"Return a pickleable state\"\"\"\n state = self.__dict__.copy()\n\n # we don't want to actually pickle the calendar object\n # as its a np.busyday; we recreate on deserilization\n if 'calendar' in state:\n del state['calendar']\n try:\n state['kwds'].pop('calendar')\n except KeyError:\n pass\n\n return state\n\n def __setstate__(self, state):\n \"\"\"Reconstruct an instance from a pickled state\"\"\"\n self.__dict__ = state\n if 'weekmask' in state and 'holidays' in state:\n calendar, holidays = self.get_calendar(weekmask=self.weekmask,\n holidays=self.holidays,\n calendar=None)\n self.kwds['calendar'] = self.calendar = calendar\n self.kwds['holidays'] = self.holidays = holidays\n self.kwds['weekmask'] = state['weekmask']\n\n\nclass BusinessDay(BusinessMixin, SingleConstructorOffset):\n \"\"\"\n DateOffset subclass representing possibly n business days\n \"\"\"\n _prefix = 'B'\n _adjust_dst = True\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = int(n)\n self.normalize = normalize\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n\n @property\n def freqstr(self):\n try:\n code = self.rule_code\n except NotImplementedError:\n return repr(self)\n\n if self.n != 1:\n fstr = '%d%s' % (self.n, code)\n else:\n fstr = code\n\n if self.offset:\n fstr += self._offset_str()\n\n return fstr\n\n def _offset_str(self):\n def get_str(td):\n off_str = ''\n if td.days > 0:\n off_str += str(td.days) + 'D'\n if td.seconds > 0:\n s = td.seconds\n hrs = int(s / 3600)\n if hrs != 0:\n off_str += str(hrs) + 'H'\n s -= hrs * 3600\n mts = int(s / 60)\n if mts != 0:\n off_str += str(mts) + 'Min'\n s -= mts * 60\n if s != 0:\n off_str += str(s) + 's'\n if td.microseconds > 0:\n off_str += str(td.microseconds) + 'us'\n return off_str\n\n if isinstance(self.offset, timedelta):\n zero = timedelta(0, 0, 0)\n if self.offset >= zero:\n off_str = '+' + get_str(self.offset)\n else:\n off_str = '-' + get_str(-self.offset)\n return off_str\n else:\n return '+' + repr(self.offset)\n\n def isAnchored(self):\n return (self.n == 1)\n\n @apply_wraps\n def apply(self, other):\n if isinstance(other, datetime):\n n = self.n\n\n if n == 0 and other.weekday() > 4:\n n = 1\n\n result = other\n\n # avoid slowness below\n if abs(n) > 5:\n k = n // 5\n result = result + timedelta(7 * k)\n if n < 0 and result.weekday() > 4:\n n += 1\n n -= 5 * k\n if n == 0 and result.weekday() > 4:\n n -= 1\n\n while n != 0:\n k = n // abs(n)\n result = result + timedelta(k)\n if result.weekday() < 5:\n n -= k\n\n if self.offset:\n result = result + self.offset\n return result\n\n elif isinstance(other, (timedelta, Tick)):\n return BDay(self.n, offset=self.offset + other,\n normalize=self.normalize)\n else:\n raise 
ApplyTypeError('Only know how to combine business day with '\n 'datetime or timedelta.')\n\n @apply_index_wraps\n def apply_index(self, i):\n time = i.to_perioddelta('D')\n # to_period rolls forward to next BDay; track and\n # reduce n where it does when rolling forward\n shifted = (i.to_perioddelta('B') - time).asi8 != 0\n if self.n > 0:\n roll = np.where(shifted, self.n - 1, self.n)\n else:\n roll = self.n\n\n return (i.to_period('B') + roll).to_timestamp() + time\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n return dt.weekday() < 5\n\n\nclass BusinessHourMixin(BusinessMixin):\n\n def __init__(self, **kwds):\n # must be validated here to equality check\n kwds['start'] = self._validate_time(kwds.get('start', '09:00'))\n kwds['end'] = self._validate_time(kwds.get('end', '17:00'))\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n self.start = kwds.get('start', '09:00')\n self.end = kwds.get('end', '17:00')\n\n def _validate_time(self, t_input):\n from datetime import time as dt_time\n import time\n if isinstance(t_input, compat.string_types):\n try:\n t = time.strptime(t_input, '%H:%M')\n return dt_time(hour=t.tm_hour, minute=t.tm_min)\n except ValueError:\n raise ValueError(\"time data must match '%H:%M' format\")\n elif isinstance(t_input, dt_time):\n if t_input.second != 0 or t_input.microsecond != 0:\n raise ValueError(\n \"time data must be specified only with hour and minute\")\n return t_input\n else:\n raise ValueError(\"time data must be string or datetime.time\")\n\n def _get_daytime_flag(self):\n if self.start == self.end:\n raise ValueError('start and end must not be the same')\n elif self.start < self.end:\n return True\n else:\n return False\n\n def _next_opening_time(self, other):\n \"\"\"\n If n is positive, return tomorrow's business day opening time.\n Otherwise yesterday's business day's opening time.\n\n Opening time always locates on BusinessDay.\n Otherwise, closing time may not if business hour extends over midnight.\n \"\"\"\n if not self.next_bday.onOffset(other):\n other = other + self.next_bday\n else:\n if self.n >= 0 and self.start < other.time():\n other = other + self.next_bday\n elif self.n < 0 and other.time() < self.start:\n other = other + self.next_bday\n return datetime(other.year, other.month, other.day,\n self.start.hour, self.start.minute)\n\n def _prev_opening_time(self, other):\n \"\"\"\n If n is positive, return yesterday's business day opening time.\n Otherwise yesterday business day's opening time.\n \"\"\"\n if not self.next_bday.onOffset(other):\n other = other - self.next_bday\n else:\n if self.n >= 0 and other.time() < self.start:\n other = other - self.next_bday\n elif self.n < 0 and other.time() > self.start:\n other = other - self.next_bday\n return datetime(other.year, other.month, other.day,\n self.start.hour, self.start.minute)\n\n def _get_business_hours_by_sec(self):\n \"\"\"\n Return business hours in a day by seconds.\n \"\"\"\n if self._get_daytime_flag():\n # create dummy datetime to calcurate businesshours in a day\n dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)\n until = datetime(2014, 4, 1, self.end.hour, self.end.minute)\n return tslib.tot_seconds(until - dtstart)\n else:\n self.daytime = False\n dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)\n until = datetime(2014, 4, 2, self.end.hour, self.end.minute)\n return tslib.tot_seconds(until - dtstart)\n\n @apply_wraps\n def rollback(self, dt):\n \"\"\"Roll provided date 
backward to next offset only if not on offset\"\"\"\n if not self.onOffset(dt):\n businesshours = self._get_business_hours_by_sec()\n if self.n >= 0:\n dt = self._prev_opening_time(\n dt) + timedelta(seconds=businesshours)\n else:\n dt = self._next_opening_time(\n dt) + timedelta(seconds=businesshours)\n return dt\n\n @apply_wraps\n def rollforward(self, dt):\n \"\"\"Roll provided date forward to next offset only if not on offset\"\"\"\n if not self.onOffset(dt):\n if self.n >= 0:\n return self._next_opening_time(dt)\n else:\n return self._prev_opening_time(dt)\n return dt\n\n @apply_wraps\n def apply(self, other):\n # calcurate here because offset is not immutable\n daytime = self._get_daytime_flag()\n businesshours = self._get_business_hours_by_sec()\n bhdelta = timedelta(seconds=businesshours)\n\n if isinstance(other, datetime):\n # used for detecting edge condition\n nanosecond = getattr(other, 'nanosecond', 0)\n # reset timezone and nanosecond\n # other may be a Timestamp, thus not use replace\n other = datetime(other.year, other.month, other.day,\n other.hour, other.minute,\n other.second, other.microsecond)\n n = self.n\n if n >= 0:\n if (other.time() == self.end or\n not self._onOffset(other, businesshours)):\n other = self._next_opening_time(other)\n else:\n if other.time() == self.start:\n # adjustment to move to previous business day\n other = other - timedelta(seconds=1)\n if not self._onOffset(other, businesshours):\n other = self._next_opening_time(other)\n other = other + bhdelta\n\n bd, r = divmod(abs(n * 60), businesshours // 60)\n if n < 0:\n bd, r = -bd, -r\n\n if bd != 0:\n skip_bd = BusinessDay(n=bd)\n # midnight business hour may not on BusinessDay\n if not self.next_bday.onOffset(other):\n remain = other - self._prev_opening_time(other)\n other = self._next_opening_time(other + skip_bd) + remain\n else:\n other = other + skip_bd\n\n hours, minutes = divmod(r, 60)\n result = other + timedelta(hours=hours, minutes=minutes)\n\n # because of previous adjustment, time will be larger than start\n if ((daytime and (result.time() < self.start or\n self.end < result.time())) or\n not daytime and (self.end < result.time() < self.start)):\n if n >= 0:\n bday_edge = self._prev_opening_time(other)\n bday_edge = bday_edge + bhdelta\n # calcurate remainder\n bday_remain = result - bday_edge\n result = self._next_opening_time(other)\n result += bday_remain\n else:\n bday_edge = self._next_opening_time(other)\n bday_remain = result - bday_edge\n result = self._next_opening_time(result) + bhdelta\n result += bday_remain\n # edge handling\n if n >= 0:\n if result.time() == self.end:\n result = self._next_opening_time(result)\n else:\n if result.time() == self.start and nanosecond == 0:\n # adjustment to move to previous business day\n result = self._next_opening_time(\n result - timedelta(seconds=1)) + bhdelta\n\n return result\n else:\n raise ApplyTypeError(\n 'Only know how to combine business hour with ')\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n\n if dt.tzinfo is not None:\n dt = datetime(dt.year, dt.month, dt.day, dt.hour,\n dt.minute, dt.second, dt.microsecond)\n # Valid BH can be on the different BusinessDay during midnight\n # Distinguish by the time spent from previous opening time\n businesshours = self._get_business_hours_by_sec()\n return self._onOffset(dt, businesshours)\n\n def _onOffset(self, dt, businesshours):\n \"\"\"\n Slight speedups using calcurated values\n \"\"\"\n # if self.normalize and not 
_is_normalized(dt):\n # return False\n # Valid BH can be on the different BusinessDay during midnight\n # Distinguish by the time spent from previous opening time\n if self.n >= 0:\n op = self._prev_opening_time(dt)\n else:\n op = self._next_opening_time(dt)\n span = tslib.tot_seconds(dt - op)\n if span <= businesshours:\n return True\n else:\n return False\n\n def _repr_attrs(self):\n out = super(BusinessHourMixin, self)._repr_attrs()\n start = self.start.strftime('%H:%M')\n end = self.end.strftime('%H:%M')\n attrs = ['{prefix}={start}-{end}'.format(prefix=self._prefix,\n start=start, end=end)]\n out += ': ' + ', '.join(attrs)\n return out\n\n\nclass BusinessHour(BusinessHourMixin, SingleConstructorOffset):\n \"\"\"\n DateOffset subclass representing possibly n business days\n\n .. versionadded: 0.16.1\n\n \"\"\"\n _prefix = 'BH'\n _anchor = 0\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = int(n)\n self.normalize = normalize\n super(BusinessHour, self).__init__(**kwds)\n\n # used for moving to next businessday\n if self.n >= 0:\n nb_offset = 1\n else:\n nb_offset = -1\n self.next_bday = BusinessDay(n=nb_offset)\n\n\nclass CustomBusinessDay(BusinessDay):\n \"\"\"\n **EXPERIMENTAL** DateOffset subclass representing possibly n business days\n excluding holidays\n\n .. warning:: EXPERIMENTAL\n\n This class is not officially supported and the API is likely to change\n in future versions. Use this at your own risk.\n\n Parameters\n ----------\n n : int, default 1\n offset : timedelta, default timedelta(0)\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range\n weekmask : str, Default 'Mon Tue Wed Thu Fri'\n weekmask of valid business days, passed to ``numpy.busdaycalendar``\n holidays : list\n list/array of dates to exclude from the set of valid business days,\n passed to ``numpy.busdaycalendar``\n calendar : pd.HolidayCalendar or np.busdaycalendar\n \"\"\"\n _cacheable = False\n _prefix = 'C'\n\n def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',\n holidays=None, calendar=None, **kwds):\n self.n = int(n)\n self.normalize = normalize\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n calendar, holidays = self.get_calendar(weekmask=weekmask,\n holidays=holidays,\n calendar=calendar)\n # CustomBusinessDay instances are identified by the\n # following two attributes. 
See DateOffset._params()\n # holidays, weekmask\n\n self.kwds['weekmask'] = self.weekmask = weekmask\n self.kwds['holidays'] = self.holidays = holidays\n self.kwds['calendar'] = self.calendar = calendar\n\n def get_calendar(self, weekmask, holidays, calendar):\n \"\"\"Generate busdaycalendar\"\"\"\n if isinstance(calendar, np.busdaycalendar):\n if not holidays:\n holidays = tuple(calendar.holidays)\n elif not isinstance(holidays, tuple):\n holidays = tuple(holidays)\n else:\n # trust that calendar.holidays and holidays are\n # consistent\n pass\n return calendar, holidays\n\n if holidays is None:\n holidays = []\n try:\n holidays = holidays + calendar.holidays().tolist()\n except AttributeError:\n pass\n holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in\n holidays]\n holidays = tuple(sorted(holidays))\n\n kwargs = {'weekmask': weekmask}\n if holidays:\n kwargs['holidays'] = holidays\n\n busdaycalendar = np.busdaycalendar(**kwargs)\n return busdaycalendar, holidays\n\n @apply_wraps\n def apply(self, other):\n if self.n <= 0:\n roll = 'forward'\n else:\n roll = 'backward'\n\n if isinstance(other, datetime):\n date_in = other\n np_dt = np.datetime64(date_in.date())\n\n np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,\n busdaycal=self.calendar)\n\n dt_date = np_incr_dt.astype(datetime)\n result = datetime.combine(dt_date, date_in.time())\n\n if self.offset:\n result = result + self.offset\n return result\n\n elif isinstance(other, (timedelta, Tick)):\n return BDay(self.n, offset=self.offset + other,\n normalize=self.normalize)\n else:\n raise ApplyTypeError('Only know how to combine trading day with '\n 'datetime, datetime64 or timedelta.')\n\n def apply_index(self, i):\n raise NotImplementedError\n\n @staticmethod\n def _to_dt64(dt, dtype='datetime64'):\n # Currently\n # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')\n # numpy.datetime64('2013-05-01T02:00:00.000000+0200')\n # Thus astype is needed to cast datetime to datetime64[D]\n if getattr(dt, 'tzinfo', None) is not None:\n i8 = tslib.pydt_to_i8(dt)\n dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)\n dt = Timestamp(dt)\n dt = np.datetime64(dt)\n if dt.dtype.name != dtype:\n dt = dt.astype(dtype)\n return dt\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n day64 = self._to_dt64(dt, 'datetime64[D]')\n return np.is_busday(day64, busdaycal=self.calendar)\n\n\nclass CustomBusinessHour(BusinessHourMixin, SingleConstructorOffset):\n \"\"\"\n DateOffset subclass representing possibly n custom business days\n\n .. 
versionadded: 0.18.1\n\n \"\"\"\n _prefix = 'CBH'\n _anchor = 0\n\n def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',\n holidays=None, calendar=None, **kwds):\n self.n = int(n)\n self.normalize = normalize\n super(CustomBusinessHour, self).__init__(**kwds)\n # used for moving to next businessday\n if self.n >= 0:\n nb_offset = 1\n else:\n nb_offset = -1\n self.next_bday = CustomBusinessDay(n=nb_offset,\n weekmask=weekmask,\n holidays=holidays,\n calendar=calendar)\n\n self.kwds['weekmask'] = self.next_bday.weekmask\n self.kwds['holidays'] = self.next_bday.holidays\n self.kwds['calendar'] = self.next_bday.calendar\n\n\nclass MonthOffset(SingleConstructorOffset):\n _adjust_dst = True\n\n @property\n def name(self):\n if self.isAnchored:\n return self.rule_code\n else:\n return \"%s-%s\" % (self.rule_code, _int_to_month[self.n])\n\n\nclass MonthEnd(MonthOffset):\n \"\"\"DateOffset of one month end\"\"\"\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n _, days_in_month = tslib.monthrange(other.year, other.month)\n if other.day != days_in_month:\n other = other + relativedelta(months=-1, day=31)\n if n <= 0:\n n = n + 1\n other = other + relativedelta(months=n, day=31)\n return other\n\n @apply_index_wraps\n def apply_index(self, i):\n shifted = tslib.shift_months(i.asi8, self.n, 'end')\n return i._shallow_copy(shifted)\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n days_in_month = tslib.monthrange(dt.year, dt.month)[1]\n return dt.day == days_in_month\n\n _prefix = 'M'\n\n\nclass MonthBegin(MonthOffset):\n \"\"\"DateOffset of one month at beginning\"\"\"\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n\n if other.day > 1 and n <= 0: # then roll forward if n<=0\n n += 1\n\n return other + relativedelta(months=n, day=1)\n\n @apply_index_wraps\n def apply_index(self, i):\n shifted = tslib.shift_months(i.asi8, self.n, 'start')\n return i._shallow_copy(shifted)\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n return dt.day == 1\n\n _prefix = 'MS'\n\n\nclass SemiMonthOffset(DateOffset):\n _adjust_dst = True\n _default_day_of_month = 15\n _min_day_of_month = 2\n\n def __init__(self, n=1, day_of_month=None, normalize=False, **kwds):\n if day_of_month is None:\n self.day_of_month = self._default_day_of_month\n else:\n self.day_of_month = int(day_of_month)\n if not self._min_day_of_month <= self.day_of_month <= 27:\n raise ValueError('day_of_month must be '\n '{}<=day_of_month<=27, got {}'.format(\n self._min_day_of_month, self.day_of_month))\n self.n = int(n)\n self.normalize = normalize\n self.kwds = kwds\n self.kwds['day_of_month'] = self.day_of_month\n\n @classmethod\n def _from_name(cls, suffix=None):\n return cls(day_of_month=suffix)\n\n @property\n def rule_code(self):\n suffix = '-{}'.format(self.day_of_month)\n return self._prefix + suffix\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n if not self.onOffset(other):\n _, days_in_month = tslib.monthrange(other.year, other.month)\n if 1 < other.day < self.day_of_month:\n other += relativedelta(day=self.day_of_month)\n if n > 0:\n # rollforward so subtract 1\n n -= 1\n elif self.day_of_month < other.day < days_in_month:\n other += relativedelta(day=self.day_of_month)\n if n < 0:\n # rollforward in the negative direction so add 1\n n += 1\n elif n == 0:\n n = 1\n\n return self._apply(n, other)\n\n def _apply(self, n, other):\n \"\"\"Handle specific apply logic for child classes\"\"\"\n raise 
AbstractMethodError(self)\n\n @apply_index_wraps\n def apply_index(self, i):\n # determine how many days away from the 1st of the month we are\n days_from_start = i.to_perioddelta('M').asi8\n delta = Timedelta(days=self.day_of_month - 1).value\n\n # get boolean array for each element before the day_of_month\n before_day_of_month = days_from_start < delta\n\n # get boolean array for each element after the day_of_month\n after_day_of_month = days_from_start > delta\n\n # determine the correct n for each date in i\n roll = self._get_roll(i, before_day_of_month, after_day_of_month)\n\n # isolate the time since it will be striped away one the next line\n time = i.to_perioddelta('D')\n\n # apply the correct number of months\n i = (i.to_period('M') + (roll // 2)).to_timestamp()\n\n # apply the correct day\n i = self._apply_index_days(i, roll)\n\n return i + time\n\n def _get_roll(self, i, before_day_of_month, after_day_of_month):\n \"\"\"Return an array with the correct n for each date in i.\n\n The roll array is based on the fact that i gets rolled back to\n the first day of the month.\n \"\"\"\n raise AbstractMethodError(self)\n\n def _apply_index_days(self, i, roll):\n \"\"\"Apply the correct day for each date in i\"\"\"\n raise AbstractMethodError(self)\n\n\nclass SemiMonthEnd(SemiMonthOffset):\n \"\"\"\n Two DateOffset's per month repeating on the last\n day of the month and day_of_month.\n\n .. versionadded:: 0.19.0\n\n Parameters\n ----------\n n: int\n normalize : bool, default False\n day_of_month: int, {1, 3,...,27}, default 15\n \"\"\"\n _prefix = 'SM'\n _min_day_of_month = 1\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n _, days_in_month = tslib.monthrange(dt.year, dt.month)\n return dt.day in (self.day_of_month, days_in_month)\n\n def _apply(self, n, other):\n # if other.day is not day_of_month move to day_of_month and update n\n if other.day < self.day_of_month:\n other += relativedelta(day=self.day_of_month)\n if n > 0:\n n -= 1\n elif other.day > self.day_of_month:\n other += relativedelta(day=self.day_of_month)\n if n == 0:\n n = 1\n else:\n n += 1\n\n months = n // 2\n day = 31 if n % 2 else self.day_of_month\n return other + relativedelta(months=months, day=day)\n\n def _get_roll(self, i, before_day_of_month, after_day_of_month):\n n = self.n\n is_month_end = i.is_month_end\n if n > 0:\n roll_end = np.where(is_month_end, 1, 0)\n roll_before = np.where(before_day_of_month, n, n + 1)\n roll = roll_end + roll_before\n elif n == 0:\n roll_after = np.where(after_day_of_month, 2, 0)\n roll_before = np.where(~after_day_of_month, 1, 0)\n roll = roll_before + roll_after\n else:\n roll = np.where(after_day_of_month, n + 2, n + 1)\n return roll\n\n def _apply_index_days(self, i, roll):\n i += (roll % 2) * Timedelta(days=self.day_of_month).value\n return i + Timedelta(days=-1)\n\n\nclass SemiMonthBegin(SemiMonthOffset):\n \"\"\"\n Two DateOffset's per month repeating on the first\n day of the month and day_of_month.\n\n .. 
versionadded:: 0.19.0\n\n Parameters\n ----------\n n: int\n normalize : bool, default False\n day_of_month: int, {2, 3,...,27}, default 15\n \"\"\"\n _prefix = 'SMS'\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n return dt.day in (1, self.day_of_month)\n\n def _apply(self, n, other):\n # if other.day is not day_of_month move to day_of_month and update n\n if other.day < self.day_of_month:\n other += relativedelta(day=self.day_of_month)\n if n == 0:\n n = -1\n else:\n n -= 1\n elif other.day > self.day_of_month:\n other += relativedelta(day=self.day_of_month)\n if n == 0:\n n = 1\n elif n < 0:\n n += 1\n\n months = n // 2 + n % 2\n day = 1 if n % 2 else self.day_of_month\n return other + relativedelta(months=months, day=day)\n\n def _get_roll(self, i, before_day_of_month, after_day_of_month):\n n = self.n\n is_month_start = i.is_month_start\n if n > 0:\n roll = np.where(before_day_of_month, n, n + 1)\n elif n == 0:\n roll_start = np.where(is_month_start, 0, 1)\n roll_after = np.where(after_day_of_month, 1, 0)\n roll = roll_start + roll_after\n else:\n roll_after = np.where(after_day_of_month, n + 2, n + 1)\n roll_start = np.where(is_month_start, -1, 0)\n roll = roll_after + roll_start\n return roll\n\n def _apply_index_days(self, i, roll):\n return i + (roll % 2) * Timedelta(days=self.day_of_month - 1).value\n\n\nclass BusinessMonthEnd(MonthOffset):\n \"\"\"DateOffset increments between business EOM dates\"\"\"\n\n def isAnchored(self):\n return (self.n == 1)\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n wkday, days_in_month = tslib.monthrange(other.year, other.month)\n lastBDay = days_in_month - max(((wkday + days_in_month - 1)\n % 7) - 4, 0)\n\n if n > 0 and not other.day >= lastBDay:\n n = n - 1\n elif n <= 0 and other.day > lastBDay:\n n = n + 1\n other = other + relativedelta(months=n, day=31)\n\n if other.weekday() > 4:\n other = other - BDay()\n return other\n\n _prefix = 'BM'\n\n\nclass BusinessMonthBegin(MonthOffset):\n \"\"\"DateOffset of one business month at beginning\"\"\"\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n wkday, _ = tslib.monthrange(other.year, other.month)\n first = _get_firstbday(wkday)\n\n if other.day > first and n <= 0:\n # as if rolled forward already\n n += 1\n elif other.day < first and n > 0:\n other = other + timedelta(days=first - other.day)\n n -= 1\n\n other = other + relativedelta(months=n)\n wkday, _ = tslib.monthrange(other.year, other.month)\n first = _get_firstbday(wkday)\n result = datetime(other.year, other.month, first,\n other.hour, other.minute,\n other.second, other.microsecond)\n return result\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n first_weekday, _ = tslib.monthrange(dt.year, dt.month)\n if first_weekday == 5:\n return dt.day == 3\n elif first_weekday == 6:\n return dt.day == 2\n else:\n return dt.day == 1\n\n _prefix = 'BMS'\n\n\nclass CustomBusinessMonthEnd(BusinessMixin, MonthOffset):\n \"\"\"\n **EXPERIMENTAL** DateOffset of one custom business month\n\n .. warning:: EXPERIMENTAL\n\n This class is not officially supported and the API is likely to change\n in future versions. 
Use this at your own risk.\n\n Parameters\n ----------\n n : int, default 1\n offset : timedelta, default timedelta(0)\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range\n weekmask : str, Default 'Mon Tue Wed Thu Fri'\n weekmask of valid business days, passed to ``numpy.busdaycalendar``\n holidays : list\n list/array of dates to exclude from the set of valid business days,\n passed to ``numpy.busdaycalendar``\n calendar : pd.HolidayCalendar or np.busdaycalendar\n \"\"\"\n\n _cacheable = False\n _prefix = 'CBM'\n\n def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',\n holidays=None, calendar=None, **kwds):\n self.n = int(n)\n self.normalize = normalize\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,\n weekmask=weekmask, holidays=holidays,\n calendar=calendar, **kwds)\n self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)\n self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n # First move to month offset\n cur_mend = self.m_offset.rollforward(other)\n # Find this custom month offset\n cur_cmend = self.cbday.rollback(cur_mend)\n\n # handle zero case. arbitrarily rollforward\n if n == 0 and other != cur_cmend:\n n += 1\n\n if other < cur_cmend and n >= 1:\n n -= 1\n elif other > cur_cmend and n <= -1:\n n += 1\n\n new = cur_mend + n * self.m_offset\n result = self.cbday.rollback(new)\n return result\n\n\nclass CustomBusinessMonthBegin(BusinessMixin, MonthOffset):\n \"\"\"\n **EXPERIMENTAL** DateOffset of one custom business month\n\n .. warning:: EXPERIMENTAL\n\n This class is not officially supported and the API is likely to change\n in future versions. Use this at your own risk.\n\n Parameters\n ----------\n n : int, default 1\n offset : timedelta, default timedelta(0)\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range\n weekmask : str, Default 'Mon Tue Wed Thu Fri'\n weekmask of valid business days, passed to ``numpy.busdaycalendar``\n holidays : list\n list/array of dates to exclude from the set of valid business days,\n passed to ``numpy.busdaycalendar``\n calendar : pd.HolidayCalendar or np.busdaycalendar\n \"\"\"\n\n _cacheable = False\n _prefix = 'CBMS'\n\n def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',\n holidays=None, calendar=None, **kwds):\n self.n = int(n)\n self.normalize = normalize\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,\n weekmask=weekmask, holidays=holidays,\n calendar=calendar, **kwds)\n self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)\n self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n dt_in = other\n # First move to month offset\n cur_mbegin = self.m_offset.rollback(dt_in)\n # Find this custom month offset\n cur_cmbegin = self.cbday.rollforward(cur_mbegin)\n\n # handle zero case. arbitrarily rollforward\n if n == 0 and dt_in != cur_cmbegin:\n n += 1\n\n if dt_in > cur_cmbegin and n <= -1:\n n += 1\n elif dt_in < cur_cmbegin and n >= 1:\n n -= 1\n\n new = cur_mbegin + n * self.m_offset\n result = self.cbday.rollforward(new)\n return result\n\n\nclass Week(DateOffset):\n \"\"\"\n Weekly offset\n\n Parameters\n ----------\n weekday : int, default None\n Always generate specific day of week. 
0 for Monday\n \"\"\"\n _adjust_dst = True\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = n\n self.normalize = normalize\n self.weekday = kwds.get('weekday', None)\n\n if self.weekday is not None:\n if self.weekday < 0 or self.weekday > 6:\n raise ValueError('Day must be 0<=day<=6, got %d' %\n self.weekday)\n\n self._inc = timedelta(weeks=1)\n self.kwds = kwds\n\n def isAnchored(self):\n return (self.n == 1 and self.weekday is not None)\n\n @apply_wraps\n def apply(self, other):\n base = other\n if self.weekday is None:\n return other + self.n * self._inc\n\n if self.n > 0:\n k = self.n\n otherDay = other.weekday()\n if otherDay != self.weekday:\n other = other + timedelta((self.weekday - otherDay) % 7)\n k = k - 1\n other = other\n for i in range(k):\n other = other + self._inc\n else:\n k = self.n\n otherDay = other.weekday()\n if otherDay != self.weekday:\n other = other + timedelta((self.weekday - otherDay) % 7)\n for i in range(-k):\n other = other - self._inc\n\n other = datetime(other.year, other.month, other.day,\n base.hour, base.minute, base.second, base.microsecond)\n return other\n\n @apply_index_wraps\n def apply_index(self, i):\n if self.weekday is None:\n return ((i.to_period('W') + self.n).to_timestamp() +\n i.to_perioddelta('W'))\n else:\n return self._end_apply_index(i, self.freqstr)\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n return dt.weekday() == self.weekday\n\n _prefix = 'W'\n\n @property\n def rule_code(self):\n suffix = ''\n if self.weekday is not None:\n suffix = '-%s' % (_int_to_weekday[self.weekday])\n return self._prefix + suffix\n\n @classmethod\n def _from_name(cls, suffix=None):\n if not suffix:\n weekday = None\n else:\n weekday = _weekday_to_int[suffix]\n return cls(weekday=weekday)\n\n\nclass WeekDay(object):\n MON = 0\n TUE = 1\n WED = 2\n THU = 3\n FRI = 4\n SAT = 5\n SUN = 6\n\n\n_int_to_weekday = {\n WeekDay.MON: 'MON',\n WeekDay.TUE: 'TUE',\n WeekDay.WED: 'WED',\n WeekDay.THU: 'THU',\n WeekDay.FRI: 'FRI',\n WeekDay.SAT: 'SAT',\n WeekDay.SUN: 'SUN'\n}\n\n_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items())\n\n\nclass WeekOfMonth(DateOffset):\n \"\"\"\n Describes monthly dates like \"the Tuesday of the 2nd week of each month\"\n\n Parameters\n ----------\n n : int\n week : {0, 1, 2, 3, ...}\n 0 is 1st week of month, 1 2nd week, etc.\n weekday : {0, 1, ..., 6}\n 0: Mondays\n 1: Tuesdays\n 2: Wednesdays\n 3: Thursdays\n 4: Fridays\n 5: Saturdays\n 6: Sundays\n \"\"\"\n\n _adjust_dst = True\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = n\n self.normalize = normalize\n self.weekday = kwds['weekday']\n self.week = kwds['week']\n\n if self.n == 0:\n raise ValueError('N cannot be 0')\n\n if self.weekday < 0 or self.weekday > 6:\n raise ValueError('Day must be 0<=day<=6, got %d' %\n self.weekday)\n if self.week < 0 or self.week > 3:\n raise ValueError('Week must be 0<=day<=3, got %d' %\n self.week)\n\n self.kwds = kwds\n\n @apply_wraps\n def apply(self, other):\n base = other\n offsetOfMonth = self.getOffsetOfMonth(other)\n\n if offsetOfMonth > other:\n if self.n > 0:\n months = self.n - 1\n else:\n months = self.n\n elif offsetOfMonth == other:\n months = self.n\n else:\n if self.n > 0:\n months = self.n\n else:\n months = self.n + 1\n\n other = self.getOffsetOfMonth(\n other + relativedelta(months=months, day=1))\n other = datetime(other.year, other.month, other.day, base.hour,\n base.minute, base.second, base.microsecond)\n return other\n\n def 
getOffsetOfMonth(self, dt):\n w = Week(weekday=self.weekday)\n d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)\n d = w.rollforward(d)\n\n for i in range(self.week):\n d = w.apply(d)\n\n return d\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)\n return d == self.getOffsetOfMonth(dt)\n\n @property\n def rule_code(self):\n return '%s-%d%s' % (self._prefix, self.week + 1,\n _int_to_weekday.get(self.weekday, ''))\n\n _prefix = 'WOM'\n\n @classmethod\n def _from_name(cls, suffix=None):\n if not suffix:\n raise ValueError(\"Prefix %r requires a suffix.\" % (cls._prefix))\n # TODO: handle n here...\n # only one digit weeks (1 --> week 0, 2 --> week 1, etc.)\n week = int(suffix[0]) - 1\n weekday = _weekday_to_int[suffix[1:]]\n return cls(week=week, weekday=weekday)\n\n\nclass LastWeekOfMonth(DateOffset):\n \"\"\"\n Describes monthly dates in last week of month like \"the last Tuesday of\n each month\"\n\n Parameters\n ----------\n n : int\n weekday : {0, 1, ..., 6}\n 0: Mondays\n 1: Tuesdays\n 2: Wednesdays\n 3: Thursdays\n 4: Fridays\n 5: Saturdays\n 6: Sundays\n \"\"\"\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = n\n self.normalize = normalize\n self.weekday = kwds['weekday']\n\n if self.n == 0:\n raise ValueError('N cannot be 0')\n\n if self.weekday < 0 or self.weekday > 6:\n raise ValueError('Day must be 0<=day<=6, got %d' %\n self.weekday)\n\n self.kwds = kwds\n\n @apply_wraps\n def apply(self, other):\n offsetOfMonth = self.getOffsetOfMonth(other)\n\n if offsetOfMonth > other:\n if self.n > 0:\n months = self.n - 1\n else:\n months = self.n\n elif offsetOfMonth == other:\n months = self.n\n else:\n if self.n > 0:\n months = self.n\n else:\n months = self.n + 1\n\n return self.getOffsetOfMonth(\n other + relativedelta(months=months, day=1))\n\n def getOffsetOfMonth(self, dt):\n m = MonthEnd()\n d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute,\n dt.second, dt.microsecond, tzinfo=dt.tzinfo)\n eom = m.rollforward(d)\n w = Week(weekday=self.weekday)\n return w.rollback(eom)\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n return dt == self.getOffsetOfMonth(dt)\n\n @property\n def rule_code(self):\n return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))\n\n _prefix = 'LWOM'\n\n @classmethod\n def _from_name(cls, suffix=None):\n if not suffix:\n raise ValueError(\"Prefix %r requires a suffix.\" % (cls._prefix))\n # TODO: handle n here...\n weekday = _weekday_to_int[suffix]\n return cls(weekday=weekday)\n\n\nclass QuarterOffset(DateOffset):\n \"\"\"Quarter representation - doesn't call super\"\"\"\n\n #: default month for __init__\n _default_startingMonth = None\n #: default month in _from_name\n _from_name_startingMonth = None\n _adjust_dst = True\n # TODO: Consider combining QuarterOffset and YearOffset __init__ at some\n # point\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = n\n self.normalize = normalize\n self.startingMonth = kwds.get('startingMonth',\n self._default_startingMonth)\n\n self.kwds = kwds\n\n def isAnchored(self):\n return (self.n == 1 and self.startingMonth is not None)\n\n @classmethod\n def _from_name(cls, suffix=None):\n kwargs = {}\n if suffix:\n kwargs['startingMonth'] = _month_to_int[suffix]\n else:\n if cls._from_name_startingMonth is not None:\n kwargs['startingMonth'] = cls._from_name_startingMonth\n return cls(**kwargs)\n\n @property\n def rule_code(self):\n 
return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])\n\n\nclass BQuarterEnd(QuarterOffset):\n \"\"\"DateOffset increments between business Quarter dates\n startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...\n startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...\n startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...\n \"\"\"\n _outputName = 'BusinessQuarterEnd'\n _default_startingMonth = 3\n # 'BQ'\n _from_name_startingMonth = 12\n _prefix = 'BQ'\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n base = other\n other = datetime(other.year, other.month, other.day,\n other.hour, other.minute, other.second,\n other.microsecond)\n\n wkday, days_in_month = tslib.monthrange(other.year, other.month)\n lastBDay = days_in_month - max(((wkday + days_in_month - 1)\n % 7) - 4, 0)\n\n monthsToGo = 3 - ((other.month - self.startingMonth) % 3)\n if monthsToGo == 3:\n monthsToGo = 0\n\n if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):\n n = n - 1\n elif n <= 0 and other.day > lastBDay and monthsToGo == 0:\n n = n + 1\n\n other = other + relativedelta(months=monthsToGo + 3 * n, day=31)\n other = tslib._localize_pydatetime(other, base.tzinfo)\n if other.weekday() > 4:\n other = other - BDay()\n return other\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n modMonth = (dt.month - self.startingMonth) % 3\n return BMonthEnd().onOffset(dt) and modMonth == 0\n\n\n_int_to_month = tslib._MONTH_ALIASES\n_month_to_int = dict((v, k) for k, v in _int_to_month.items())\n\n\n# TODO: This is basically the same as BQuarterEnd\nclass BQuarterBegin(QuarterOffset):\n _outputName = \"BusinessQuarterBegin\"\n # I suspect this is wrong for *all* of them.\n _default_startingMonth = 3\n _from_name_startingMonth = 1\n _prefix = 'BQS'\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n wkday, _ = tslib.monthrange(other.year, other.month)\n\n first = _get_firstbday(wkday)\n\n monthsSince = (other.month - self.startingMonth) % 3\n\n if n <= 0 and monthsSince != 0: # make sure to roll forward so negate\n monthsSince = monthsSince - 3\n\n # roll forward if on same month later than first bday\n if n <= 0 and (monthsSince == 0 and other.day > first):\n n = n + 1\n # pretend to roll back if on same month but before firstbday\n elif n > 0 and (monthsSince == 0 and other.day < first):\n n = n - 1\n\n # get the first bday for result\n other = other + relativedelta(months=3 * n - monthsSince)\n wkday, _ = tslib.monthrange(other.year, other.month)\n first = _get_firstbday(wkday)\n result = datetime(other.year, other.month, first,\n other.hour, other.minute, other.second,\n other.microsecond)\n return result\n\n\nclass QuarterEnd(QuarterOffset):\n \"\"\"DateOffset increments between business Quarter dates\n startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...\n startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...\n startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...\n \"\"\"\n _outputName = 'QuarterEnd'\n _default_startingMonth = 3\n _prefix = 'Q'\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = n\n self.normalize = normalize\n self.startingMonth = kwds.get('startingMonth', 3)\n\n self.kwds = kwds\n\n def isAnchored(self):\n return (self.n == 1 and self.startingMonth is not None)\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n other = datetime(other.year, other.month, other.day,\n other.hour, other.minute, other.second,\n 
other.microsecond)\n wkday, days_in_month = tslib.monthrange(other.year, other.month)\n\n monthsToGo = 3 - ((other.month - self.startingMonth) % 3)\n if monthsToGo == 3:\n monthsToGo = 0\n\n if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):\n n = n - 1\n\n other = other + relativedelta(months=monthsToGo + 3 * n, day=31)\n return other\n\n @apply_index_wraps\n def apply_index(self, i):\n return self._end_apply_index(i, self.freqstr)\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n modMonth = (dt.month - self.startingMonth) % 3\n return MonthEnd().onOffset(dt) and modMonth == 0\n\n\nclass QuarterBegin(QuarterOffset):\n _outputName = 'QuarterBegin'\n _default_startingMonth = 3\n _from_name_startingMonth = 1\n _prefix = 'QS'\n\n def isAnchored(self):\n return (self.n == 1 and self.startingMonth is not None)\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n wkday, days_in_month = tslib.monthrange(other.year, other.month)\n\n monthsSince = (other.month - self.startingMonth) % 3\n\n if n <= 0 and monthsSince != 0:\n # make sure you roll forward, so negate\n monthsSince = monthsSince - 3\n\n if n <= 0 and (monthsSince == 0 and other.day > 1):\n # after start, so come back an extra period as if rolled forward\n n = n + 1\n\n other = other + relativedelta(months=3 * n - monthsSince, day=1)\n return other\n\n @apply_index_wraps\n def apply_index(self, i):\n freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1\n # freq_month = self.startingMonth\n freqstr = 'Q-%s' % (_int_to_month[freq_month],)\n return self._beg_apply_index(i, freqstr)\n\n\nclass YearOffset(DateOffset):\n \"\"\"DateOffset that just needs a month\"\"\"\n _adjust_dst = True\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.month = kwds.get('month', self._default_month)\n\n if self.month < 1 or self.month > 12:\n raise ValueError('Month must go from 1 to 12')\n\n DateOffset.__init__(self, n=n, normalize=normalize, **kwds)\n\n @classmethod\n def _from_name(cls, suffix=None):\n kwargs = {}\n if suffix:\n kwargs['month'] = _month_to_int[suffix]\n return cls(**kwargs)\n\n @property\n def rule_code(self):\n return '%s-%s' % (self._prefix, _int_to_month[self.month])\n\n\nclass BYearEnd(YearOffset):\n \"\"\"DateOffset increments between business EOM dates\"\"\"\n _outputName = 'BusinessYearEnd'\n _default_month = 12\n _prefix = 'BA'\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n wkday, days_in_month = tslib.monthrange(other.year, self.month)\n lastBDay = (days_in_month -\n max(((wkday + days_in_month - 1) % 7) - 4, 0))\n\n years = n\n if n > 0:\n if (other.month < self.month or\n (other.month == self.month and other.day < lastBDay)):\n years -= 1\n elif n <= 0:\n if (other.month > self.month or\n (other.month == self.month and other.day > lastBDay)):\n years += 1\n\n other = other + relativedelta(years=years)\n\n _, days_in_month = tslib.monthrange(other.year, self.month)\n result = datetime(other.year, self.month, days_in_month,\n other.hour, other.minute, other.second,\n other.microsecond)\n\n if result.weekday() > 4:\n result = result - BDay()\n\n return result\n\n\nclass BYearBegin(YearOffset):\n \"\"\"DateOffset increments between business year begin dates\"\"\"\n _outputName = 'BusinessYearBegin'\n _default_month = 1\n _prefix = 'BAS'\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n wkday, days_in_month = tslib.monthrange(other.year, self.month)\n\n first = _get_firstbday(wkday)\n\n years = n\n\n if n > 0: # roll back 
first for positive n\n if (other.month < self.month or\n (other.month == self.month and other.day < first)):\n years -= 1\n elif n <= 0: # roll forward\n if (other.month > self.month or\n (other.month == self.month and other.day > first)):\n years += 1\n\n # set first bday for result\n other = other + relativedelta(years=years)\n wkday, days_in_month = tslib.monthrange(other.year, self.month)\n first = _get_firstbday(wkday)\n return datetime(other.year, self.month, first, other.hour,\n other.minute, other.second, other.microsecond)\n\n\nclass YearEnd(YearOffset):\n \"\"\"DateOffset increments between calendar year ends\"\"\"\n _default_month = 12\n _prefix = 'A'\n\n @apply_wraps\n def apply(self, other):\n def _increment(date):\n if date.month == self.month:\n _, days_in_month = tslib.monthrange(date.year, self.month)\n if date.day != days_in_month:\n year = date.year\n else:\n year = date.year + 1\n elif date.month < self.month:\n year = date.year\n else:\n year = date.year + 1\n _, days_in_month = tslib.monthrange(year, self.month)\n return datetime(year, self.month, days_in_month,\n date.hour, date.minute, date.second,\n date.microsecond)\n\n def _decrement(date):\n year = date.year if date.month > self.month else date.year - 1\n _, days_in_month = tslib.monthrange(year, self.month)\n return datetime(year, self.month, days_in_month,\n date.hour, date.minute, date.second,\n date.microsecond)\n\n def _rollf(date):\n if date.month != self.month or\\\n date.day < tslib.monthrange(date.year, date.month)[1]:\n date = _increment(date)\n return date\n\n n = self.n\n result = other\n if n > 0:\n while n > 0:\n result = _increment(result)\n n -= 1\n elif n < 0:\n while n < 0:\n result = _decrement(result)\n n += 1\n else:\n # n == 0, roll forward\n result = _rollf(result)\n return result\n\n @apply_index_wraps\n def apply_index(self, i):\n # convert month anchor to annual period tuple\n return self._end_apply_index(i, self.freqstr)\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n wkday, days_in_month = tslib.monthrange(dt.year, self.month)\n return self.month == dt.month and dt.day == days_in_month\n\n\nclass YearBegin(YearOffset):\n \"\"\"DateOffset increments between calendar year begin dates\"\"\"\n _default_month = 1\n _prefix = 'AS'\n\n @apply_wraps\n def apply(self, other):\n def _increment(date, n):\n year = date.year + n - 1\n if date.month >= self.month:\n year += 1\n return datetime(year, self.month, 1, date.hour, date.minute,\n date.second, date.microsecond)\n\n def _decrement(date, n):\n year = date.year + n + 1\n if date.month < self.month or (date.month == self.month and\n date.day == 1):\n year -= 1\n return datetime(year, self.month, 1, date.hour, date.minute,\n date.second, date.microsecond)\n\n def _rollf(date):\n if (date.month != self.month) or date.day > 1:\n date = _increment(date, 1)\n return date\n\n n = self.n\n result = other\n if n > 0:\n result = _increment(result, n)\n elif n < 0:\n result = _decrement(result, n)\n else:\n # n == 0, roll forward\n result = _rollf(result)\n return result\n\n @apply_index_wraps\n def apply_index(self, i):\n freq_month = 12 if self.month == 1 else self.month - 1\n freqstr = 'A-%s' % (_int_to_month[freq_month],)\n return self._beg_apply_index(i, freqstr)\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n return dt.month == self.month and dt.day == 1\n\n\nclass FY5253(DateOffset):\n \"\"\"\n Describes 52-53 week fiscal year. 
This is also known as a 4-4-5 calendar.\n\n It is used by companies that desire that their\n fiscal year always end on the same day of the week.\n\n It is a method of managing accounting periods.\n It is a common calendar structure for some industries,\n such as retail, manufacturing and parking industry.\n\n For more information see:\n http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar\n\n\n The year may either:\n - end on the last X day of the Y month.\n - end on the last X day closest to the last day of the Y month.\n\n X is a specific day of the week.\n Y is a certain month of the year\n\n Parameters\n ----------\n n : int\n weekday : {0, 1, ..., 6}\n 0: Mondays\n 1: Tuesdays\n 2: Wednesdays\n 3: Thursdays\n 4: Fridays\n 5: Saturdays\n 6: Sundays\n startingMonth : The month in which fiscal years end. {1, 2, ... 12}\n variation : str\n {\"nearest\", \"last\"} for \"LastOfMonth\" or \"NearestEndMonth\"\n \"\"\"\n\n _prefix = 'RE'\n _suffix_prefix_last = 'L'\n _suffix_prefix_nearest = 'N'\n _adjust_dst = True\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = n\n self.normalize = normalize\n self.startingMonth = kwds['startingMonth']\n self.weekday = kwds[\"weekday\"]\n\n self.variation = kwds[\"variation\"]\n\n self.kwds = kwds\n\n if self.n == 0:\n raise ValueError('N cannot be 0')\n\n if self.variation not in [\"nearest\", \"last\"]:\n raise ValueError('%s is not a valid variation' % self.variation)\n\n if self.variation == \"nearest\":\n weekday_offset = weekday(self.weekday)\n self._rd_forward = relativedelta(weekday=weekday_offset)\n self._rd_backward = relativedelta(weekday=weekday_offset(-1))\n else:\n self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)\n\n def isAnchored(self):\n return self.n == 1 \\\n and self.startingMonth is not None \\\n and self.weekday is not None\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n dt = datetime(dt.year, dt.month, dt.day)\n year_end = self.get_year_end(dt)\n\n if self.variation == \"nearest\":\n # We have to check the year end of \"this\" cal year AND the previous\n return year_end == dt or \\\n self.get_year_end(dt - relativedelta(months=1)) == dt\n else:\n return year_end == dt\n\n @apply_wraps\n def apply(self, other):\n n = self.n\n prev_year = self.get_year_end(\n datetime(other.year - 1, self.startingMonth, 1))\n cur_year = self.get_year_end(\n datetime(other.year, self.startingMonth, 1))\n next_year = self.get_year_end(\n datetime(other.year + 1, self.startingMonth, 1))\n prev_year = tslib._localize_pydatetime(prev_year, other.tzinfo)\n cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo)\n next_year = tslib._localize_pydatetime(next_year, other.tzinfo)\n\n if n > 0:\n if other == prev_year:\n year = other.year - 1\n elif other == cur_year:\n year = other.year\n elif other == next_year:\n year = other.year + 1\n elif other < prev_year:\n year = other.year - 1\n n -= 1\n elif other < cur_year:\n year = other.year\n n -= 1\n elif other < next_year:\n year = other.year + 1\n n -= 1\n else:\n assert False\n\n result = self.get_year_end(\n datetime(year + n, self.startingMonth, 1))\n\n result = datetime(result.year, result.month, result.day,\n other.hour, other.minute, other.second,\n other.microsecond)\n return result\n else:\n n = -n\n if other == prev_year:\n year = other.year - 1\n elif other == cur_year:\n year = other.year\n elif other == next_year:\n year = other.year + 1\n elif other > next_year:\n year = other.year + 1\n n -= 1\n elif other > 
cur_year:\n year = other.year\n n -= 1\n elif other > prev_year:\n year = other.year - 1\n n -= 1\n else:\n assert False\n\n result = self.get_year_end(\n datetime(year - n, self.startingMonth, 1))\n\n result = datetime(result.year, result.month, result.day,\n other.hour, other.minute, other.second,\n other.microsecond)\n return result\n\n def get_year_end(self, dt):\n if self.variation == \"nearest\":\n return self._get_year_end_nearest(dt)\n else:\n return self._get_year_end_last(dt)\n\n def get_target_month_end(self, dt):\n target_month = datetime(\n dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)\n next_month_first_of = target_month + relativedelta(months=+1)\n return next_month_first_of + relativedelta(days=-1)\n\n def _get_year_end_nearest(self, dt):\n target_date = self.get_target_month_end(dt)\n if target_date.weekday() == self.weekday:\n return target_date\n else:\n forward = target_date + self._rd_forward\n backward = target_date + self._rd_backward\n\n if forward - target_date < target_date - backward:\n return forward\n else:\n return backward\n\n def _get_year_end_last(self, dt):\n current_year = datetime(\n dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)\n return current_year + self._offset_lwom\n\n @property\n def rule_code(self):\n suffix = self.get_rule_code_suffix()\n return \"%s-%s\" % (self._get_prefix(), suffix)\n\n def _get_prefix(self):\n return self._prefix\n\n def _get_suffix_prefix(self):\n if self.variation == \"nearest\":\n return self._suffix_prefix_nearest\n else:\n return self._suffix_prefix_last\n\n def get_rule_code_suffix(self):\n return '%s-%s-%s' % (self._get_suffix_prefix(),\n _int_to_month[self.startingMonth],\n _int_to_weekday[self.weekday])\n\n @classmethod\n def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):\n if varion_code == \"N\":\n variation = \"nearest\"\n elif varion_code == \"L\":\n variation = \"last\"\n else:\n raise ValueError(\n \"Unable to parse varion_code: %s\" % (varion_code,))\n\n startingMonth = _month_to_int[startingMonth_code]\n weekday = _weekday_to_int[weekday_code]\n\n return {\n \"weekday\": weekday,\n \"startingMonth\": startingMonth,\n \"variation\": variation,\n }\n\n @classmethod\n def _from_name(cls, *args):\n return cls(**cls._parse_suffix(*args))\n\n\nclass FY5253Quarter(DateOffset):\n \"\"\"\n DateOffset increments between business quarter dates\n for 52-53 week fiscal year (also known as a 4-4-5 calendar).\n\n It is used by companies that desire that their\n fiscal year always end on the same day of the week.\n\n It is a method of managing accounting periods.\n It is a common calendar structure for some industries,\n such as retail, manufacturing and parking industry.\n\n For more information see:\n http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar\n\n The year may either:\n - end on the last X day of the Y month.\n - end on the last X day closest to the last day of the Y month.\n\n X is a specific day of the week.\n Y is a certain month of the year\n\n startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...\n startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...\n startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...\n\n Parameters\n ----------\n n : int\n weekday : {0, 1, ..., 6}\n 0: Mondays\n 1: Tuesdays\n 2: Wednesdays\n 3: Thursdays\n 4: Fridays\n 5: Saturdays\n 6: Sundays\n startingMonth : The month in which fiscal years end. {1, 2, ... 12}\n qtr_with_extra_week : The quarter number that has the leap\n or 14 week when needed. 
{1,2,3,4}\n variation : str\n {\"nearest\", \"last\"} for \"LastOfMonth\" or \"NearestEndMonth\"\n \"\"\"\n\n _prefix = 'REQ'\n _adjust_dst = True\n\n def __init__(self, n=1, normalize=False, **kwds):\n self.n = n\n self.normalize = normalize\n\n self.qtr_with_extra_week = kwds[\"qtr_with_extra_week\"]\n\n self.kwds = kwds\n\n if self.n == 0:\n raise ValueError('N cannot be 0')\n\n self._offset = FY5253(\n startingMonth=kwds['startingMonth'],\n weekday=kwds[\"weekday\"],\n variation=kwds[\"variation\"])\n\n def isAnchored(self):\n return self.n == 1 and self._offset.isAnchored()\n\n @apply_wraps\n def apply(self, other):\n base = other\n n = self.n\n\n if n > 0:\n while n > 0:\n if not self._offset.onOffset(other):\n qtr_lens = self.get_weeks(other)\n start = other - self._offset\n else:\n start = other\n qtr_lens = self.get_weeks(other + self._offset)\n\n for weeks in qtr_lens:\n start += relativedelta(weeks=weeks)\n if start > other:\n other = start\n n -= 1\n break\n\n else:\n n = -n\n while n > 0:\n if not self._offset.onOffset(other):\n qtr_lens = self.get_weeks(other)\n end = other + self._offset\n else:\n end = other\n qtr_lens = self.get_weeks(other)\n\n for weeks in reversed(qtr_lens):\n end -= relativedelta(weeks=weeks)\n if end < other:\n other = end\n n -= 1\n break\n other = datetime(other.year, other.month, other.day,\n base.hour, base.minute, base.second, base.microsecond)\n return other\n\n def get_weeks(self, dt):\n ret = [13] * 4\n\n year_has_extra_week = self.year_has_extra_week(dt)\n\n if year_has_extra_week:\n ret[self.qtr_with_extra_week - 1] = 14\n\n return ret\n\n def year_has_extra_week(self, dt):\n if self._offset.onOffset(dt):\n prev_year_end = dt - self._offset\n next_year_end = dt\n else:\n next_year_end = dt + self._offset\n prev_year_end = dt - self._offset\n\n week_in_year = (next_year_end - prev_year_end).days / 7\n\n return week_in_year == 53\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n if self._offset.onOffset(dt):\n return True\n\n next_year_end = dt - self._offset\n\n qtr_lens = self.get_weeks(dt)\n\n current = next_year_end\n for qtr_len in qtr_lens[0:4]:\n current += relativedelta(weeks=qtr_len)\n if dt == current:\n return True\n return False\n\n @property\n def rule_code(self):\n suffix = self._offset.get_rule_code_suffix()\n return \"%s-%s\" % (self._prefix,\n \"%s-%d\" % (suffix, self.qtr_with_extra_week))\n\n @classmethod\n def _from_name(cls, *args):\n return cls(**dict(FY5253._parse_suffix(*args[:-1]),\n qtr_with_extra_week=int(args[-1])))\n\n\nclass Easter(DateOffset):\n \"\"\"\n DateOffset for the Easter holiday using\n logic defined in dateutil. 
Right now uses\n the revised method which is valid in years\n 1583-4099.\n \"\"\"\n _adjust_dst = True\n\n def __init__(self, n=1, **kwds):\n super(Easter, self).__init__(n, **kwds)\n\n @apply_wraps\n def apply(self, other):\n currentEaster = easter(other.year)\n currentEaster = datetime(\n currentEaster.year, currentEaster.month, currentEaster.day)\n currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo)\n\n # NOTE: easter returns a datetime.date so we have to convert to type of\n # other\n if self.n >= 0:\n if other >= currentEaster:\n new = easter(other.year + self.n)\n else:\n new = easter(other.year + self.n - 1)\n else:\n if other > currentEaster:\n new = easter(other.year + self.n + 1)\n else:\n new = easter(other.year + self.n)\n\n new = datetime(new.year, new.month, new.day, other.hour,\n other.minute, other.second, other.microsecond)\n return new\n\n def onOffset(self, dt):\n if self.normalize and not _is_normalized(dt):\n return False\n return date(dt.year, dt.month, dt.day) == easter(dt.year)\n\n# ---------------------------------------------------------------------\n# Ticks\n\n\ndef _tick_comp(op):\n def f(self, other):\n return op(self.delta, other.delta)\n\n return f\n\n\nclass Tick(SingleConstructorOffset):\n _inc = Timedelta(microseconds=1000)\n\n __gt__ = _tick_comp(operator.gt)\n __ge__ = _tick_comp(operator.ge)\n __lt__ = _tick_comp(operator.lt)\n __le__ = _tick_comp(operator.le)\n __eq__ = _tick_comp(operator.eq)\n __ne__ = _tick_comp(operator.ne)\n\n def __add__(self, other):\n if isinstance(other, Tick):\n if type(self) == type(other):\n return type(self)(self.n + other.n)\n else:\n return _delta_to_tick(self.delta + other.delta)\n elif isinstance(other, ABCPeriod):\n return other + self\n try:\n return self.apply(other)\n except ApplyTypeError:\n return NotImplemented\n except OverflowError:\n raise OverflowError(\"the add operation between {} and {} \"\n \"will overflow\".format(self, other))\n\n def __eq__(self, other):\n if isinstance(other, compat.string_types):\n from pandas.tseries.frequencies import to_offset\n\n other = to_offset(other)\n\n if isinstance(other, Tick):\n return self.delta == other.delta\n else:\n return DateOffset.__eq__(self, other)\n\n # This is identical to DateOffset.__hash__, but has to be redefined here\n # for Python 3, because we've redefined __eq__.\n def __hash__(self):\n return hash(self._params())\n\n def __ne__(self, other):\n if isinstance(other, compat.string_types):\n from pandas.tseries.frequencies import to_offset\n\n other = to_offset(other)\n\n if isinstance(other, Tick):\n return self.delta != other.delta\n else:\n return DateOffset.__ne__(self, other)\n\n @property\n def delta(self):\n return self.n * self._inc\n\n @property\n def nanos(self):\n return _delta_to_nanoseconds(self.delta)\n\n def apply(self, other):\n # Timestamp can handle tz and nano sec, thus no need to use apply_wraps\n if isinstance(other, Timestamp):\n\n # GH 15126\n # in order to avoid a recursive\n # call of __add__ and __radd__ if there is\n # an exception, when we call using the + operator,\n # we directly call the known method\n result = other.__add__(self)\n if result == NotImplemented:\n raise OverflowError\n return result\n elif isinstance(other, (datetime, np.datetime64, date)):\n return as_timestamp(other) + self\n\n if isinstance(other, timedelta):\n return other + self.delta\n elif isinstance(other, type(self)):\n return type(self)(self.n + other.n)\n\n raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)\n\n 
_prefix = 'undefined'\n\n def isAnchored(self):\n return False\n\n\ndef _delta_to_tick(delta):\n if delta.microseconds == 0:\n if delta.seconds == 0:\n return Day(delta.days)\n else:\n seconds = delta.days * 86400 + delta.seconds\n if seconds % 3600 == 0:\n return Hour(seconds / 3600)\n elif seconds % 60 == 0:\n return Minute(seconds / 60)\n else:\n return Second(seconds)\n else:\n nanos = _delta_to_nanoseconds(delta)\n if nanos % 1000000 == 0:\n return Milli(nanos // 1000000)\n elif nanos % 1000 == 0:\n return Micro(nanos // 1000)\n else: # pragma: no cover\n return Nano(nanos)\n\n\n_delta_to_nanoseconds = tslib._delta_to_nanoseconds\n\n\nclass Day(Tick):\n _inc = Timedelta(days=1)\n _prefix = 'D'\n\n\nclass Hour(Tick):\n _inc = Timedelta(hours=1)\n _prefix = 'H'\n\n\nclass Minute(Tick):\n _inc = Timedelta(minutes=1)\n _prefix = 'T'\n\n\nclass Second(Tick):\n _inc = Timedelta(seconds=1)\n _prefix = 'S'\n\n\nclass Milli(Tick):\n _inc = Timedelta(milliseconds=1)\n _prefix = 'L'\n\n\nclass Micro(Tick):\n _inc = Timedelta(microseconds=1)\n _prefix = 'U'\n\n\nclass Nano(Tick):\n _inc = Timedelta(nanoseconds=1)\n _prefix = 'N'\n\n\nBDay = BusinessDay\nBMonthEnd = BusinessMonthEnd\nBMonthBegin = BusinessMonthBegin\nCBMonthEnd = CustomBusinessMonthEnd\nCBMonthBegin = CustomBusinessMonthBegin\nCDay = CustomBusinessDay\n\n\ndef _get_firstbday(wkday):\n \"\"\"\n wkday is the result of monthrange(year, month)\n\n If it's a saturday or sunday, increment first business day to reflect this\n \"\"\"\n first = 1\n if wkday == 5: # on Saturday\n first = 3\n elif wkday == 6: # on Sunday\n first = 2\n return first\n\n\ndef generate_range(start=None, end=None, periods=None,\n offset=BDay(), time_rule=None):\n \"\"\"\n Generates a sequence of dates corresponding to the specified time\n offset. 
Similar to dateutil.rrule except uses pandas DateOffset\n objects to represent time increments\n\n Parameters\n ----------\n start : datetime (default None)\n end : datetime (default None)\n periods : int, optional\n time_rule : (legacy) name of DateOffset object to be used, optional\n Corresponds with names expected by tseries.frequencies.get_offset\n\n Notes\n -----\n * This method is faster for generating weekdays than dateutil.rrule\n * At least two of (start, end, periods) must be specified.\n * If both start and end are specified, the returned dates will\n satisfy start <= date <= end.\n * If both time_rule and offset are specified, time_rule supersedes offset.\n\n Returns\n -------\n dates : generator object\n\n \"\"\"\n if time_rule is not None:\n from pandas.tseries.frequencies import get_offset\n\n offset = get_offset(time_rule)\n\n start = to_datetime(start)\n end = to_datetime(end)\n\n if start and not offset.onOffset(start):\n start = offset.rollforward(start)\n\n elif end and not offset.onOffset(end):\n end = offset.rollback(end)\n\n if periods is None and end < start:\n end = None\n periods = 0\n\n if end is None:\n end = start + (periods - 1) * offset\n\n if start is None:\n start = end - (periods - 1) * offset\n\n cur = start\n if offset.n >= 0:\n while cur <= end:\n yield cur\n\n # faster than cur + offset\n next_date = offset.apply(cur)\n if next_date <= cur:\n raise ValueError('Offset %s did not increment date' % offset)\n cur = next_date\n else:\n while cur >= end:\n yield cur\n\n # faster than cur + offset\n next_date = offset.apply(cur)\n if next_date >= cur:\n raise ValueError('Offset %s did not decrement date' % offset)\n cur = next_date\n\n\nprefix_mapping = dict((offset._prefix, offset) for offset in [\n YearBegin, # 'AS'\n YearEnd, # 'A'\n BYearBegin, # 'BAS'\n BYearEnd, # 'BA'\n BusinessDay, # 'B'\n BusinessMonthBegin, # 'BMS'\n BusinessMonthEnd, # 'BM'\n BQuarterEnd, # 'BQ'\n BQuarterBegin, # 'BQS'\n BusinessHour, # 'BH'\n CustomBusinessDay, # 'C'\n CustomBusinessMonthEnd, # 'CBM'\n CustomBusinessMonthBegin, # 'CBMS'\n CustomBusinessHour, # 'CBH'\n MonthEnd, # 'M'\n MonthBegin, # 'MS'\n SemiMonthEnd, # 'SM'\n SemiMonthBegin, # 'SMS'\n Week, # 'W'\n Second, # 'S'\n Minute, # 'T'\n Micro, # 'U'\n QuarterEnd, # 'Q'\n QuarterBegin, # 'QS'\n Milli, # 'L'\n Hour, # 'H'\n Day, # 'D'\n WeekOfMonth, # 'WOM'\n FY5253,\n FY5253Quarter,\n])\n\nprefix_mapping['N'] = Nano\n", "# -*- coding: utf-8 -*-\n\n\"\"\"\nTests that apply specifically to the CParser. 
Unless specifically stated\nas a CParser-specific issue, the goal is to eventually move as many of\nthese tests out of this module as soon as the Python parser can accept\nfurther arguments when parsing.\n\"\"\"\n\nimport pytest\nimport numpy as np\n\nimport pandas as pd\nimport pandas.util.testing as tm\nfrom pandas import DataFrame\nfrom pandas import compat\nfrom pandas.compat import StringIO, range, lrange\n\n\nclass CParserTests(object):\n\n def test_buffer_overflow(self):\n # see gh-9205: test certain malformed input files that cause\n # buffer overflows in tokenizer.c\n\n malfw = \"1\\r1\\r1\\r 1\\r 1\\r\" # buffer overflow in words pointer\n malfs = \"1\\r1\\r1\\r 1\\r 1\\r11\\r\" # buffer overflow in stream pointer\n malfl = \"1\\r1\\r1\\r 1\\r 1\\r11\\r1\\r\" # buffer overflow in lines pointer\n\n cperr = 'Buffer overflow caught - possible malformed input file.'\n\n for malf in (malfw, malfs, malfl):\n try:\n self.read_table(StringIO(malf))\n except Exception as err:\n self.assertIn(cperr, str(err))\n\n def test_buffer_rd_bytes(self):\n # see gh-12098: src->buffer in the C parser can be freed twice leading\n # to a segfault if a corrupt gzip file is read with 'read_csv' and the\n # buffer is filled more than once before gzip throws an exception\n\n data = '\\x1F\\x8B\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x03\\xED\\xC3\\x41\\x09' \\\n '\\x00\\x00\\x08\\x00\\xB1\\xB7\\xB6\\xBA\\xFE\\xA5\\xCC\\x21\\x6C\\xB0' \\\n '\\xA6\\x4D' + '\\x55' * 267 + \\\n '\\x7D\\xF7\\x00\\x91\\xE0\\x47\\x97\\x14\\x38\\x04\\x00' \\\n '\\x1f\\x8b\\x08\\x00VT\\x97V\\x00\\x03\\xed]\\xefO'\n for i in range(100):\n try:\n self.read_csv(StringIO(data),\n compression='gzip',\n delim_whitespace=True)\n except Exception:\n pass\n\n def test_delim_whitespace_custom_terminator(self):\n # See gh-12912\n data = \"\"\"a b c~1 2 3~4 5 6~7 8 9\"\"\"\n df = self.read_csv(StringIO(data), lineterminator='~',\n delim_whitespace=True)\n expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=['a', 'b', 'c'])\n tm.assert_frame_equal(df, expected)\n\n def test_dtype_and_names_error(self):\n # see gh-8833: passing both dtype and names\n # resulting in an error reporting issue\n data = \"\"\"\n1.0 1\n2.0 2\n3.0 3\n\"\"\"\n # base cases\n result = self.read_csv(StringIO(data), sep=r'\\s+', header=None)\n expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(StringIO(data), sep=r'\\s+',\n header=None, names=['a', 'b'])\n expected = DataFrame(\n [[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])\n tm.assert_frame_equal(result, expected)\n\n # fallback casting\n result = self.read_csv(StringIO(\n data), sep=r'\\s+', header=None,\n names=['a', 'b'], dtype={'a': np.int32})\n expected = DataFrame([[1, 1], [2, 2], [3, 3]],\n columns=['a', 'b'])\n expected['a'] = expected['a'].astype(np.int32)\n tm.assert_frame_equal(result, expected)\n\n data = \"\"\"\n1.0 1\nnan 2\n3.0 3\n\"\"\"\n # fallback casting, but not castable\n with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):\n self.read_csv(StringIO(data), sep=r'\\s+', header=None,\n names=['a', 'b'], dtype={'a': np.int32})\n\n def test_unsupported_dtype(self):\n df = DataFrame(np.random.rand(5, 2), columns=list(\n 'AB'), index=['1A', '1B', '1C', '1D', '1E'])\n\n with tm.ensure_clean('__unsupported_dtype__.csv') as path:\n df.to_csv(path)\n\n # valid but we don't support it (date)\n self.assertRaises(TypeError, self.read_csv, path,\n dtype={'A': 'datetime64', 'B': 'float64'},\n index_col=0)\n 
self.assertRaises(TypeError, self.read_csv, path,\n dtype={'A': 'datetime64', 'B': 'float64'},\n index_col=0, parse_dates=['B'])\n\n # valid but we don't support it\n self.assertRaises(TypeError, self.read_csv, path,\n dtype={'A': 'timedelta64', 'B': 'float64'},\n index_col=0)\n\n # valid but unsupported - fixed width unicode string\n self.assertRaises(TypeError, self.read_csv, path,\n dtype={'A': 'U8'},\n index_col=0)\n\n def test_precise_conversion(self):\n # see gh-8002\n tm._skip_if_32bit()\n from decimal import Decimal\n\n normal_errors = []\n precise_errors = []\n\n # test numbers between 1 and 2\n for num in np.linspace(1., 2., num=500):\n # 25 decimal digits of precision\n text = 'a\\n{0:.25}'.format(num)\n\n normal_val = float(self.read_csv(StringIO(text))['a'][0])\n precise_val = float(self.read_csv(\n StringIO(text), float_precision='high')['a'][0])\n roundtrip_val = float(self.read_csv(\n StringIO(text), float_precision='round_trip')['a'][0])\n actual_val = Decimal(text[2:])\n\n def error(val):\n return abs(Decimal('{0:.100}'.format(val)) - actual_val)\n\n normal_errors.append(error(normal_val))\n precise_errors.append(error(precise_val))\n\n # round-trip should match float()\n self.assertEqual(roundtrip_val, float(text[2:]))\n\n self.assertTrue(sum(precise_errors) <= sum(normal_errors))\n self.assertTrue(max(precise_errors) <= max(normal_errors))\n\n def test_pass_dtype_as_recarray(self):\n if compat.is_platform_windows() and self.low_memory:\n pytest.skip(\n \"segfaults on win-64, only when all tests are run\")\n\n data = \"\"\"\\\none,two\n1,2.5\n2,3.5\n3,4.5\n4,5.5\"\"\"\n\n with tm.assert_produces_warning(\n FutureWarning, check_stacklevel=False):\n result = self.read_csv(StringIO(data), dtype={\n 'one': 'u1', 1: 'S1'}, as_recarray=True)\n self.assertEqual(result['one'].dtype, 'u1')\n self.assertEqual(result['two'].dtype, 'S1')\n\n def test_usecols_dtypes(self):\n data = \"\"\"\\\n1,2,3\n4,5,6\n7,8,9\n10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data), usecols=(0, 1, 2),\n names=('a', 'b', 'c'),\n header=None,\n converters={'a': str},\n dtype={'b': int, 'c': float},\n )\n result2 = self.read_csv(StringIO(data), usecols=(0, 2),\n names=('a', 'b', 'c'),\n header=None,\n converters={'a': str},\n dtype={'b': int, 'c': float},\n )\n self.assertTrue((result.dtypes == [object, np.int, np.float]).all())\n self.assertTrue((result2.dtypes == [object, np.float]).all())\n\n def test_disable_bool_parsing(self):\n # #2090\n\n data = \"\"\"A,B,C\nYes,No,Yes\nNo,Yes,Yes\nYes,,Yes\nNo,No,No\"\"\"\n\n result = self.read_csv(StringIO(data), dtype=object)\n self.assertTrue((result.dtypes == object).all())\n\n result = self.read_csv(StringIO(data), dtype=object, na_filter=False)\n self.assertEqual(result['B'][2], '')\n\n def test_custom_lineterminator(self):\n data = 'a,b,c~1,2,3~4,5,6'\n\n result = self.read_csv(StringIO(data), lineterminator='~')\n expected = self.read_csv(StringIO(data.replace('~', '\\n')))\n\n tm.assert_frame_equal(result, expected)\n\n def test_parse_ragged_csv(self):\n data = \"\"\"1,2,3\n1,2,3,4\n1,2,3,4,5\n1,2\n1,2,3,4\"\"\"\n\n nice_data = \"\"\"1,2,3,,\n1,2,3,4,\n1,2,3,4,5\n1,2,,,\n1,2,3,4,\"\"\"\n result = self.read_csv(StringIO(data), header=None,\n names=['a', 'b', 'c', 'd', 'e'])\n\n expected = self.read_csv(StringIO(nice_data), header=None,\n names=['a', 'b', 'c', 'd', 'e'])\n\n tm.assert_frame_equal(result, expected)\n\n # too many columns, cause segfault if not careful\n data = \"1,2\\n3,4,5\"\n\n result = self.read_csv(StringIO(data), header=None,\n 
names=lrange(50))\n expected = self.read_csv(StringIO(data), header=None,\n names=lrange(3)).reindex(columns=lrange(50))\n\n tm.assert_frame_equal(result, expected)\n\n def test_tokenize_CR_with_quoting(self):\n # see gh-3453\n\n data = ' a,b,c\\r\"a,b\",\"e,d\",\"f,f\"'\n\n result = self.read_csv(StringIO(data), header=None)\n expected = self.read_csv(StringIO(data.replace('\\r', '\\n')),\n header=None)\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(StringIO(data))\n expected = self.read_csv(StringIO(data.replace('\\r', '\\n')))\n tm.assert_frame_equal(result, expected)\n\n def test_grow_boundary_at_cap(self):\n # See gh-12494\n #\n # Cause of error was that the C parser\n # was not increasing the buffer size when\n # the desired space would fill the buffer\n # to capacity, which would later cause a\n # buffer overflow error when checking the\n # EOF terminator of the CSV stream\n def test_empty_header_read(count):\n s = StringIO(',' * count)\n expected = DataFrame(columns=[\n 'Unnamed: {i}'.format(i=i)\n for i in range(count + 1)])\n df = self.read_csv(s)\n tm.assert_frame_equal(df, expected)\n\n for count in range(1, 101):\n test_empty_header_read(count)\n\n def test_parse_trim_buffers(self):\n # This test is part of a bugfix for issue #13703. It attmepts to\n # to stress the system memory allocator, to cause it to move the\n # stream buffer and either let the OS reclaim the region, or let\n # other memory requests of parser otherwise modify the contents\n # of memory space, where it was formely located.\n # This test is designed to cause a `segfault` with unpatched\n # `tokenizer.c`. Sometimes the test fails on `segfault`, other\n # times it fails due to memory corruption, which causes the\n # loaded DataFrame to differ from the expected one.\n\n # Generate a large mixed-type CSV file on-the-fly (one record is\n # approx 1.5KiB).\n record_ = \\\n \"\"\"9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z\"\"\" \\\n \"\"\"ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,\"\"\" \\\n \"\"\"ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9\"\"\" \\\n \"\"\"99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,\"\"\" \\\n \"\"\"9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9.\"\"\" \\\n \"\"\"99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999.\"\"\" \\\n \"\"\"99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ\"\"\" \\\n \"\"\"ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ\"\"\" \\\n \"\"\"ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z\"\"\" \\\n \"\"\"ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,\"\"\" \\\n \"\"\"9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,\"\"\" \\\n \"\"\"999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,\"\"\" \\\n \"\"\",,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999\"\"\" \\\n \"\"\",9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9.\"\"\" \\\n \"\"\"999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,\"\"\" \\\n \"\"\",9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z\"\"\" \\\n \"\"\"ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ\"\"\" \\\n \"\"\",999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99\"\"\" \\\n \"\"\",,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-\"\"\" \\\n \"\"\"9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9\"\"\" \\\n \"\"\".99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,\"\"\" \\\n \"\"\",,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9.\"\"\" \\\n 
\"\"\"99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ\"\"\" \\\n \"\"\"ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ\"\"\" \\\n \"\"\"-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ\"\"\" \\\n \"\"\"ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ\"\"\" \\\n \"\"\",9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99\"\"\" \\\n \"\"\",99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9\"\"\" \\\n \"\"\".99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,\"\"\"\n\n # Set the number of lines so that a call to `parser_trim_buffers`\n # is triggered: after a couple of full chunks are consumed a\n # relatively small 'residual' chunk would cause reallocation\n # within the parser.\n chunksize, n_lines = 128, 2 * 128 + 15\n csv_data = \"\\n\".join([record_] * n_lines) + \"\\n\"\n\n # We will use StringIO to load the CSV from this text buffer.\n # pd.read_csv() will iterate over the file in chunks and will\n # finally read a residual chunk of really small size.\n\n # Generate the expected output: manually create the dataframe\n # by splitting by comma and repeating the `n_lines` times.\n row = tuple(val_ if val_ else float(\"nan\")\n for val_ in record_.split(\",\"))\n expected = pd.DataFrame([row for _ in range(n_lines)],\n dtype=object, columns=None, index=None)\n\n # Iterate over the CSV file in chunks of `chunksize` lines\n chunks_ = self.read_csv(StringIO(csv_data), header=None,\n dtype=object, chunksize=chunksize)\n result = pd.concat(chunks_, axis=0, ignore_index=True)\n\n # Check for data corruption if there was no segfault\n tm.assert_frame_equal(result, expected)\n\n def test_internal_null_byte(self):\n # see gh-14012\n #\n # The null byte ('\\x00') should not be used as a\n # true line terminator, escape character, or comment\n # character, only as a placeholder to indicate that\n # none was specified.\n #\n # This test should be moved to common.py ONLY when\n # Python's csv class supports parsing '\\x00'.\n names = ['a', 'b', 'c']\n data = \"1,2,3\\n4,\\x00,6\\n7,8,9\"\n expected = pd.DataFrame([[1, 2.0, 3], [4, np.nan, 6],\n [7, 8, 9]], columns=names)\n\n result = self.read_csv(StringIO(data), names=names)\n tm.assert_frame_equal(result, expected)\n\n def test_read_nrows_large(self):\n # gh-7626 - Read only nrows of data in for large inputs (>262144b)\n header_narrow = '\\t'.join(['COL_HEADER_' + str(i)\n for i in range(10)]) + '\\n'\n data_narrow = '\\t'.join(['somedatasomedatasomedata1'\n for i in range(10)]) + '\\n'\n header_wide = '\\t'.join(['COL_HEADER_' + str(i)\n for i in range(15)]) + '\\n'\n data_wide = '\\t'.join(['somedatasomedatasomedata2'\n for i in range(15)]) + '\\n'\n test_input = (header_narrow + data_narrow * 1050 +\n header_wide + data_wide * 2)\n\n df = self.read_csv(StringIO(test_input), sep='\\t', nrows=1010)\n\n self.assertTrue(df.size == 1010 * 10)\n\n def test_float_precision_round_trip_with_text(self):\n # gh-15140 - This should not segfault on Python 2.7+\n df = self.read_csv(StringIO('a'),\n float_precision='round_trip',\n header=None)\n tm.assert_frame_equal(df, DataFrame({0: ['a']}))\n\n def test_large_difference_in_columns(self):\n # gh-14125\n count = 10000\n large_row = ('X,' * count)[:-1] + '\\n'\n normal_row = 'XXXXXX XXXXXX,111111111111111\\n'\n test_input = (large_row + normal_row * 6)[:-1]\n result = self.read_csv(StringIO(test_input), header=None, usecols=[0])\n rows = test_input.split('\\n')\n expected = DataFrame([row.split(',')[0] for row in rows])\n\n tm.assert_frame_equal(result, expected)\n", "\"\"\"\nThe config module holds 
package-wide configurables and provides\na uniform API for working with them.\n\nOverview\n========\n\nThis module supports the following requirements:\n- options are referenced using keys in dot.notation, e.g. \"x.y.option - z\".\n- keys are case-insensitive.\n- functions should accept partial/regex keys, when unambiguous.\n- options can be registered by modules at import time.\n- options can be registered at init-time (via core.config_init)\n- options have a default value, and (optionally) a description and\n validation function associated with them.\n- options can be deprecated, in which case referencing them\n should produce a warning.\n- deprecated options can optionally be rerouted to a replacement\n so that accessing a deprecated option reroutes to a differently\n named option.\n- options can be reset to their default value.\n- all option can be reset to their default value at once.\n- all options in a certain sub - namespace can be reset at once.\n- the user can set / get / reset or ask for the description of an option.\n- a developer can register and mark an option as deprecated.\n- you can register a callback to be invoked when the the option value\n is set or reset. Changing the stored value is considered misuse, but\n is not verboten.\n\nImplementation\n==============\n\n- Data is stored using nested dictionaries, and should be accessed\n through the provided API.\n\n- \"Registered options\" and \"Deprecated options\" have metadata associcated\n with them, which are stored in auxilary dictionaries keyed on the\n fully-qualified key, e.g. \"x.y.z.option\".\n\n- the config_init module is imported by the package's __init__.py file.\n placing any register_option() calls there will ensure those options\n are available as soon as pandas is loaded. 
If you use register_option\n in a module, it will only be available after that module is imported,\n which you should be aware of.\n\n- `config_prefix` is a context_manager (for use with the `with` keyword)\n which can save developers some typing, see the docstring.\n\n\"\"\"\n\nimport re\n\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nimport warnings\nfrom pandas.compat import map, lmap, u\nimport pandas.compat as compat\n\nDeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')\nRegisteredOption = namedtuple('RegisteredOption',\n 'key defval doc validator cb')\n\n_deprecated_options = {} # holds deprecated option metdata\n_registered_options = {} # holds registered option metdata\n_global_config = {} # holds the current values for registered options\n_reserved_keys = ['all'] # keys which have a special meaning\n\n\nclass OptionError(AttributeError, KeyError):\n \"\"\"Exception for pandas.options, backwards compatible with KeyError\n checks\n \"\"\"\n\n#\n# User API\n\n\ndef _get_single_key(pat, silent):\n keys = _select_options(pat)\n if len(keys) == 0:\n if not silent:\n _warn_if_deprecated(pat)\n raise OptionError('No such keys(s): %r' % pat)\n if len(keys) > 1:\n raise OptionError('Pattern matched multiple keys')\n key = keys[0]\n\n if not silent:\n _warn_if_deprecated(key)\n\n key = _translate_key(key)\n\n return key\n\n\ndef _get_option(pat, silent=False):\n key = _get_single_key(pat, silent)\n\n # walk the nested dict\n root, k = _get_root(key)\n return root[k]\n\n\ndef _set_option(*args, **kwargs):\n # must at least 1 arg deal with constraints later\n nargs = len(args)\n if not nargs or nargs % 2 != 0:\n raise ValueError(\"Must provide an even number of non-keyword \"\n \"arguments\")\n\n # default to false\n silent = kwargs.pop('silent', False)\n\n if kwargs:\n raise TypeError('_set_option() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n for k, v in zip(args[::2], args[1::2]):\n key = _get_single_key(k, silent)\n\n o = _get_registered_option(key)\n if o and o.validator:\n o.validator(v)\n\n # walk the nested dict\n root, k = _get_root(key)\n root[k] = v\n\n if o.cb:\n if silent:\n with warnings.catch_warnings(record=True):\n o.cb(key)\n else:\n o.cb(key)\n\n\ndef _describe_option(pat='', _print_desc=True):\n\n keys = _select_options(pat)\n if len(keys) == 0:\n raise OptionError('No such keys(s)')\n\n s = u('')\n for k in keys: # filter by pat\n s += _build_option_description(k)\n\n if _print_desc:\n print(s)\n else:\n return s\n\n\ndef _reset_option(pat, silent=False):\n\n keys = _select_options(pat)\n\n if len(keys) == 0:\n raise OptionError('No such keys(s)')\n\n if len(keys) > 1 and len(pat) < 4 and pat != 'all':\n raise ValueError('You must specify at least 4 characters when '\n 'resetting multiple keys, use the special keyword '\n '\"all\" to reset all the options to their default '\n 'value')\n\n for k in keys:\n _set_option(k, _registered_options[k].defval, silent=silent)\n\n\ndef get_default_val(pat):\n key = _get_single_key(pat, silent=True)\n return _get_registered_option(key).defval\n\n\nclass DictWrapper(object):\n \"\"\" provide attribute-style access to a nested dict\"\"\"\n\n def __init__(self, d, prefix=\"\"):\n object.__setattr__(self, \"d\", d)\n object.__setattr__(self, \"prefix\", prefix)\n\n def __setattr__(self, key, val):\n prefix = object.__getattribute__(self, \"prefix\")\n if prefix:\n prefix += \".\"\n prefix += key\n # you can't set new keys\n # can you can't 
overwrite subtrees\n if key in self.d and not isinstance(self.d[key], dict):\n _set_option(prefix, val)\n else:\n raise OptionError(\"You can only set the value of existing options\")\n\n def __getattr__(self, key):\n prefix = object.__getattribute__(self, \"prefix\")\n if prefix:\n prefix += \".\"\n prefix += key\n v = object.__getattribute__(self, \"d\")[key]\n if isinstance(v, dict):\n return DictWrapper(v, prefix)\n else:\n return _get_option(prefix)\n\n def __dir__(self):\n return list(self.d.keys())\n\n# For user convenience, we'd like to have the available options described\n# in the docstring. For dev convenience we'd like to generate the docstrings\n# dynamically instead of maintaining them by hand. To this, we use the\n# class below which wraps functions inside a callable, and converts\n# __doc__ into a propery function. The doctsrings below are templates\n# using the py2.6+ advanced formatting syntax to plug in a concise list\n# of options, and option descriptions.\n\n\nclass CallableDynamicDoc(object):\n\n def __init__(self, func, doc_tmpl):\n self.__doc_tmpl__ = doc_tmpl\n self.__func__ = func\n\n def __call__(self, *args, **kwds):\n return self.__func__(*args, **kwds)\n\n @property\n def __doc__(self):\n opts_desc = _describe_option('all', _print_desc=False)\n opts_list = pp_options_list(list(_registered_options.keys()))\n return self.__doc_tmpl__.format(opts_desc=opts_desc,\n opts_list=opts_list)\n\n\n_get_option_tmpl = \"\"\"\nget_option(pat)\n\nRetrieves the value of the specified option.\n\nAvailable options:\n\n{opts_list}\n\nParameters\n----------\npat : str\n Regexp which should match a single option.\n Note: partial matches are supported for convenience, but unless you use the\n full option name (e.g. x.y.z.option_name), your code may break in future\n versions if new options with similar names are introduced.\n\nReturns\n-------\nresult : the value of the option\n\nRaises\n------\nOptionError : if no such option exists\n\nNotes\n-----\nThe available options with its descriptions:\n\n{opts_desc}\n\"\"\"\n\n_set_option_tmpl = \"\"\"\nset_option(pat, value)\n\nSets the value of the specified option.\n\nAvailable options:\n\n{opts_list}\n\nParameters\n----------\npat : str\n Regexp which should match a single option.\n Note: partial matches are supported for convenience, but unless you use the\n full option name (e.g. x.y.z.option_name), your code may break in future\n versions if new options with similar names are introduced.\nvalue :\n new value of option.\n\nReturns\n-------\nNone\n\nRaises\n------\nOptionError if no such option exists\n\nNotes\n-----\nThe available options with its descriptions:\n\n{opts_desc}\n\"\"\"\n\n_describe_option_tmpl = \"\"\"\ndescribe_option(pat, _print_desc=False)\n\nPrints the description for one or more registered options.\n\nCall with not arguments to get a listing for all registered options.\n\nAvailable options:\n\n{opts_list}\n\nParameters\n----------\npat : str\n Regexp pattern. 
All matching keys will have their description displayed.\n_print_desc : bool, default True\n If True (default) the description(s) will be printed to stdout.\n Otherwise, the description(s) will be returned as a unicode string\n (for testing).\n\nReturns\n-------\nNone by default, the description(s) as a unicode string if _print_desc\nis False\n\nNotes\n-----\nThe available options with its descriptions:\n\n{opts_desc}\n\"\"\"\n\n_reset_option_tmpl = \"\"\"\nreset_option(pat)\n\nReset one or more options to their default value.\n\nPass \"all\" as argument to reset all options.\n\nAvailable options:\n\n{opts_list}\n\nParameters\n----------\npat : str/regex\n If specified only options matching `prefix*` will be reset.\n Note: partial matches are supported for convenience, but unless you\n use the full option name (e.g. x.y.z.option_name), your code may break\n in future versions if new options with similar names are introduced.\n\nReturns\n-------\nNone\n\nNotes\n-----\nThe available options with its descriptions:\n\n{opts_desc}\n\"\"\"\n\n# bind the functions with their docstrings into a Callable\n# and use that as the functions exposed in pd.api\nget_option = CallableDynamicDoc(_get_option, _get_option_tmpl)\nset_option = CallableDynamicDoc(_set_option, _set_option_tmpl)\nreset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)\ndescribe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)\noptions = DictWrapper(_global_config)\n\n#\n# Functions for use by pandas developers, in addition to User - api\n\n\nclass option_context(object):\n \"\"\"\n Context manager to temporarily set options in the `with` statement context.\n\n You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.\n\n Examples\n --------\n\n >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):\n ...\n\n \"\"\"\n\n def __init__(self, *args):\n if not (len(args) % 2 == 0 and len(args) >= 2):\n raise ValueError('Need to invoke as'\n 'option_context(pat, val, [(pat, val), ...)).')\n\n self.ops = list(zip(args[::2], args[1::2]))\n\n def __enter__(self):\n undo = []\n for pat, val in self.ops:\n undo.append((pat, _get_option(pat, silent=True)))\n\n self.undo = undo\n\n for pat, val in self.ops:\n _set_option(pat, val, silent=True)\n\n def __exit__(self, *args):\n if self.undo:\n for pat, val in self.undo:\n _set_option(pat, val, silent=True)\n\n\ndef register_option(key, defval, doc='', validator=None, cb=None):\n \"\"\"Register an option in the package-wide pandas config object\n\n Parameters\n ----------\n key - a fully-qualified key, e.g. \"x.y.option - z\".\n defval - the default value of the option\n doc - a string description of the option\n validator - a function of a single argument, should raise `ValueError` if\n called with a value which is not a legal value for the option.\n cb - a function of a single argument \"key\", which is called\n immediately after an option value is set/reset. 
key is\n the full name of the option.\n\n Returns\n -------\n Nothing.\n\n Raises\n ------\n ValueError if `validator` is specified and `defval` is not a valid value.\n\n \"\"\"\n import tokenize\n import keyword\n key = key.lower()\n\n if key in _registered_options:\n raise OptionError(\"Option '%s' has already been registered\" % key)\n if key in _reserved_keys:\n raise OptionError(\"Option '%s' is a reserved key\" % key)\n\n # the default value should be legal\n if validator:\n validator(defval)\n\n # walk the nested dict, creating dicts as needed along the path\n path = key.split('.')\n\n for k in path:\n if not bool(re.match('^' + tokenize.Name + '$', k)):\n raise ValueError(\"%s is not a valid identifier\" % k)\n if keyword.iskeyword(k):\n raise ValueError(\"%s is a python keyword\" % k)\n\n cursor = _global_config\n for i, p in enumerate(path[:-1]):\n if not isinstance(cursor, dict):\n raise OptionError(\"Path prefix to option '%s' is already an option\"\n % '.'.join(path[:i]))\n if p not in cursor:\n cursor[p] = {}\n cursor = cursor[p]\n\n if not isinstance(cursor, dict):\n raise OptionError(\"Path prefix to option '%s' is already an option\" %\n '.'.join(path[:-1]))\n\n cursor[path[-1]] = defval # initialize\n\n # save the option metadata\n _registered_options[key] = RegisteredOption(key=key, defval=defval,\n doc=doc, validator=validator,\n cb=cb)\n\n\ndef deprecate_option(key, msg=None, rkey=None, removal_ver=None):\n \"\"\"\n Mark option `key` as deprecated, if code attempts to access this option,\n a warning will be produced, using `msg` if given, or a default message\n if not.\n if `rkey` is given, any access to the key will be re-routed to `rkey`.\n\n Neither the existence of `key` nor that if `rkey` is checked. If they\n do not exist, any subsequence access will fail as usual, after the\n deprecation warning is given.\n\n Parameters\n ----------\n key - the name of the option to be deprecated. must be a fully-qualified\n option name (e.g \"x.y.z.rkey\").\n\n msg - (Optional) a warning message to output when the key is referenced.\n if no message is given a default message will be emitted.\n\n rkey - (Optional) the name of an option to reroute access to.\n If specified, any referenced `key` will be re-routed to `rkey`\n including set/get/reset.\n rkey must be a fully-qualified option name (e.g \"x.y.z.rkey\").\n used by the default message if no `msg` is specified.\n\n removal_ver - (Optional) specifies the version in which this option will\n be removed. 
used by the default message if no `msg`\n is specified.\n\n Returns\n -------\n Nothing\n\n Raises\n ------\n OptionError - if key has already been deprecated.\n\n \"\"\"\n\n key = key.lower()\n\n if key in _deprecated_options:\n raise OptionError(\"Option '%s' has already been defined as deprecated.\"\n % key)\n\n _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)\n\n#\n# functions internal to the module\n\n\ndef _select_options(pat):\n \"\"\"returns a list of keys matching `pat`\n\n if pat==\"all\", returns all registered options\n \"\"\"\n\n # short-circuit for exact key\n if pat in _registered_options:\n return [pat]\n\n # else look through all of them\n keys = sorted(_registered_options.keys())\n if pat == 'all': # reserved key\n return keys\n\n return [k for k in keys if re.search(pat, k, re.I)]\n\n\ndef _get_root(key):\n path = key.split('.')\n cursor = _global_config\n for p in path[:-1]:\n cursor = cursor[p]\n return cursor, path[-1]\n\n\ndef _is_deprecated(key):\n \"\"\" Returns True if the given option has been deprecated \"\"\"\n\n key = key.lower()\n return key in _deprecated_options\n\n\ndef _get_deprecated_option(key):\n \"\"\"\n Retrieves the metadata for a deprecated option, if `key` is deprecated.\n\n Returns\n -------\n DeprecatedOption (namedtuple) if key is deprecated, None otherwise\n \"\"\"\n\n try:\n d = _deprecated_options[key]\n except KeyError:\n return None\n else:\n return d\n\n\ndef _get_registered_option(key):\n \"\"\"\n Retrieves the option metadata if `key` is a registered option.\n\n Returns\n -------\n RegisteredOption (namedtuple) if key is deprecated, None otherwise\n \"\"\"\n return _registered_options.get(key)\n\n\ndef _translate_key(key):\n \"\"\"\n if key id deprecated and a replacement key defined, will return the\n replacement key, otherwise returns `key` as - is\n \"\"\"\n\n d = _get_deprecated_option(key)\n if d:\n return d.rkey or key\n else:\n return key\n\n\ndef _warn_if_deprecated(key):\n \"\"\"\n Checks if `key` is a deprecated option and if so, prints a warning.\n\n Returns\n -------\n bool - True if `key` is deprecated, False otherwise.\n \"\"\"\n\n d = _get_deprecated_option(key)\n if d:\n if d.msg:\n print(d.msg)\n warnings.warn(d.msg, DeprecationWarning)\n else:\n msg = \"'%s' is deprecated\" % key\n if d.removal_ver:\n msg += ' and will be removed in %s' % d.removal_ver\n if d.rkey:\n msg += \", please use '%s' instead.\" % d.rkey\n else:\n msg += ', please refrain from using it.'\n\n warnings.warn(msg, DeprecationWarning)\n return True\n return False\n\n\ndef _build_option_description(k):\n \"\"\" Builds a formatted description of a registered option and prints it \"\"\"\n\n o = _get_registered_option(k)\n d = _get_deprecated_option(k)\n\n s = u('%s ') % k\n\n if o.doc:\n s += '\\n'.join(o.doc.strip().split('\\n'))\n else:\n s += 'No description available.'\n\n if o:\n s += u('\\n [default: %s] [currently: %s]') % (o.defval,\n _get_option(k, True))\n\n if d:\n s += u('\\n (Deprecated')\n s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')\n s += u(')')\n\n s += '\\n\\n'\n return s\n\n\ndef pp_options_list(keys, width=80, _print=False):\n \"\"\" Builds a concise listing of available options, grouped by prefix \"\"\"\n\n from textwrap import wrap\n from itertools import groupby\n\n def pp(name, ks):\n pfx = ('- ' + name + '.[' if name else '')\n ls = wrap(', '.join(ks), width, initial_indent=pfx,\n subsequent_indent=' ', break_long_words=False)\n if ls and ls[-1] and name:\n ls[-1] = ls[-1] + ']'\n return 
ls\n\n ls = []\n singles = [x for x in sorted(keys) if x.find('.') < 0]\n if singles:\n ls += pp('', singles)\n keys = [x for x in keys if x.find('.') >= 0]\n\n for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):\n ks = [x[len(k) + 1:] for x in list(g)]\n ls += pp(k, ks)\n s = '\\n'.join(ls)\n if _print:\n print(s)\n else:\n return s\n\n#\n# helpers\n\n\n@contextmanager\ndef config_prefix(prefix):\n \"\"\"contextmanager for multiple invocations of API with a common prefix\n\n supported API functions: (register / get / set )__option\n\n Warning: This is not thread - safe, and won't work properly if you import\n the API functions into your module using the \"from x import y\" construct.\n\n Example:\n\n import pandas.core.config as cf\n with cf.config_prefix(\"display.font\"):\n cf.register_option(\"color\", \"red\")\n cf.register_option(\"size\", \" 5 pt\")\n cf.set_option(size, \" 6 pt\")\n cf.get_option(size)\n ...\n\n etc'\n\n will register options \"display.font.color\", \"display.font.size\", set the\n value of \"display.font.size\"... and so on.\n \"\"\"\n\n # Note: reset_option relies on set_option, and on key directly\n # it does not fit in to this monkey-patching scheme\n\n global register_option, get_option, set_option, reset_option\n\n def wrap(func):\n def inner(key, *args, **kwds):\n pkey = '%s.%s' % (prefix, key)\n return func(pkey, *args, **kwds)\n\n return inner\n\n _register_option = register_option\n _get_option = get_option\n _set_option = set_option\n set_option = wrap(set_option)\n get_option = wrap(get_option)\n register_option = wrap(register_option)\n yield None\n set_option = _set_option\n get_option = _get_option\n register_option = _register_option\n\n# These factories and methods are handy for use as the validator\n# arg in register_option\n\n\ndef is_type_factory(_type):\n \"\"\"\n\n Parameters\n ----------\n `_type` - a type to be compared against (e.g. type(x) == `_type`)\n\n Returns\n -------\n validator - a function of a single argument x , which returns the\n True if type(x) is equal to `_type`\n\n \"\"\"\n\n def inner(x):\n if type(x) != _type:\n raise ValueError(\"Value must have type '%s'\" % str(_type))\n\n return inner\n\n\ndef is_instance_factory(_type):\n \"\"\"\n\n Parameters\n ----------\n `_type` - the type to be checked against\n\n Returns\n -------\n validator - a function of a single argument x , which returns the\n True if x is an instance of `_type`\n\n \"\"\"\n if isinstance(_type, (tuple, list)):\n _type = tuple(_type)\n from pandas.formats.printing import pprint_thing\n type_repr = \"|\".join(map(pprint_thing, _type))\n else:\n type_repr = \"'%s'\" % _type\n\n def inner(x):\n if not isinstance(x, _type):\n raise ValueError(\"Value must be an instance of %s\" % type_repr)\n\n return inner\n\n\ndef is_one_of_factory(legal_values):\n\n callables = [c for c in legal_values if callable(c)]\n legal_values = [c for c in legal_values if not callable(c)]\n\n def inner(x):\n from pandas.formats.printing import pprint_thing as pp\n if x not in legal_values:\n\n if not any([c(x) for c in callables]):\n pp_values = pp(\"|\".join(lmap(pp, legal_values)))\n msg = \"Value must be one of {0}\".format(pp_values)\n if len(callables):\n msg += \" or a callable\"\n raise ValueError(msg)\n\n return inner\n\n\n# common type validators, for convenience\n# usage: register_option(... 
, validator = is_int)\nis_int = is_type_factory(int)\nis_bool = is_type_factory(bool)\nis_float = is_type_factory(float)\nis_str = is_type_factory(str)\nis_unicode = is_type_factory(compat.text_type)\nis_text = is_instance_factory((str, bytes))\n\n\ndef is_callable(obj):\n \"\"\"\n\n Parameters\n ----------\n `obj` - the object to be checked\n\n Returns\n -------\n validator - returns True if object is callable\n raises ValueError otherwise.\n\n \"\"\"\n if not callable(obj):\n raise ValueError(\"Value must be a callable\")\n return True\n", "import numpy as np\nimport pandas as pd\nfrom pandas import date_range, Index, DataFrame, Series, Timestamp\nfrom pandas.util import testing as tm\n\n\nclass TestDatetimeIndex(tm.TestCase):\n\n def test_indexing_with_datetime_tz(self):\n\n # 8260\n # support datetime64 with tz\n\n idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),\n name='foo')\n dr = date_range('20130110', periods=3)\n df = DataFrame({'A': idx, 'B': dr})\n df['C'] = idx\n df.iloc[1, 1] = pd.NaT\n df.iloc[1, 2] = pd.NaT\n\n # indexing\n result = df.iloc[1]\n expected = Series([Timestamp('2013-01-02 00:00:00-0500',\n tz='US/Eastern'), np.nan, np.nan],\n index=list('ABC'), dtype='object', name=1)\n tm.assert_series_equal(result, expected)\n result = df.loc[1]\n expected = Series([Timestamp('2013-01-02 00:00:00-0500',\n tz='US/Eastern'), np.nan, np.nan],\n index=list('ABC'), dtype='object', name=1)\n tm.assert_series_equal(result, expected)\n\n # indexing - fast_xs\n df = DataFrame({'a': date_range('2014-01-01', periods=10, tz='UTC')})\n result = df.iloc[5]\n expected = Timestamp('2014-01-06 00:00:00+0000', tz='UTC', freq='D')\n self.assertEqual(result, expected)\n\n result = df.loc[5]\n self.assertEqual(result, expected)\n\n # indexing - boolean\n result = df[df.a > df.a[3]]\n expected = df.iloc[4:]\n tm.assert_frame_equal(result, expected)\n\n # indexing - setting an element\n df = DataFrame(data=pd.to_datetime(\n ['2015-03-30 20:12:32', '2015-03-12 00:11:11']), columns=['time'])\n df['new_col'] = ['new', 'old']\n df.time = df.set_index('time').index.tz_localize('UTC')\n v = df[df.new_col == 'new'].set_index('time').index.tz_convert(\n 'US/Pacific')\n\n # trying to set a single element on a part of a different timezone\n def f():\n df.loc[df.new_col == 'new', 'time'] = v\n\n self.assertRaises(ValueError, f)\n\n v = df.loc[df.new_col == 'new', 'time'] + pd.Timedelta('1s')\n df.loc[df.new_col == 'new', 'time'] = v\n tm.assert_series_equal(df.loc[df.new_col == 'new', 'time'], v)\n\n def test_indexing_with_datetimeindex_tz(self):\n\n # GH 12050\n # indexing on a series with a datetimeindex with tz\n index = pd.date_range('2015-01-01', periods=2, tz='utc')\n\n ser = pd.Series(range(2), index=index,\n dtype='int64')\n\n # list-like indexing\n\n for sel in (index, list(index)):\n # getitem\n tm.assert_series_equal(ser[sel], ser)\n\n # setitem\n result = ser.copy()\n result[sel] = 1\n expected = pd.Series(1, index=index)\n tm.assert_series_equal(result, expected)\n\n # .loc getitem\n tm.assert_series_equal(ser.loc[sel], ser)\n\n # .loc setitem\n result = ser.copy()\n result.loc[sel] = 1\n expected = pd.Series(1, index=index)\n tm.assert_series_equal(result, expected)\n\n # single element indexing\n\n # getitem\n self.assertEqual(ser[index[1]], 1)\n\n # setitem\n result = ser.copy()\n result[index[1]] = 5\n expected = pd.Series([0, 5], index=index)\n tm.assert_series_equal(result, expected)\n\n # .loc getitem\n self.assertEqual(ser.loc[index[1]], 1)\n\n # .loc setitem\n result = 
ser.copy()\n result.loc[index[1]] = 5\n expected = pd.Series([0, 5], index=index)\n tm.assert_series_equal(result, expected)\n\n def test_partial_setting_with_datetimelike_dtype(self):\n\n # GH9478\n # a datetimeindex alignment issue with partial setting\n df = pd.DataFrame(np.arange(6.).reshape(3, 2), columns=list('AB'),\n index=pd.date_range('1/1/2000', periods=3,\n freq='1H'))\n expected = df.copy()\n expected['C'] = [expected.index[0]] + [pd.NaT, pd.NaT]\n\n mask = df.A < 1\n df.loc[mask, 'C'] = df.loc[mask].index\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_datetime(self):\n\n # GH 9516\n dt1 = Timestamp('20130101 09:00:00')\n dt2 = Timestamp('20130101 10:00:00')\n\n for conv in [lambda x: x, lambda x: x.to_datetime64(),\n lambda x: x.to_pydatetime(), lambda x: np.datetime64(x)]:\n\n df = pd.DataFrame()\n df.loc[conv(dt1), 'one'] = 100\n df.loc[conv(dt2), 'one'] = 200\n\n expected = DataFrame({'one': [100.0, 200.0]}, index=[dt1, dt2])\n tm.assert_frame_equal(df, expected)\n\n def test_series_partial_set_datetime(self):\n # GH 11497\n\n idx = date_range('2011-01-01', '2011-01-02', freq='D', name='idx')\n ser = Series([0.1, 0.2], index=idx, name='s')\n\n result = ser.loc[[Timestamp('2011-01-01'), Timestamp('2011-01-02')]]\n exp = Series([0.1, 0.2], index=idx, name='s')\n tm.assert_series_equal(result, exp, check_index_type=True)\n\n keys = [Timestamp('2011-01-02'), Timestamp('2011-01-02'),\n Timestamp('2011-01-01')]\n exp = Series([0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name='idx'),\n name='s')\n tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)\n\n keys = [Timestamp('2011-01-03'), Timestamp('2011-01-02'),\n Timestamp('2011-01-03')]\n exp = Series([np.nan, 0.2, np.nan],\n index=pd.DatetimeIndex(keys, name='idx'), name='s')\n tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)\n\n def test_series_partial_set_period(self):\n # GH 11497\n\n idx = pd.period_range('2011-01-01', '2011-01-02', freq='D', name='idx')\n ser = Series([0.1, 0.2], index=idx, name='s')\n\n result = ser.loc[[pd.Period('2011-01-01', freq='D'),\n pd.Period('2011-01-02', freq='D')]]\n exp = Series([0.1, 0.2], index=idx, name='s')\n tm.assert_series_equal(result, exp, check_index_type=True)\n\n keys = [pd.Period('2011-01-02', freq='D'),\n pd.Period('2011-01-02', freq='D'),\n pd.Period('2011-01-01', freq='D')]\n exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name='idx'),\n name='s')\n tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)\n\n keys = [pd.Period('2011-01-03', freq='D'),\n pd.Period('2011-01-02', freq='D'),\n pd.Period('2011-01-03', freq='D')]\n exp = Series([np.nan, 0.2, np.nan],\n index=pd.PeriodIndex(keys, name='idx'), name='s')\n result = ser.loc[keys]\n tm.assert_series_equal(result, exp)\n", "from .pandas_vb_common import *\nfrom pandas.core import common as com\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\ntry:\n from pandas.util.testing import test_parallel\n\n have_real_test_parallel = True\nexcept ImportError:\n have_real_test_parallel = False\n\n\n def test_parallel(num_threads=1):\n\n def wrapper(fname):\n return fname\n\n return wrapper\n\n\nclass NoGilGroupby(object):\n goal_time = 0.2\n\n def setup(self):\n self.N = 1000000\n self.ngroups = 1000\n np.random.seed(1234)\n self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })\n\n np.random.seed(1234)\n self.size = 2 ** 22\n self.ngroups = 100\n self.data = 
Series(np.random.randint(0, self.ngroups, size=self.size))\n\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n @test_parallel(num_threads=2)\n def _pg2_count(self):\n self.df.groupby('key')['data'].count()\n\n def time_count_2(self):\n self._pg2_count()\n\n @test_parallel(num_threads=2)\n def _pg2_last(self):\n self.df.groupby('key')['data'].last()\n\n def time_last_2(self):\n self._pg2_last()\n\n @test_parallel(num_threads=2)\n def _pg2_max(self):\n self.df.groupby('key')['data'].max()\n\n def time_max_2(self):\n self._pg2_max()\n\n @test_parallel(num_threads=2)\n def _pg2_mean(self):\n self.df.groupby('key')['data'].mean()\n\n def time_mean_2(self):\n self._pg2_mean()\n\n @test_parallel(num_threads=2)\n def _pg2_min(self):\n self.df.groupby('key')['data'].min()\n\n def time_min_2(self):\n self._pg2_min()\n\n @test_parallel(num_threads=2)\n def _pg2_prod(self):\n self.df.groupby('key')['data'].prod()\n\n def time_prod_2(self):\n self._pg2_prod()\n\n @test_parallel(num_threads=2)\n def _pg2_sum(self):\n self.df.groupby('key')['data'].sum()\n\n def time_sum_2(self):\n self._pg2_sum()\n\n @test_parallel(num_threads=4)\n def _pg4_sum(self):\n self.df.groupby('key')['data'].sum()\n\n def time_sum_4(self):\n self._pg4_sum()\n\n def time_sum_4_notp(self):\n for i in range(4):\n self.df.groupby('key')['data'].sum()\n\n def _f_sum(self):\n self.df.groupby('key')['data'].sum()\n\n @test_parallel(num_threads=8)\n def _pg8_sum(self):\n self._f_sum()\n\n def time_sum_8(self):\n self._pg8_sum()\n\n def time_sum_8_notp(self):\n for i in range(8):\n self._f_sum()\n\n @test_parallel(num_threads=2)\n def _pg2_var(self):\n self.df.groupby('key')['data'].var()\n\n def time_var_2(self):\n self._pg2_var()\n\n # get groups\n\n def _groups(self):\n self.data.groupby(self.data).groups\n\n @test_parallel(num_threads=2)\n def _pg2_groups(self):\n self._groups()\n\n def time_groups_2(self):\n self._pg2_groups()\n\n @test_parallel(num_threads=4)\n def _pg4_groups(self):\n self._groups()\n\n def time_groups_4(self):\n self._pg4_groups()\n\n @test_parallel(num_threads=8)\n def _pg8_groups(self):\n self._groups()\n\n def time_groups_8(self):\n self._pg8_groups()\n\n\n\nclass nogil_take1d_float64(object):\n goal_time = 0.2\n\n def setup(self):\n self.N = 1000000\n self.ngroups = 1000\n np.random.seed(1234)\n self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })\n if (not have_real_test_parallel):\n raise NotImplementedError\n self.N = 10000000.0\n self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })\n self.indexer = np.arange(100, (len(self.df) - 100))\n\n def time_nogil_take1d_float64(self):\n self.take_1d_pg2_int64()\n\n @test_parallel(num_threads=2)\n def take_1d_pg2_int64(self):\n com.take_1d(self.df.int64.values, self.indexer)\n\n @test_parallel(num_threads=2)\n def take_1d_pg2_float64(self):\n com.take_1d(self.df.float64.values, self.indexer)\n\n\nclass nogil_take1d_int64(object):\n goal_time = 0.2\n\n def setup(self):\n self.N = 1000000\n self.ngroups = 1000\n np.random.seed(1234)\n self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })\n if (not have_real_test_parallel):\n raise NotImplementedError\n self.N = 10000000.0\n self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })\n self.indexer = np.arange(100, (len(self.df) - 100))\n\n def 
time_nogil_take1d_int64(self):\n self.take_1d_pg2_float64()\n\n @test_parallel(num_threads=2)\n def take_1d_pg2_int64(self):\n com.take_1d(self.df.int64.values, self.indexer)\n\n @test_parallel(num_threads=2)\n def take_1d_pg2_float64(self):\n com.take_1d(self.df.float64.values, self.indexer)\n\n\nclass nogil_kth_smallest(object):\n number = 1\n repeat = 5\n\n def setup(self):\n if (not have_real_test_parallel):\n raise NotImplementedError\n np.random.seed(1234)\n self.N = 10000000\n self.k = 500000\n self.a = np.random.randn(self.N)\n self.b = self.a.copy()\n self.kwargs_list = [{'arr': self.a}, {'arr': self.b}]\n\n def time_nogil_kth_smallest(self):\n @test_parallel(num_threads=2, kwargs_list=self.kwargs_list)\n def run(arr):\n algos.kth_smallest(arr, self.k)\n run()\n\n\nclass nogil_datetime_fields(object):\n goal_time = 0.2\n\n def setup(self):\n self.N = 100000000\n self.dti = pd.date_range('1900-01-01', periods=self.N, freq='D')\n self.period = self.dti.to_period('D')\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n def time_datetime_field_year(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.year\n run(self.dti)\n\n def time_datetime_field_day(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.day\n run(self.dti)\n\n def time_datetime_field_daysinmonth(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.days_in_month\n run(self.dti)\n\n def time_datetime_field_normalize(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.normalize()\n run(self.dti)\n\n def time_datetime_to_period(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.to_period('S')\n run(self.dti)\n\n def time_period_to_datetime(self):\n @test_parallel(num_threads=2)\n def run(period):\n period.to_timestamp()\n run(self.period)\n\n\nclass nogil_rolling_algos_slow(object):\n goal_time = 0.2\n\n def setup(self):\n self.win = 100\n np.random.seed(1234)\n self.arr = np.random.rand(100000)\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n def time_nogil_rolling_median(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_median(arr, win)\n run(self.arr, self.win)\n\n\nclass nogil_rolling_algos_fast(object):\n goal_time = 0.2\n\n def setup(self):\n self.win = 100\n np.random.seed(1234)\n self.arr = np.random.rand(1000000)\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n def time_nogil_rolling_mean(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_mean(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_min(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_min(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_max(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_max(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_var(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_var(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_skew(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_skew(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_kurt(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_kurt(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_std(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_std(arr, win)\n run(self.arr, self.win)\n\n\nclass nogil_read_csv(object):\n number = 1\n repeat = 5\n\n def setup(self):\n if (not have_real_test_parallel):\n raise NotImplementedError\n # Using the values\n 
self.df = DataFrame(np.random.randn(10000, 50))\n self.df.to_csv('__test__.csv')\n\n self.rng = date_range('1/1/2000', periods=10000)\n self.df_date_time = DataFrame(np.random.randn(10000, 50), index=self.rng)\n self.df_date_time.to_csv('__test_datetime__.csv')\n\n self.df_object = DataFrame('foo', index=self.df.index, columns=self.create_cols('object'))\n self.df_object.to_csv('__test_object__.csv')\n\n def create_cols(self, name):\n return [('%s%03d' % (name, i)) for i in range(5)]\n\n @test_parallel(num_threads=2)\n def pg_read_csv(self):\n read_csv('__test__.csv', sep=',', header=None, float_precision=None)\n\n def time_read_csv(self):\n self.pg_read_csv()\n\n @test_parallel(num_threads=2)\n def pg_read_csv_object(self):\n read_csv('__test_object__.csv', sep=',')\n\n def time_read_csv_object(self):\n self.pg_read_csv_object()\n\n @test_parallel(num_threads=2)\n def pg_read_csv_datetime(self):\n read_csv('__test_datetime__.csv', sep=',', header=None)\n\n def time_read_csv_datetime(self):\n self.pg_read_csv_datetime()\n\n\nclass nogil_factorize(object):\n number = 1\n repeat = 5\n\n def setup(self):\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n np.random.seed(1234)\n self.strings = tm.makeStringIndex(100000)\n\n def factorize_strings(self):\n pd.factorize(self.strings)\n\n @test_parallel(num_threads=4)\n def _pg_factorize_strings_4(self):\n self.factorize_strings()\n\n def time_factorize_strings_4(self):\n for i in range(2):\n self._pg_factorize_strings_4()\n\n @test_parallel(num_threads=2)\n def _pg_factorize_strings_2(self):\n self.factorize_strings()\n\n def time_factorize_strings_2(self):\n for i in range(4):\n self._pg_factorize_strings_2()\n\n def time_factorize_strings(self):\n for i in range(8):\n self.factorize_strings()\n", "from pandas.compat import range, lrange\nimport numpy as np\nimport pandas.lib as lib\nfrom pandas import *\nfrom copy import deepcopy\nimport time\n\nn = 1000000\nK = 1\npct_overlap = 0.2\n\na = np.arange(n, dtype=np.int64)\nb = np.arange(n * pct_overlap, n * (1 + pct_overlap), dtype=np.int64)\n\ndr1 = DatetimeIndex('1/1/2000', periods=n, offset=offsets.Minute())\ndr2 = DatetimeIndex(\n dr1[int(pct_overlap * n)], periods=n, offset=offsets.Minute(2))\n\naobj = a.astype(object)\nbobj = b.astype(object)\n\nav = np.random.randn(n)\nbv = np.random.randn(n)\n\navf = np.random.randn(n, K)\nbvf = np.random.randn(n, K)\n\na_series = Series(av, index=a)\nb_series = Series(bv, index=b)\n\na_frame = DataFrame(avf, index=a, columns=lrange(K))\nb_frame = DataFrame(bvf, index=b, columns=lrange(K, 2 * K))\n\n\ndef do_left_join(a, b, av, bv):\n out = np.empty((len(a), 2))\n lib.left_join_1d(a, b, av, bv, out)\n return out\n\n\ndef do_outer_join(a, b, av, bv):\n result_index, aindexer, bindexer = lib.outer_join_indexer(a, b)\n result = np.empty((2, len(result_index)))\n lib.take_1d(av, aindexer, result[0])\n lib.take_1d(bv, bindexer, result[1])\n return result_index, result\n\n\ndef do_inner_join(a, b, av, bv):\n result_index, aindexer, bindexer = lib.inner_join_indexer(a, b)\n result = np.empty((2, len(result_index)))\n lib.take_1d(av, aindexer, result[0])\n lib.take_1d(bv, bindexer, result[1])\n return result_index, result\n\nfrom line_profiler import LineProfiler\nprof = LineProfiler()\n\nfrom pandas.util.testing import set_trace\n\n\ndef do_left_join_python(a, b, av, bv):\n indexer, mask = lib.ordered_left_join_int64(a, b)\n\n n, ak = av.shape\n _, bk = bv.shape\n result_width = ak + bk\n\n result = np.empty((result_width, n), dtype=np.float64)\n 
result[:ak] = av.T\n\n bchunk = result[ak:]\n _take_multi(bv.T, indexer, bchunk)\n np.putmask(bchunk, np.tile(mask, bk), np.nan)\n return result\n\n\ndef _take_multi(data, indexer, out):\n if not data.flags.c_contiguous:\n data = data.copy()\n for i in range(data.shape[0]):\n data[i].take(indexer, out=out[i])\n\n\ndef do_left_join_multi(a, b, av, bv):\n n, ak = av.shape\n _, bk = bv.shape\n result = np.empty((n, ak + bk), dtype=np.float64)\n lib.left_join_2d(a, b, av, bv, result)\n return result\n\n\ndef do_outer_join_multi(a, b, av, bv):\n n, ak = av.shape\n _, bk = bv.shape\n result_index, rindexer, lindexer = lib.outer_join_indexer(a, b)\n result = np.empty((len(result_index), ak + bk), dtype=np.float64)\n lib.take_join_contiguous(av, bv, lindexer, rindexer, result)\n # result = np.empty((ak + bk, len(result_index)), dtype=np.float64)\n # lib.take_axis0(av, rindexer, out=result[:ak].T)\n # lib.take_axis0(bv, lindexer, out=result[ak:].T)\n return result_index, result\n\n\ndef do_inner_join_multi(a, b, av, bv):\n n, ak = av.shape\n _, bk = bv.shape\n result_index, rindexer, lindexer = lib.inner_join_indexer(a, b)\n result = np.empty((len(result_index), ak + bk), dtype=np.float64)\n lib.take_join_contiguous(av, bv, lindexer, rindexer, result)\n # result = np.empty((ak + bk, len(result_index)), dtype=np.float64)\n # lib.take_axis0(av, rindexer, out=result[:ak].T)\n # lib.take_axis0(bv, lindexer, out=result[ak:].T)\n return result_index, result\n\n\ndef do_left_join_multi_v2(a, b, av, bv):\n indexer, mask = lib.ordered_left_join_int64(a, b)\n bv_taken = bv.take(indexer, axis=0)\n np.putmask(bv_taken, mask.repeat(bv.shape[1]), np.nan)\n return np.concatenate((av, bv_taken), axis=1)\n\n\ndef do_left_join_series(a, b):\n return b.reindex(a.index)\n\n\ndef do_left_join_frame(a, b):\n a.index._indexMap = None\n b.index._indexMap = None\n return a.join(b, how='left')\n\n\n# a = np.array([1, 2, 3, 4, 5], dtype=np.int64)\n# b = np.array([0, 3, 5, 7, 9], dtype=np.int64)\n# print(lib.inner_join_indexer(a, b))\n\nout = np.empty((10, 120000))\n\n\ndef join(a, b, av, bv, how=\"left\"):\n func_dict = {'left': do_left_join_multi,\n 'outer': do_outer_join_multi,\n 'inner': do_inner_join_multi}\n\n f = func_dict[how]\n return f(a, b, av, bv)\n\n\ndef bench_python(n=100000, pct_overlap=0.20, K=1):\n import gc\n ns = [2, 3, 4, 5, 6]\n iterations = 200\n pct_overlap = 0.2\n kinds = ['outer', 'left', 'inner']\n\n all_results = {}\n for logn in ns:\n n = 10 ** logn\n a = np.arange(n, dtype=np.int64)\n b = np.arange(n * pct_overlap, n * pct_overlap + n, dtype=np.int64)\n\n avf = np.random.randn(n, K)\n bvf = np.random.randn(n, K)\n\n a_frame = DataFrame(avf, index=a, columns=lrange(K))\n b_frame = DataFrame(bvf, index=b, columns=lrange(K, 2 * K))\n\n all_results[logn] = result = {}\n\n for kind in kinds:\n gc.disable()\n elapsed = 0\n _s = time.clock()\n for i in range(iterations):\n if i % 10 == 0:\n elapsed += time.clock() - _s\n gc.collect()\n _s = time.clock()\n a_frame.join(b_frame, how=kind)\n # join(a, b, avf, bvf, how=kind)\n elapsed += time.clock() - _s\n gc.enable()\n result[kind] = (elapsed / iterations) * 1000\n\n return DataFrame(all_results, index=kinds)\n\n\ndef bench_xts(n=100000, pct_overlap=0.20):\n from pandas.rpy.common import r\n r('a <- 5')\n\n xrng = '1:%d' % n\n\n start = n * pct_overlap + 1\n end = n + start - 1\n yrng = '%d:%d' % (start, end)\n\n r('library(xts)')\n\n iterations = 500\n\n kinds = ['left', 'outer', 'inner']\n result = {}\n for kind in kinds:\n r('x <- xts(rnorm(%d), 
as.POSIXct(Sys.Date()) + %s)' % (n, xrng))\n r('y <- xts(rnorm(%d), as.POSIXct(Sys.Date()) + %s)' % (n, yrng))\n stmt = 'for (i in 1:%d) merge(x, y, join=\"%s\")' % (iterations, kind)\n elapsed = r('as.list(system.time(%s, gcFirst=F))$elapsed' % stmt)[0]\n result[kind] = (elapsed / iterations) * 1000\n return Series(result)\n" ]
[ [ "pandas.Series", "pandas.tslib.array_to_datetime", "pandas.DataFrame", "pandas.core.config.get_option", "pandas.lib.ismember", "pandas.types.common.is_integer_dtype", "pandas.types.missing.notnull", "pandas.Index", "pandas.types.common.is_list_like", "pandas.tslib.array_with_unit_to_datetime", "pandas.to_numeric", "pandas.lib.try_parse_year_month_day", "pandas.types.common.is_datetime64_ns_dtype", "pandas.tslib.parse_datetime_string_with_reso", "pandas.tslib.get_timezone", "pandas.tseries.index.DatetimeIndex._simple_new", "pandas.tseries.index.DatetimeIndex", "numpy.array", "pandas.tslib.datetime_to_datetime64", "pandas.types.common._ensure_object", "pandas.tslib.array_strptime", "pandas.types.common.is_datetime64tz_dtype", "pandas.types.common.is_datetime64_dtype", "numpy.empty" ], [ "pandas.tseries.frequencies.to_offset", "pandas.core.common.AbstractMethodError", "pandas.tseries.frequencies.get_freq_code", "numpy.busday_offset", "numpy.where", "pandas.tseries.tools.to_datetime", "pandas.tslib.tz_convert_single", "numpy.busdaycalendar", "pandas.tseries.frequencies.get_offset", "pandas.tslib._localize_pydatetime", "pandas.tslib.pydt_to_i8", "pandas.tslib.Timestamp", "pandas.tslib.tot_seconds", "pandas.tseries.tools.normalize_date", "pandas.tslib.shift_months", "pandas.tslib.Timedelta", "numpy.datetime64", "numpy.is_busday", "pandas.tslib.monthrange", "pandas.compat.range" ], [ "pandas.concat", "pandas.util.testing.ensure_clean", "numpy.linspace", "pandas.util.testing.assertRaisesRegexp", "pandas.compat.is_platform_windows", "pandas.compat.StringIO", "pandas.util.testing.assert_produces_warning", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "numpy.random.rand", "pandas.util.testing._skip_if_32bit", "pandas.compat.lrange", "pandas.compat.range" ], [ "pandas.formats.printing.pprint_thing", "pandas.compat.map", "pandas.compat.lmap", "pandas.compat.u" ], [ "pandas.to_datetime", "pandas.Series", "pandas.PeriodIndex", "pandas.period_range", "numpy.arange", "pandas.util.testing.assert_series_equal", "pandas.DataFrame", "pandas.Timedelta", "pandas.util.testing.assert_frame_equal", "numpy.datetime64", "pandas.DatetimeIndex", "pandas.Period", "pandas.date_range", "pandas.Timestamp" ], [ "pandas.core.common.take_1d", "pandas.util.testing.test_parallel" ], [ "pandas.lib.ordered_left_join_int64", "numpy.arange", "pandas.lib.left_join_1d", "pandas.lib.outer_join_indexer", "numpy.tile", "pandas.lib.take_1d", "pandas.rpy.common.r", "numpy.concatenate", "pandas.lib.left_join_2d", "pandas.lib.inner_join_indexer", "numpy.random.randn", "pandas.lib.take_join_contiguous", "pandas.compat.lrange", "numpy.empty", "pandas.compat.range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.20", "0.19" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "0.19", "1.1", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lalf-Klein/megnet
[ "e3977ce372b74380268659e964c85bf59c1aac34" ]
[ "megnet/utils/tests/test_general.py" ]
[ "import unittest\nimport numpy as np\n\nfrom megnet.utils.general import expand_1st, to_list, fast_label_binarize\n\n\nclass TestGeneralUtils(unittest.TestCase):\n def test_expand_dim(self):\n x = np.array([1, 2, 3])\n self.assertListEqual(list(expand_1st(x).shape), [1, 3])\n\n def test_to_list(self):\n x = 1\n y = [1]\n z = tuple([1, 2, 3])\n v = np.array([1, 2, 3])\n k = np.array([[1, 2], [3, 4]])\n for k in [x, y, z, v, k]:\n self.assertTrue(type(to_list(k)), list)\n\n def test_fast_label_binarize(self):\n binaries = fast_label_binarize(1, [0, 1])\n self.assertListEqual(binaries, [0])\n binaries = fast_label_binarize(1, [0, 1, 2])\n self.assertListEqual(binaries, [0, 1, 0])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
benoitLemoine/stage2A
[ "a81de38deaf786227e6d34c04803da5e5854c9f1", "a81de38deaf786227e6d34c04803da5e5854c9f1", "a81de38deaf786227e6d34c04803da5e5854c9f1" ]
[ "detection/yolov3/yolov3.py", "detection/yolov3/src/evaluate.py", "detection/tinyYolo/src/yolov3_tf2/dataset.py" ]
[ "import tensorflow as tf\nimport cv2 as cv\nimport numpy as np\n\nfrom PIL import Image\nfrom core import utils\n\nclassesPath = \"../../data/coco.names\"\nmodelPath = \"../../checkpoint/yolov3_cpu_nms.pb\"\n\nIMAGE_H, IMAGE_W = 416, 416\nclasses = utils.read_coco_names(classesPath)\nnum_classes = len(classes)\ninput_tensor, output_tensors = utils.read_pb_return_tensors(tf.get_default_graph(), modelPath,\n [\"Placeholder:0\", \"concat_9:0\", \"mul_6:0\"])\n\n\nclass YoloV3Net:\n def __init__(self):\n self.sess = tf.Session()\n\n def run(self, img):\n # Processing frame\n img_resized = self._preprocessFrame(img)\n\n boxes, scores = self.sess.run(output_tensors, feed_dict={input_tensor: np.expand_dims(img_resized, axis=0)})\n boxes, scores, labels = utils.cpu_nms(boxes, scores, num_classes, score_thresh=0.4, iou_thresh=0.5)\n\n # Keeping only box labelled \"person\"\n if boxes is not None:\n boxes = self._getOnlyDetectedPeople(boxes, labels)\n\n return boxes\n\n def __del__(self):\n self.sess.close()\n\n @staticmethod\n def _getOnlyDetectedPeople(boxes, labels):\n pBoxes = []\n for i in np.arange(len(boxes)):\n if labels[i] == 0:\n pBoxes.append(boxes[i])\n return pBoxes\n\n @staticmethod\n def _preprocessFrame(frame):\n frameRGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)\n image = Image.fromarray(frameRGB)\n img_resized = np.array(image.resize(size=(IMAGE_H, IMAGE_W)), dtype=np.float32)\n return img_resized / 255.\n", "#! /usr/bin/env python\n# coding=utf-8\n#================================================================\n# Copyright (C) 2018 * Ltd. All rights reserved.\n#\n# Editor : VIM\n# File name : evaluate.py\n# Author : YunYang1994\n# Created date: 2018-12-20 11:58:21\n# Description : compute mAP\n#\n#================================================================\n\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom core import utils, yolov3\nfrom core.dataset import dataset, Parser\nsess = tf.Session()\n\n\nIMAGE_H, IMAGE_W = 416, 416\nCLASSES = utils.read_coco_names('./data/raccoon.names')\nNUM_CLASSES = len(CLASSES)\nANCHORS = utils.get_anchors('./data/raccoon_anchors.txt', IMAGE_H, IMAGE_W)\nCKPT_FILE = \"./checkpoint/yolov3.ckpt-2500\"\nIOU_THRESH = 0.5\nSCORE_THRESH = 0.3\n\nall_detections = []\nall_annotations = []\nall_aver_precs = {CLASSES[i]:0. 
for i in range(NUM_CLASSES)}\n\ntest_tfrecord = \"./raccoon_dataset/raccoon_*.tfrecords\"\nparser = Parser(IMAGE_H, IMAGE_W, ANCHORS, NUM_CLASSES)\ntestset = dataset(parser, test_tfrecord , batch_size=1, shuffle=None, repeat=False)\n\n\nimages_tensor, *y_true_tensor = testset.get_next()\nmodel = yolov3.yolov3(NUM_CLASSES, ANCHORS)\nwith tf.variable_scope('yolov3'):\n pred_feature_map = model.forward(images_tensor, is_training=False)\n y_pred_tensor = model.predict(pred_feature_map)\n\nsaver = tf.train.Saver()\nsaver.restore(sess, CKPT_FILE)\n\ntry:\n image_idx = 0\n while True:\n y_pred, y_true, image = sess.run([y_pred_tensor, y_true_tensor, images_tensor])\n pred_boxes = y_pred[0][0]\n pred_confs = y_pred[1][0]\n pred_probs = y_pred[2][0]\n image = Image.fromarray(np.uint8(image[0]*255))\n\n true_labels_list, true_boxes_list = [], []\n for i in range(3):\n true_probs_temp = y_true[i][..., 5: ]\n true_boxes_temp = y_true[i][..., 0:4]\n object_mask = true_probs_temp.sum(axis=-1) > 0\n\n true_probs_temp = true_probs_temp[object_mask]\n true_boxes_temp = true_boxes_temp[object_mask]\n\n true_labels_list += np.argmax(true_probs_temp, axis=-1).tolist()\n true_boxes_list += true_boxes_temp.tolist()\n\n pred_boxes, pred_scores, pred_labels = utils.cpu_nms(pred_boxes, pred_confs*pred_probs, NUM_CLASSES,\n score_thresh=SCORE_THRESH, iou_thresh=IOU_THRESH)\n # image = datasets.draw_boxes(image, pred_boxes, pred_scores, pred_labels, CLASSES, [IMAGE_H, IMAGE_W], show=True)\n true_boxes = np.array(true_boxes_list)\n box_centers, box_sizes = true_boxes[:,0:2], true_boxes[:,2:4]\n\n true_boxes[:,0:2] = box_centers - box_sizes / 2.\n true_boxes[:,2:4] = true_boxes[:,0:2] + box_sizes\n pred_labels_list = [] if pred_labels is None else pred_labels.tolist()\n\n all_detections.append( [pred_boxes, pred_scores, pred_labels_list])\n all_annotations.append([true_boxes, true_labels_list])\n image_idx += 1\n if image_idx % 100 == 0:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n\nexcept tf.errors.OutOfRangeError:\n pass\n\n\nfor idx in range(NUM_CLASSES):\n true_positives = []\n scores = []\n num_annotations = 0\n\n for i in tqdm(range(len(all_annotations)), desc=\"Computing AP for class %12s\" %(CLASSES[idx])):\n pred_boxes, pred_scores, pred_labels_list = all_detections[i]\n true_boxes, true_labels_list = all_annotations[i]\n detected = []\n num_annotations += true_labels_list.count(idx)\n\n for k in range(len(pred_labels_list)):\n if pred_labels_list[k] != idx: continue\n\n scores.append(pred_scores[k])\n ious = utils.bbox_iou(pred_boxes[k:k+1], true_boxes)\n m = np.argmax(ious)\n if ious[m] > IOU_THRESH and pred_labels_list[k] == true_labels_list[m] and m not in detected:\n detected.append(m)\n true_positives.append(1)\n else:\n true_positives.append(0)\n\n num_predictions = len(true_positives)\n true_positives = np.array(true_positives)\n false_positives = np.ones_like(true_positives) - true_positives\n # sorted by score\n indices = np.argsort(-np.array(scores))\n false_positives = false_positives[indices]\n true_positives = true_positives[indices]\n # compute false positives and true positives\n false_positives = np.cumsum(false_positives)\n true_positives = np.cumsum(true_positives)\n # compute recall and precision\n recall = true_positives / np.maximum(num_annotations, np.finfo(np.float64).eps)\n precision = true_positives / np.maximum(num_predictions, np.finfo(np.float64).eps)\n # compute average precision\n average_precision = utils.compute_ap(recall, precision)\n all_aver_precs[CLASSES[idx]] = 
average_precision\n\nfor idx in range(NUM_CLASSES):\n cls_name = CLASSES[idx]\n print(\"=> Class %10s - AP: %.4f\" %(cls_name, all_aver_precs[cls_name]))\n\nprint(\"=> mAP: %.4f\" %(sum(all_aver_precs.values()) / NUM_CLASSES))\n\n\n\n", "import tensorflow as tf\n\n\ndef transform_targets_for_output(y_true, grid_size, anchor_idxs, classes):\n # y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))\n N = tf.shape(y_true)[0]\n\n # y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])\n y_true_out = tf.zeros(\n (N, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6))\n\n anchor_idxs = tf.cast(anchor_idxs, tf.int32)\n\n indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)\n updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)\n idx = 0\n for i in tf.range(N):\n for j in tf.range(tf.shape(y_true)[1]):\n if tf.equal(y_true[i][j][2], 0):\n continue\n anchor_eq = tf.equal(\n anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))\n\n if tf.reduce_any(anchor_eq):\n box = y_true[i][j][0:4]\n box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2\n\n anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)\n grid_xy = tf.cast(box_xy // (1/grid_size), tf.int32)\n\n # grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)\n indexes = indexes.write(\n idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])\n updates = updates.write(\n idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])\n idx += 1\n\n # tf.print(indexes.stack())\n # tf.print(updates.stack())\n\n return tf.tensor_scatter_nd_update(\n y_true_out, indexes.stack(), updates.stack())\n\n\ndef transform_targets(y_train, anchors, anchor_masks, classes):\n y_outs = []\n grid_size = 13\n\n # calculate anchor index for true boxes\n anchors = tf.cast(anchors, tf.float32)\n anchor_area = anchors[..., 0] * anchors[..., 1]\n box_wh = y_train[..., 2:4] - y_train[..., 0:2]\n box_wh = tf.tile(tf.expand_dims(box_wh, -2),\n (1, 1, tf.shape(anchors)[0], 1))\n box_area = box_wh[..., 0] * box_wh[..., 1]\n intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * \\\n tf.minimum(box_wh[..., 1], anchors[..., 1])\n iou = intersection / (box_area + anchor_area - intersection)\n anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)\n anchor_idx = tf.expand_dims(anchor_idx, axis=-1)\n\n y_train = tf.concat([y_train, anchor_idx], axis=-1)\n\n for anchor_idxs in anchor_masks:\n y_outs.append(transform_targets_for_output(\n y_train, grid_size, anchor_idxs, classes))\n grid_size *= 2\n\n return tuple(y_outs)\n\n\ndef transform_images(x_train, size):\n x_train = tf.image.resize(x_train, (size, size))\n x_train = x_train / 255\n return x_train\n\n\n# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/using_your_own_dataset.md#conversion-script-outline-conversion-script-outline\nIMAGE_FEATURE_MAP = {\n 'image/width': tf.io.FixedLenFeature([], tf.int64),\n 'image/height': tf.io.FixedLenFeature([], tf.int64),\n 'image/filename': tf.io.FixedLenFeature([], tf.string),\n 'image/source_id': tf.io.FixedLenFeature([], tf.string),\n 'image/key/sha256': tf.io.FixedLenFeature([], tf.string),\n 'image/encoded': tf.io.FixedLenFeature([], tf.string),\n 'image/format': tf.io.FixedLenFeature([], tf.string),\n 'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),\n 'image/object/class/text': tf.io.VarLenFeature(tf.string),\n 'image/object/class/label': 
tf.io.VarLenFeature(tf.int64),\n 'image/object/difficult': tf.io.VarLenFeature(tf.int64),\n 'image/object/truncated': tf.io.VarLenFeature(tf.int64),\n 'image/object/view': tf.io.VarLenFeature(tf.string),\n}\n\n\ndef parse_tfrecord(tfrecord, class_table):\n x = tf.io.parse_single_example(tfrecord, IMAGE_FEATURE_MAP)\n x_train = tf.image.decode_jpeg(x['image/encoded'], channels=3)\n x_train = tf.image.resize(x_train, (416, 416))\n\n class_text = tf.sparse.to_dense(\n x['image/object/class/text'], default_value='')\n labels = tf.cast(class_table.lookup(class_text), tf.float32)\n y_train = tf.stack([tf.sparse.to_dense(x['image/object/bbox/xmin']),\n tf.sparse.to_dense(x['image/object/bbox/ymin']),\n tf.sparse.to_dense(x['image/object/bbox/xmax']),\n tf.sparse.to_dense(x['image/object/bbox/ymax']),\n labels], axis=1)\n\n paddings = [[0, 100 - tf.shape(y_train)[0]], [0, 0]]\n y_train = tf.pad(y_train, paddings)\n\n return x_train, y_train\n\n\ndef load_tfrecord_dataset(file_pattern, class_file):\n LINE_NUMBER = -1 # TODO: use tf.lookup.TextFileIndex.LINE_NUMBER\n class_table = tf.lookup.StaticHashTable(tf.lookup.TextFileInitializer(\n class_file, tf.string, 0, tf.int64, LINE_NUMBER, delimiter=\"\\n\"), -1)\n\n files = tf.data.Dataset.list_files(file_pattern)\n dataset = files.flat_map(tf.data.TFRecordDataset)\n return dataset.map(lambda x: parse_tfrecord(x, class_table))\n\n\ndef load_fake_dataset():\n x_train = tf.image.decode_jpeg(\n open('./data/girl.png', 'rb').read(), channels=3)\n x_train = tf.expand_dims(x_train, axis=0)\n\n labels = [\n [0.18494931, 0.03049111, 0.9435849, 0.96302897, 0],\n [0.01586703, 0.35938117, 0.17582396, 0.6069674, 56],\n [0.09158827, 0.48252046, 0.26967454, 0.6403017, 67]\n ] + [[0, 0, 0, 0, 0]] * 5\n y_train = tf.convert_to_tensor(labels, tf.float32)\n y_train = tf.expand_dims(y_train, axis=0)\n\n return tf.data.Dataset.from_tensor_slices((x_train, y_train))\n" ]
[ [ "tensorflow.get_default_graph", "numpy.expand_dims", "tensorflow.Session" ], [ "numpy.ones_like", "numpy.uint8", "numpy.cumsum", "numpy.finfo", "numpy.argmax", "tensorflow.variable_scope", "tensorflow.Session", "tensorflow.train.Saver", "numpy.array" ], [ "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.cast", "tensorflow.minimum", "tensorflow.equal", "tensorflow.data.Dataset.list_files", "tensorflow.pad", "tensorflow.where", "tensorflow.lookup.TextFileInitializer", "tensorflow.io.VarLenFeature", "tensorflow.argmax", "tensorflow.image.decode_jpeg", "tensorflow.shape", "tensorflow.TensorArray", "tensorflow.reduce_any", "tensorflow.sparse.to_dense", "tensorflow.range", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.io.parse_single_example", "tensorflow.expand_dims", "tensorflow.io.FixedLenFeature", "tensorflow.image.resize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JamesWang007/Dive-into-DL-PyTorch
[ "267b54168322ab37da44e83008fba4f24b70fa9f" ]
[ "mycode/test_03_ch5_4.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 25 23:09:40 2020\n 5.4 池化层\n https://tangshusen.me/Dive-into-DL-PyTorch/#/chapter05_CNN/5.4_pooling\n@author: bejin\n\"\"\"\n\n\nimport torch\nfrom torch import nn\n\n\ndef pool2d(X, pool_size, mode='max'):\n X = X.float()\n p_h, p_w = pool_size\n Y = torch.zeros(X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean() \n return Y\n\n\nX = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])\npool2d(X, (2, 2))\n\n\n\npool2d(X, (2, 2), 'avg')\n\n\n\n# 5.4.2 填充和步幅\nX = torch.arange(16, dtype=torch.float).view((1, 1, 4, 4))\nX\n\n\n\npool2d = nn.MaxPool2d(3)\npool2d(X) \n\n\n\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\n\n\n\npool2d = nn.MaxPool2d((2, 4), padding=(1, 2), stride=(2, 3))\npool2d(X)\n\n\n# 5.4.3 多通道\nX = torch.cat((X, X + 1), dim=1)\nX\n\n\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.zeros", "torch.cat", "torch.tensor", "torch.nn.MaxPool2d", "torch.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dheerajgupta0001/rtm_daily_report_generator
[ "591a21ed1888df8b50eb8e873e03387df5d5a6e7" ]
[ "src/app/section_3/sectionWrInjGraph.py" ]
[ "import datetime as dt\nfrom src.repos.metricsData.metricsDataRepo import MetricsDataRepo\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nplt.rcParams.update({'figure.max_open_warning': 0})\nimport matplotlib.dates as mdates\n\n\ndef fetchWrInjGraphContext(appDbConnStr: str, startDt: dt.datetime, endDt: dt.datetime) -> bool:\n mRepo = MetricsDataRepo(appDbConnStr)\n\n # get iex rtm data for the range between start date and end date\n wbesRtmIexVals = mRepo.getWbesRtmIexBeneficiaryBlockWiseData(startDt, endDt,beneficiary='West ',beneficiary_type=' Injection ')\n\n wbesRtmPxiVals = mRepo.getWbesRtmPxiBeneficiaryBlockWiseData(startDt, endDt,beneficiary='West ',beneficiary_type=' Injection ')\n wbesPxIexVals = mRepo.getWbesPxIexBeneficiaryBlockWiseData(startDt, endDt,beneficiary='West ',beneficiary_type=' Injection ')\n\n wbesPxPxiVals = mRepo.getWbesPxPxiBeneficiaryBlockWiseData(startDt, endDt,beneficiary='West ',beneficiary_type=' Injection ')\n\n wbesRtmIexDf = pd.DataFrame(wbesRtmIexVals)\n wbesRtmPxiDf = pd.DataFrame(wbesRtmPxiVals)\n wbesPxIexDf = pd.DataFrame(wbesPxIexVals)\n wbesPxPxiDf = pd.DataFrame(wbesPxPxiVals)\n\n # wbesRtmPxiDf['date']=wbesRtmPxiDf['time_stamp'].dt.date\n # wbesRtmPxiDf['time']=wbesRtmPxiDf['time_stamp'].dt.time\n wbesRtmPxiDf.drop(['beneficiary','beneficiary_type'],axis=1,inplace=True)\n # wbesRtmPxiDf = wbesRtmPxiDf.pivot(index='time',columns='date', values='data_value')\n\n\n # wbesRtmIexDf['date'] = wbesRtmIexDf['time_stamp'].dt.date\n # wbesRtmIexDf['time'] = wbesRtmIexDf['time_stamp'].dt.time\n wbesRtmIexDf.drop(['beneficiary', 'beneficiary_type'], axis=1, inplace=True)\n # wbesRtmIexDf = wbesRtmIexDf.pivot(index='time', columns='date', values='data_value')\n wbesPxIexDf.drop(['beneficiary','beneficiary_type'],axis=1,inplace=True)\n wbesPxPxiDf.drop(['beneficiary','beneficiary_type'],axis=1,inplace=True)\n wbesPxDf = wbesRtmPxiDf.append(wbesPxIexDf,ignore_index=False).groupby(['time_stamp']).sum().reset_index()\n wbesRtmDf = wbesRtmPxiDf.append(wbesRtmIexDf,ignore_index=False).groupby(['time_stamp']).sum().reset_index()\n wbesRtmDf['time_stamp']=wbesRtmDf['time_stamp'].dt.date\n wbesPxDf['time_stamp']=wbesPxDf['time_stamp'].dt.date\n\n wbesRtmDfMax = wbesRtmDf.groupby(['time_stamp']).max().reset_index()\n wbesRtmDfMin = wbesRtmDf.groupby(['time_stamp']).min().reset_index()\n mergewbesRtmDf=pd.merge(wbesRtmDfMax,wbesRtmDfMin,on='time_stamp')\n mergewbesRtmDf.set_index(['time_stamp'],inplace=True)\n mergewbesRtmDf = mergewbesRtmDf.rename(columns={'data_value_x': 'RTM_MAX','data_value_y':'RTM_MIN'})\n\n wbesPxDfMax = wbesPxDf.groupby(['time_stamp']).max().reset_index()\n wbesPxDfMin = wbesPxDf.groupby(['time_stamp']).min().reset_index()\n mergeWbesPxDf=pd.merge(wbesPxDfMax,wbesPxDfMin,on='time_stamp')\n mergeWbesPxDf.set_index(['time_stamp'],inplace=True)\n mergeWbesPxDf = mergeWbesPxDf.rename(columns={'data_value_x': 'DAM_MAX','data_value_y':'DAM_MIN'})\n\n # derive plot title\n pltTitle = 'WR Sell RTM vs DAM'\n\n # create a plotting area and get the figure, axes handle in return\n fig, ax = plt.subplots(figsize=(7.5, 4.5))\n # instantiate a second axes that shares the same x-axis\n ax2 = ax.twinx()\n # set plot title\n ax.set_title(pltTitle)\n ax.set_ylabel('MW')\n ax2.set_ylabel('RTM SELL MIN(MW)')\n ax.set_facecolor(\"#474747\")\n # fig.patch.set_facecolor('#d9ccff')\n\n clr = ['#66b3ff', '#df80ff', '#ff6666', '#00b359']\n\n # plot data and get the line artist object in return\n laThisMonth, = ax.plot(\n mergewbesRtmDf.index.values, 
mergewbesRtmDf['RTM_MAX'].values, color='#66b3ff')\n laThisMonth.set_label('RTM Sell Max')\n\n laLastYear, = ax.plot(\n mergeWbesPxDf.index.values, mergeWbesPxDf['DAM_MAX'].values, color='#df80ff')\n laLastYear.set_label('DAM Sell Max')\n\n laPrevMonth, = ax2.plot(\n mergewbesRtmDf.index.values, mergewbesRtmDf['RTM_MIN'].values, color='#00b359')\n laPrevMonth.set_label('RTM Sell Min')\n\n laPrevMonth, = ax.plot(\n mergeWbesPxDf.index.values, mergeWbesPxDf['DAM_MIN'].values, color='#ff6666')\n laPrevMonth.set_label('DAM Sell Min')\n # plt.show()\n # ax.set_xlim((1, 31), auto=True)\n # enable legends\n ax.legend(bbox_to_anchor=(0.0, -0.3, 1, 0), loc='best',\n ncol=3, mode=\"expand\", borderaxespad=0.)\n ax2.legend(bbox_to_anchor=(0.0, -0.3, 1, 0), loc='lower right',\n ncol=3, mode=\"expand\", borderaxespad=0.)\n fig.subplots_adjust(bottom=0.25, top=0.8)\n fig.savefig('assets/section_3_1.png')\n\n plt.close()\n\n\n return True\n" ]
[ [ "pandas.merge", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.close", "matplotlib.pyplot.rcParams.update" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ii-research-yu/pgbm
[ "d050a5f71f1a458d8269c4f5201744c0d7c4d487" ]
[ "examples/pytorch/example04_housing_validation.py" ]
[ "\"\"\"\n Copyright (c) 2021 Olivier Sprangers as part of Airlab Amsterdam\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n https://github.com/elephaint/pgbm/blob/main/LICENSE\n\n\"\"\"\n\n#%% Load packages\nimport torch\nfrom pgbm import PGBM\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import fetch_california_housing\nimport matplotlib.pyplot as plt\n#%% Objective for pgbm\ndef mseloss_objective(yhat, y, sample_weight=None):\n gradient = (yhat - y)\n hessian = torch.ones_like(yhat)\n\n return gradient, hessian\n\ndef rmseloss_metric(yhat, y, sample_weight=None):\n loss = (yhat - y).pow(2).mean().sqrt()\n\n return loss\n#%% Load data\nX, y = fetch_california_housing(return_X_y=True)\n#%% Parameters\nparams = {'min_split_gain':0,\n 'min_data_in_leaf':2,\n 'max_leaves':8,\n 'max_bin':64,\n 'learning_rate':0.1,\n 'n_estimators':2000,\n 'verbose':2,\n 'early_stopping_rounds':100,\n 'feature_fraction':1,\n 'bagging_fraction':1,\n 'seed':1,\n 'reg_lambda':1,\n 'device':'gpu',\n 'gpu_device_id':0,\n 'derivatives':'exact',\n 'distribution':'normal'}\n\nn_forecasts = 1000\nn_splits = 2\nbase_estimators = 2000\n#%% Validation loop\nrmse, crps = torch.zeros(n_splits), torch.zeros(n_splits)\nfor i in range(n_splits):\n print(f'Fold {i+1}/{n_splits}')\n # Split for model validation\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=i)\n X_train_val, X_val, y_train_val, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=i)\n # Build datasets\n train_data = (X_train, y_train)\n train_val_data = (X_train_val, y_train_val)\n valid_data = (X_val, y_val)\n # Train to retrieve best iteration\n print('PGBM Validating on partial dataset...')\n params['n_estimators'] = base_estimators\n model = PGBM()\n model.train(train_val_data, objective=mseloss_objective, metric=rmseloss_metric, valid_set=valid_data, params=params)\n # Set iterations to best iteration\n params['n_estimators'] = model.best_iteration\n # Retrain on full set \n print('PGBM Training on full dataset...')\n model = PGBM()\n model.train(train_data, objective=mseloss_objective, metric=rmseloss_metric, params=params)\n #% Predictions\n print('PGBM Prediction...')\n yhat_point = model.predict(X_test)\n yhat_dist = model.predict_dist(X_test, n_forecasts=n_forecasts)\n # Scoring\n rmse[i] = model.metric(yhat_point.cpu(), y_test)\n crps[i] = model.crps_ensemble(yhat_dist.cpu(), y_test).mean() \n # Print scores current fold\n print(f'RMSE Fold {i+1}, {rmse[i]:.2f}')\n print(f'CRPS Fold {i+1}, {crps[i]:.2f}')\n \n# Print final scores\nprint(f'RMSE {rmse.mean():.2f}+-{rmse.std():.2f}')\nprint(f'CRPS {crps.mean():.2f}+-{crps.std():.2f}')\n#%% Plot all samples\nplt.plot(y_test, 'o', label='Actual')\nplt.plot(yhat_point.cpu(), 'ko', label='Point prediction PGBM')\nplt.plot(yhat_dist.cpu().max(dim=0).values, 'k--', label='Max bound PGBM')\nplt.plot(yhat_dist.cpu().min(dim=0).values, 'k--', label='Min bound PGBM')\nplt.legend()" ]
[ [ "matplotlib.pyplot.legend", "torch.zeros", "sklearn.model_selection.train_test_split", "sklearn.datasets.fetch_california_housing", "matplotlib.pyplot.plot", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhangkaifang/cp_decomposition
[ "75eb5f3df302c08ad2e62f41e6d93bb990a16797", "75eb5f3df302c08ad2e62f41e6d93bb990a16797" ]
[ "matrix_cp_one.py", "tensor_cp_one.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n'''=====================================\n@Author :Kaifang Zhang\n@Time :2021/7/5 1:31\n@Contact: [email protected]\n========================================'''\nimport numpy as np\n\n\ndef LFM_grad_desc(R, K, max_iter, alpha=1e-4, lamda=1e-4):\n \"\"\"\n 实现矩阵缺失元素补全使用梯度下降法!\n \"\"\"\n # 基本维度参数定义\n M = len(R)\n N = len(R[0])\n\n # P、Q初始值,随机生成\n P = np.random.rand(M, K)\n Q = np.random.rand(N, K)\n Q = Q.T\n # 开始迭代\n for step in range(max_iter):\n # 对所有的用户u、物品i做遍历,对应的特征向量Pu,Qi梯度下降\n for u in range(M):\n for i in range(N):\n # 对于每一个大于0的评分,求出预测的评分误差\n if R[u][i] > 0:\n eui = np.dot(P[u, :], Q[:, i]) - R[u][i]\n\n # 带入公式,按照梯度下降算法更新当前的Pu与Qi\n for k in range(K):\n P[u][k] = P[u][k] - alpha * (2 * eui * Q[k][i] + 2 * lamda * P[u][k])\n Q[k][i] = Q[k][i] - alpha * (2 * eui * P[u][k] + 2 * lamda * Q[k][i])\n\n # u、i遍历完成,所有的特征向量更新完成,可以得到P、Q,可以计算预测评分矩阵\n predR = np.dot(P, Q)\n\n # 计算当前损失函数\n cost = 0\n for u in range(M):\n for i in range(N):\n if R[u][i] > 0:\n cost += (np.dot(P[u, :], Q[:, i]) - R[u][i]) ** 2\n # 加上正则化项\n for k in range(K):\n cost += lamda * (P[u][k] ** 2 + Q[k][i] ** 2)\n if step % 1000 == 0:\n print(\"迭代次数:\", step, \"损失函数:\", cost)\n if cost < 0.001:\n break\n\n return P, Q.T, cost\n\n\nif __name__ == '__main__':\n '''\n @输入参数\n R:M*N的评分矩阵\n K:隐特征向量维度\n max_iter:最大迭代次数\n alpha:步长\n lamda:正则化系数\n @输出\n 分解之后的P、Q\n P:初始化用户特征矩阵M*k\n Q:初始化物品特征矩阵N*K\n '''\n # 评分矩阵R\n R = np.array([[4, 0, 2, 0, 1],\n [0, 0, 2, 3, 1],\n [4, 1, 2, 0, 1],\n [4, 1, 2, 5, 1],\n [3, 0, 5, 0, 2],\n [1, 0, 3, 0, 4]])\n\n # 给定超参数\n K = 5\n max_iter = 100000\n alpha = 1e-4\n lamda = 1e-3\n P, Q, cost = LFM_grad_desc(R, K, max_iter, alpha, lamda)\n predR = P.dot(Q.T)\n # 预测矩阵\n print(predR)\n", "# !/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"=====================================\n@author : kaifang zhang\n@time : 2021/7/13 3:50 下午\n@contact: [email protected]\n=====================================\"\"\"\nimport numpy as np\nfrom scipy.linalg import khatri_rao\n\n\ndef ten2mat(tensor, mode):\n \"\"\"Return mu-mode matricization from a given tensor\"\"\"\n return np.reshape(np.moveaxis(tensor, mode, 0), (tensor.shape[mode], -1), order='F')\n\n\ndef mat2ten(mat1, mat2, mat3):\n \"\"\"Return tensor based on matrix\"\"\"\n return np.einsum('ir, jr, tr -> ijt', mat1, mat2, mat3)\n\n\ndef alter_optimization(X, r, nmax=50000):\n \"\"\"\n 使用交替最小二乘法对张量的CP分解填补空白数据,张量的CP分解!\n \"\"\"\n n1, n2, n3 = X.shape\n A = np.random.normal(0, 1, (n1, r)) # shape: (4, 10)\n B = np.random.normal(0, 1, (n2, r)) # shape: (7, 10)\n C = np.random.normal(0, 1, (n3, r)) # shape: (22, 10)\n\n pos = np.where(X != 0) # where内只有一个参数时,那个参数表示条件,当条件成立时,where返回的是每个符合condition条件元素的坐标,返回的是以元组的形式\n bin_X = np.zeros((n1, n2, n3))\n bin_X[pos] = 1\n X_hat = np.zeros((n1, n2, n3))\n\n for iters in range(nmax):\n ######################################## optimize A\n var1 = khatri_rao(C, B).T\n var2 = khatri_rao(var1, var1)\n var3 = np.matmul(var2, ten2mat(bin_X, 0).T).reshape([r, r, n1])\n var4 = np.matmul(var1, ten2mat(X, 0).T)\n for i in range(n1):\n var_Lambda = var3[:, :, i]\n inv_var_Lambda = np.linalg.inv((var_Lambda + var_Lambda.T) / 2)\n A[i, :] = np.matmul(inv_var_Lambda, var4[:, i])\n ######################################## optimize B\n var1 = khatri_rao(C, A).T\n var2 = khatri_rao(var1, var1)\n var3 = np.matmul(var2, ten2mat(bin_X, 1).T).reshape([r, r, n2])\n var4 = np.matmul(var1, ten2mat(X, 1).T)\n for j in range(n2):\n var_Lambda = var3[:, :, j]\n inv_var_Lambda = 
np.linalg.inv((var_Lambda + var_Lambda.T) / 2)\n B[j, :] = np.matmul(inv_var_Lambda, var4[:, j])\n ######################################## optimize C\n var1 = khatri_rao(B, A).T\n var2 = khatri_rao(var1, var1)\n var3 = np.matmul(var2, ten2mat(bin_X, 2).T).reshape([r, r, n3])\n var4 = np.matmul(var1, ten2mat(X, 2).T)\n for t in range(n3):\n var_Lambda = var3[:, :, t]\n inv_var_Lambda = np.linalg.inv((var_Lambda + var_Lambda.T) / 2)\n C[t, :] = np.matmul(inv_var_Lambda, var4[:, t])\n ######################################## Reconstruct tensor\n X_hat = mat2ten(A, B, C)\n loss = np.sum(np.square(X[pos] - X_hat[pos])) / X[pos].shape[0]\n\n if (iters + 1) % 100 == 0:\n print('迭代次数:', iters, '代价函数:', loss)\n\n return X_hat\n\n\nif __name__ == '__main__':\n tensor = np.load('./data/tensor.npy')\n X_hat = alter_optimization(tensor, 10)\n print(tensor.shape)\n\n" ]
[ [ "numpy.dot", "numpy.array", "numpy.random.rand" ], [ "numpy.square", "numpy.einsum", "numpy.linalg.inv", "numpy.matmul", "numpy.random.normal", "scipy.linalg.khatri_rao", "numpy.moveaxis", "numpy.load", "numpy.where", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EmiCareOfCell44/BigDL
[ "6278ee8eed09b5072da53dab3a99530cf5f69ba2", "6278ee8eed09b5072da53dab3a99530cf5f69ba2" ]
[ "python/nano/src/bigdl/nano/pytorch/onnx/onnxrt_inference.py", "python/orca/src/bigdl/orca/tfpark/tf_optimizer.py" ]
[ "#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom pytorch_lightning import LightningModule\nimport onnxruntime as ort\nfrom functools import partial\nimport warnings\nimport torch\nimport math\nimport numpy as np\n\n\nONNXRT_BINDED_COMPONENTS = ['_ortsess_up_to_date',\n '_ortsess',\n '_build_ortsess',\n 'update_ortsess',\n 'predict_step',\n 'inference']\n\n\ndef bind_onnxrt_methods(pl_model: LightningModule):\n # class type check\n assert isinstance(pl_model, LightningModule),\\\n \"onnxruntime support is only valid for a LightningModule.\"\n\n # check conflicts\n for component in ONNXRT_BINDED_COMPONENTS:\n if component in dir(pl_model):\n warnings.warn(f\"{component} method/property will be replaced.\")\n\n # additional attributes\n pl_model._ortsess_up_to_date = False # indicate if we need to build ortsess again\n pl_model._ortsess = None # ortsess instance\n\n # internal function to build an ortsess\n def _build_ortsess(self,\n input_sample=None,\n file_path=\"model.onnx\",\n sess_options=None,\n **kwargs):\n '''\n Internal function to build a ortsess and bind to the lightningmodule.\n\n :param input_sample: torch.Tensor for the model tracing.\n :param file_path: The path to save onnx model file.\n :param sess_options: ortsess options in ort.SessionOptions type\n :param **kwargs: will be passed to torch.onnx.export function.\n '''\n\n if input_sample is None and self.example_input_array is not None:\n input_sample = self.example_input_array # use internal example_input_array\n else:\n self.example_input_array = input_sample # set example_input_array for future usage\n\n assert input_sample is not None,\\\n 'You should set either input_sample or self.example_input_array'\n\n default_onnx_export_args = {'export_params': True,\n 'opset_version': 10,\n 'do_constant_folding': True,\n 'input_names': ['input'],\n 'output_names': ['output'],\n 'dynamic_axes': {'input': {0: 'batch_size'},\n 'output': {0: 'batch_size'}}}\n default_onnx_export_args.update(kwargs)\n\n self.to_onnx(file_path,\n input_sample,\n **default_onnx_export_args)\n\n self._ortsess = ort.InferenceSession(file_path, sess_options=sess_options)\n self._ortsess_up_to_date = True\n\n pl_model._build_ortsess = partial(_build_ortsess, pl_model)\n\n # external method to update(& rebuild) ortsess\n def update_ortsess(self,\n input_sample=None,\n file_path=\"model.onnx\",\n sess_options=None,\n **kwargs):\n '''\n Update the onnxruntime session options and rebuild the session.\n Users may also want to call this method before `inference(..., onnx=True`)`\n to avoid implicit building.\n\n :param input_sample: torch.Tensor for the model tracing.\n :param file_path: The path to save onnx model file.\n :param sess_options: ortsess options in ort.SessionOptions type.\n :param **kwargs: will be passed to torch.onnx.export function.\n '''\n self._build_ortsess(input_sample=input_sample,\n file_path=file_path,\n sess_options=sess_options,\n **kwargs)\n\n pl_model.update_ortsess = 
partial(update_ortsess, pl_model)\n\n # on_fit_start (LightningModule method override)\n def on_fit_start_additional(function):\n def wrapped(*args, **kwargs):\n args[0]._ortsess_up_to_date = False\n return function(**kwargs) # drop *args because that pl_model function has self binded\n return wrapped\n pl_model.on_fit_start = partial(on_fit_start_additional(pl_model.on_fit_start), pl_model)\n\n # predict_step (LightningModule method overwrite)\n # note: this overwrite users' customized predict_step if valid\n def predict_step(self, batch, batch_idx):\n # use batch[0] because that we assume data loader will have 2 outputs in format of (x, y)\n return self.inference(batch[0].numpy())\n\n pl_model.predict_step = partial(predict_step, pl_model)\n\n # inference (new API to unifying users' inference method)\n def inference(self,\n input_data,\n batch_size=None,\n file_path=\"model.onnx\",\n sess_options=None,\n backend=\"onnx\",\n **kwargs):\n '''\n Inference with/without onnxruntime.\n This method will implicitly build onnxruntime session if it has never been built\n or out-of-date.\n\n :param input_data: input data for prediction. If backend is set to \"onnx\",\n the data type should be a numpy ndarray, where the first dim should be batch size.\n If backend is NOT set to \"onnx\", a torch tensor is needed and the pytorch\n forwarding method will be called.\n :param batch_size: int, inferencing batch_size. This value should not affect the\n final inferencing result but will affect resources cost(e.g. memory and time).\n Default to None, which takes all input_data in one batch.\n :param file_path: The path to save onnx model file.\n :param sess_options: ortsess options in ort.SessionOptions type.\n :param backend: str, to set the backend library. \"onnx\" for onnxruntime, which\n provides lower latency and any other value will make `inference` call\n the pytorch forwarding method.\n :param **kwargs: any other keywords that will be passed to onnx session's building.\n '''\n\n if backend == \"onnx\":\n if not self._ortsess_up_to_date:\n warnings.warn(\"Onnxruntime session will be built implicitly,\"\n \" this may harm your inference latency.\")\n input_sample = torch.Tensor(input_data)\n self._build_ortsess(input_sample=input_sample,\n file_path=file_path,\n sess_options=sess_options,\n **kwargs)\n input_name = self._ortsess.get_inputs()[0].name\n if batch_size is None:\n # this branch is only to speed up the inferencing when batch_size is set to None.\n ort_inputs = {input_name: input_data}\n ort_outs = self._ortsess.run(None, ort_inputs)\n return ort_outs[0]\n else:\n yhat_list = []\n sample_num = input_data.shape[0] # the first dim should be sample_num\n batch_num = math.ceil(sample_num / batch_size)\n for batch_id in range(batch_num):\n ort_inputs = {input_name: input_data[batch_id * batch_size:\n (batch_id + 1) * batch_size]}\n ort_outs = self._ortsess.run(None, ort_inputs)\n yhat_list.append(ort_outs[0])\n # this operation may cause performance degradation\n yhat = np.concatenate(yhat_list, axis=0)\n return yhat\n else:\n # inference w/o onnxruntime (fallback to pytorch native forward)\n self.eval()\n with torch.no_grad():\n yhat_list = []\n sample_num = input_data.shape[0] # the first dim should be sample_num\n batch_size = batch_size if batch_size else sample_num\n batch_num = math.ceil(sample_num / batch_size)\n for batch_id in range(batch_num):\n yhat_list.append(self(input_data[batch_id * batch_size:\n (batch_id + 1) * batch_size]))\n yhat = torch.cat(yhat_list, axis=0)\n return 
yhat\n\n pl_model.inference = partial(inference, pl_model)\n\n return pl_model\n", "#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom bigdl.dllib.nn.criterion import Criterion\nfrom bigdl.dllib.nn.layer import Layer\nfrom bigdl.dllib.optim.optimizer import MaxEpoch, EveryEpoch\nfrom bigdl.dllib.utils.common import to_list, JavaValue\n\nfrom bigdl.dllib.utils.file_utils import callZooFunc\nfrom bigdl.dllib.feature.common import FeatureSet\nfrom bigdl.dllib.keras.engine.topology import to_bigdl_metric, Loss, OptimMethod\nfrom bigdl.dllib.net.utils import find_placeholders, to_bigdl_optim_method, find_tensors\nfrom bigdl.dllib.estimator.estimator import Estimator\nfrom bigdl.dllib.utils import nest\nfrom bigdl.dllib.utils.triggers import EveryEpoch as ZEveryEpoch\nfrom bigdl.dllib.utils.triggers import ZooTrigger\nfrom bigdl.orca.tfpark.tf_dataset import TFNdarrayDataset, check_data_compatible\nfrom bigdl.orca.tfpark.tf_dataset import _standarize_feature_label_dataset\n\nif sys.version >= '3':\n long = int\n unicode = str\n\n\nclass IdentityCriterion(Criterion):\n def __init__(self):\n super(IdentityCriterion, self).__init__(None, \"float\")\n\n\nclass TFValidationMethod(JavaValue):\n def __init__(self, val_method, name, output_indices, label_indices):\n self.name = name\n self.val_method = val_method\n JavaValue.__init__(self, None, \"float\",\n val_method, name, output_indices, label_indices)\n\n\nclass StatelessMetric(JavaValue):\n def __init__(self, metric_name, idx, count_idx):\n self.name = metric_name\n self.idx = idx\n self.count_idx = count_idx\n JavaValue.__init__(self, None, \"float\", metric_name, idx, count_idx)\n\n\nclass BigDLMetric(object):\n def __init__(self, val_method, outputs, labels):\n self.val_method = val_method\n self.outputs = outputs\n self.labels = labels\n\n\nclass TFTrainingHelper(Layer):\n def __init__(self, path, config_proto, saver, meta, sess):\n self.saver = saver\n self.meta = meta\n self.export_dir = path\n self.sess = sess\n\n if config_proto is not None:\n import tensorflow as tf\n assert isinstance(config_proto, tf.ConfigProto), \\\n \"session_config should be a tf.ConfigProto\"\n config_proto.use_per_session_threads = True\n byte_arr = bytearray(config_proto.SerializeToString())\n else:\n byte_arr = None\n\n super(TFTrainingHelper, self).__init__(None, \"float\", path, byte_arr)\n\n def save_checkpoint(self):\n callZooFunc(self.bigdl_type, \"saveCheckpoint\",\n self.value)\n\n def get_weights_to_python(self):\n self.save_checkpoint()\n self.saver.restore(self.sess, os.path.join(self.export_dir, \"model\"))\n\n def load_checkpoint(self, path):\n callZooFunc(self.bigdl_type, \"loadZooCheckpoint\", self.value, path)\n self.get_weights_to_python()\n\n\ndef _to_operation_name(name):\n return name.split(\":\")[0]\n\n\ndef _to_floats(vs):\n return [float(v) for v in vs]\n\n\nclass TFModel(object):\n def __init__(self, training_helper_layer, 
criterion, val_methods):\n\n self.training_helper_layer = training_helper_layer\n self.criterion = criterion\n self.val_methods = val_methods\n\n @staticmethod\n def _expand_inputs(inputs, tensors_with_value, loss):\n additional_inputs = []\n additional_values = []\n inputs = nest.flatten(inputs)\n names = set([i.name for i in inputs])\n\n if tensors_with_value:\n for t, v in tensors_with_value.items():\n if t.name in names:\n msg = f\"tensor {t} already in inputs, cannot put it in tensor_with_value\"\n raise ValueError(msg)\n additional_inputs.append(t)\n additional_values.append(v)\n\n return inputs, additional_inputs, additional_values\n\n @staticmethod\n def _process_session_config(session_config):\n import tensorflow as tf\n if session_config is not None:\n\n assert isinstance(session_config, tf.ConfigProto), \\\n \"session_config should be a tf.ConfigProto\"\n session_config.use_per_session_threads = True\n return session_config\n\n @staticmethod\n def _process_grads(graph, grads):\n\n with graph.as_default():\n from bigdl.dllib.utils.tf import process_grad\n grads = [process_grad(grad) for grad in grads]\n return grads\n\n @staticmethod\n def _process_metrics(graph, metrics, real_batch_size):\n import tensorflow as tf\n outputs = [real_batch_size]\n val_methods = None\n if metrics is not None:\n idx = 1\n val_methods = []\n for metric_name in metrics:\n metric = metrics[metric_name]\n if tf.is_numeric_tensor(metric):\n outputs.append(metric)\n val_methods.append(StatelessMetric(metric_name, idx, 0))\n idx += 1\n else:\n outputs += metric.outputs\n with graph.as_default():\n val_labels = [tf.identity(v) for v in metric.labels]\n outputs += val_labels\n method = TFValidationMethod(metric.val_method,\n metric_name,\n list(range(idx, idx + len(metric.outputs))),\n list(range(idx + len(metric.outputs),\n idx + len(metric.outputs)\n + len(val_labels))))\n val_methods.append(method)\n idx += len(metric.outputs) + len(val_labels)\n\n outputs = [tf.to_float(output) for output in outputs]\n return outputs, val_methods\n\n @staticmethod\n def _process_variables(graph, variables, updates):\n import tensorflow as tf\n all_trainable_variables = variables\n\n name2idx = dict([(v.name, idx) for idx, v in enumerate(all_trainable_variables)])\n\n all_variables = graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n\n update_ops = graph.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n if updates is not None:\n update_ops += updates\n\n trainable_variables = [0] * len(all_trainable_variables)\n trainable_assigns = [0] * len(all_trainable_variables)\n trainable_variable_placeholders = [0] * len(all_trainable_variables)\n extra_variables = []\n extra_variable_assigns = []\n extra_variable_assign_placeholders = []\n for v in all_variables:\n p = tf.placeholder(dtype=v.dtype, shape=v.shape)\n a = tf.assign(v, p)\n\n # special treatment for ResourceVariable\n if v.op.type == \"VarHandleOp\":\n v_float_value = tf.to_float(v.read_value())\n else:\n v_float_value = tf.to_float(v)\n\n if v.name in name2idx:\n trainable_variables[name2idx[v.name]] = v_float_value\n trainable_assigns[name2idx[v.name]] = a\n trainable_variable_placeholders[name2idx[v.name]] = p\n else:\n extra_variables.append(v_float_value)\n extra_variable_assigns.append(a)\n extra_variable_assign_placeholders.append(p)\n\n extra_variable_assign = tf.group(*extra_variable_assigns)\n trainable_assign = tf.group(*trainable_assigns)\n update_op = tf.group(update_ops)\n\n return trainable_variables, trainable_variable_placeholders, trainable_assign, \\\n 
extra_variables, extra_variable_assign_placeholders, \\\n extra_variable_assign, update_op\n\n @staticmethod\n def _save_to_dir(folder, sess, graph,\n metric_tensors,\n batch_size_tensor,\n loss_tensor, inputs, labels, predictions,\n trainable_variables,\n trainable_variable_placeholders,\n trainable_assign,\n extra_variables,\n extra_variable_assign_placeholders,\n extra_variable_assign,\n grads, update_op, train_op,\n additional_inputs,\n additional_values):\n import tensorflow as tf\n from tensorflow import gfile\n saver = tf.train.Saver()\n if not os.path.isdir(folder):\n os.makedirs(folder)\n saver.save(sess, os.path.join(folder, \"model\"), write_meta_graph=False)\n\n meta = {\n \"inputs\": [i.name for i in inputs],\n \"input_types\": [i.dtype.as_datatype_enum for i in inputs],\n \"additional_inputs\": [i.name for i in additional_inputs],\n \"additional_input_types\": [i.dtype.as_datatype_enum for i in additional_inputs],\n \"labels\": [l.name for l in labels],\n \"label_types\": [i.dtype.as_datatype_enum for i in labels],\n \"predictions\": [t.name for t in predictions] if predictions else [],\n \"metric_tensors\": [t.name for t in metric_tensors],\n \"batch_size_tensor\": batch_size_tensor.name,\n \"loss_tensor\": loss_tensor.name,\n \"variables\": [v.name for v in trainable_variables],\n \"variable_types\": [v.dtype.as_datatype_enum for v in trainable_variable_placeholders],\n \"variable_assign_placeholders\": [v.name for v in trainable_variable_placeholders],\n \"assign_variable_op\": trainable_assign.name,\n \"extra_variables\": [v.name for v in extra_variables],\n \"extra_variable_types\": [v.dtype.as_datatype_enum for v\n in extra_variable_assign_placeholders],\n \"extra_variable_assign_placeholders\": [p.name for p in\n extra_variable_assign_placeholders],\n \"assign_extra_variable_op\": extra_variable_assign.name,\n \"grad_variables\": [g.name for g in grads],\n \"update_op\": update_op.name,\n \"restore_op\": saver.saver_def.restore_op_name,\n \"restore_path_placeholder\": saver.saver_def.filename_tensor_name,\n \"save_op\": _to_operation_name(saver.saver_def.save_tensor_name),\n \"save_path_placeholder\": saver.saver_def.filename_tensor_name,\n \"default_tensor_value\": [_to_floats(v) for v in additional_values],\n \"init_op\": tf.tables_initializer().name\n }\n\n if train_op is not None:\n meta[\"train_op\"] = train_op.name\n\n with open(os.path.join(folder, \"training_meta.json\"), \"w\") as f:\n f.write(json.dumps(meta))\n\n with gfile.GFile(os.path.join(folder, \"model.meta\"), \"wb\") as f:\n f.write(graph.as_graph_def().SerializeToString())\n\n return meta, saver\n\n @staticmethod\n def export(model_dir, loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,\n tensors_with_value, metrics, updates, train_op=None):\n import tensorflow as tf\n with graph.as_default():\n batch_size_tensor = tf.to_float(tf.shape(inputs[0])[0])\n inputs, additional_inputs, additional_values = \\\n TFModel._expand_inputs(inputs, tensors_with_value, loss_tensor)\n metric_tensors, val_methods = TFModel._process_metrics(graph, metrics, batch_size_tensor)\n grads = TFModel._process_grads(graph, grads)\n\n trainable_variables, trainable_variable_placeholders, trainable_assign, \\\n extra_variables, extra_variable_assign_placeholders, \\\n extra_variable_assign, update_op = \\\n TFModel._process_variables(graph, variables, updates)\n\n meta, saver = \\\n TFModel._save_to_dir(model_dir, sess, graph,\n metric_tensors,\n batch_size_tensor,\n loss_tensor, inputs, labels, 
predictions,\n trainable_variables,\n trainable_variable_placeholders,\n trainable_assign,\n extra_variables,\n extra_variable_assign_placeholders,\n extra_variable_assign,\n grads, update_op, train_op,\n additional_inputs,\n additional_values)\n return meta, saver, val_methods\n\n @staticmethod\n def create(loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,\n tensors_with_value, session_config, metrics, updates,\n model_dir, train_op=None):\n\n if model_dir is None:\n model_dir = tempfile.mkdtemp()\n else:\n if not os.path.isdir(model_dir):\n os.makedirs(model_dir)\n\n meta, saver, val_methods = TFModel.export(model_dir, loss_tensor, sess,\n inputs, labels, predictions, grads, variables,\n graph, tensors_with_value, metrics, updates,\n train_op)\n\n training_helper_layer = TFTrainingHelper(model_dir,\n session_config, saver, meta, sess)\n\n criterion = IdentityCriterion()\n\n return TFModel(training_helper_layer, criterion, val_methods)\n\n\nclass TFOptimizer:\n def __init__(self, tf_model, optim_method,\n sess=None, dataset=None,\n clip_norm=None, clip_value=None,\n model_dir=None):\n \"\"\"\n TFOptimizer is used for distributed training of TensorFlow\n on Spark/BigDL.\n\n Note that if grads and variables are not None, then they need to be sorted by name\n if you want to use multiple optimization methods for a TensorFlow model according to\n variable names.\n\n :param loss: The loss tensor of the TensorFlow model, should be a scalar\n :param optim_method: the optimization method to be used, such as bigdl.dllib.optim.optimizer.Adam\n :param sess: the current TensorFlow Session, if you want to used a pre-trained model, you\n should use the Session to load the pre-trained variables and pass it to TFOptimizer.\n \"\"\"\n\n self.optim_method = optim_method\n self.sess = sess\n self.dataset = dataset\n\n self.clip_norm = clip_norm\n if clip_value is not None and not isinstance(clip_value, tuple):\n raise ValueError(\"The clip_value argument should be a tuple (min_value, max_value)\")\n self.clip_constant = clip_value\n\n if self.dataset.batch_size <= 0:\n raise ValueError(\"You should set batch_size instead of batch_per_thread for training\")\n\n self.model_dir = model_dir\n\n self.tf_model = tf_model\n\n batch_size = self.dataset.batch_size\n\n self.train_data = self.dataset.get_training_data()\n self.val_data = self.dataset.get_validation_data()\n\n self.batch_size = batch_size\n\n self.estimator = Estimator(self.tf_model.training_helper_layer,\n self.optim_method,\n self.model_dir)\n\n if self.clip_norm:\n self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)\n if self.clip_constant:\n min_value, max_value = self.clip_constant\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n def load_checkpoint(self, path, version):\n # todo make version optional\n model_path = os.path.join(path, \"model.{}\".format(version))\n optim_method_path = os.path.join(path, \"optimMethod-TFParkTraining.{}\".format(version))\n self.tf_model.training_helper_layer.load_checkpoint(model_path)\n self.optim_method = OptimMethod.load(optim_method_path)\n self.estimator = Estimator(self.tf_model.training_helper_layer,\n self.optim_method,\n self.model_dir)\n if self.clip_norm:\n self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)\n if self.clip_constant:\n min_value, max_value = self.clip_constant\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n @staticmethod\n def _get_or_create_session(session):\n import tensorflow as tf\n if session 
is None:\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n else:\n sess = session\n return sess\n\n @staticmethod\n def _get_dataset_from_loss(loss):\n import tensorflow as tf\n all_required_inputs = find_placeholders([loss])\n dataset = tf.get_collection(all_required_inputs[0].name)[0]\n return dataset\n\n @staticmethod\n def _get_vars_grads(loss):\n import tensorflow as tf\n grads_vars = tf.train.GradientDescentOptimizer(0).compute_gradients(loss)\n grads_vars.sort(key=lambda grad_var: grad_var[1].name)\n variables = []\n grads = []\n for (grad, var) in grads_vars:\n if grad is not None:\n variables.append(var)\n grads.append(grad)\n return grads, variables\n\n @staticmethod\n def _get_vars_grads_from_train_op(train_op):\n def predicate(t):\n return t.name.split(\"/\")[-1].startswith(\"zoo_identity_op_for_grad\")\n grads = find_tensors([train_op], predicate)\n grad_ops = [grad.op for grad in grads]\n variables = []\n for grad in grad_ops:\n var = list(grad.control_inputs)[0]\n if var.name == \"VarHandleOp\":\n variables.append(var)\n else:\n variables.append(list(var.outputs)[0])\n # variables = [grad.op.control_inputs[0].outputs[0] for grad in grads]\n return grads, variables\n\n @classmethod\n def from_train_op(cls, train_op, loss, *, inputs=None, labels=None, metrics=None, updates=None,\n sess=None, dataset=None, tensor_with_value=None, session_config=None,\n model_dir=None):\n\n sess = TFOptimizer._get_or_create_session(sess)\n grads, variables = TFOptimizer._get_vars_grads_from_train_op(train_op)\n if dataset is None:\n dataset = TFOptimizer._get_dataset_from_loss(loss)\n _ = dataset.tensors # trigger create tensors if not available\n dataset_inputs = dataset._original_tensors\n if isinstance(dataset_inputs, tuple) and len(dataset_inputs) == 2:\n if inputs is None:\n inputs = dataset_inputs[0]\n\n if labels is None:\n labels = dataset_inputs[1]\n else:\n if inputs is None:\n inputs = dataset_inputs\n\n if labels is None:\n labels = []\n\n inputs = nest.flatten(inputs)\n labels = nest.flatten(labels)\n from bigdl.orca.tfpark.zoo_optimizer import FakeOptimMethod\n return TFOptimizer._from_grads(loss=loss, sess=sess, inputs=inputs, labels=labels,\n grads=grads,\n variables=variables, dataset=dataset, metrics=metrics,\n tensor_with_value=tensor_with_value,\n optim_method=FakeOptimMethod(),\n session_config=session_config, updates=updates,\n model_dir=model_dir, train_op=train_op)\n\n @classmethod\n def _from_grads(cls, loss, sess, inputs, labels, grads, variables, dataset, optim_method=None,\n clip_norm=None, clip_value=None,\n metrics=None, tensor_with_value=None, session_config=None,\n model_dir=None, updates=None, train_op=None):\n graph = loss.graph\n if metrics is None:\n metrics = {}\n\n tf_model = TFModel.create(loss, sess, inputs, labels, [], grads, variables, graph,\n tensor_with_value, session_config, metrics,\n updates, model_dir=None, train_op=train_op)\n return cls(tf_model, optim_method, sess=sess, dataset=dataset,\n clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)\n\n @classmethod\n def from_loss(cls, loss, optim_method, session=None, inputs=None, dataset=None,\n val_outputs=None, val_labels=None, val_method=None,\n clip_norm=None, clip_value=None, metrics=None,\n tensor_with_value=None, session_config=None, model_dir=None, updates=None):\n \"\"\"\n Create a TFOptimizer from a TensorFlow loss tensor.\n The loss tensor must come from a TensorFlow graph that only takes TFDataset.tensors and\n the tensors in `tensor_with_value` as 
inputs.\n :param loss: The loss tensor of the TensorFlow model, should be a scalar\n :param optim_method: the optimization method to be used, such as bigdl.dllib.optim.optimizer.Adam\n :param session: the current TensorFlow Session, if you want to used a pre-trained model,\n you should use the Session to load the pre-trained variables and pass it to TFOptimizer.\n :param val_outputs: the validation output TensorFlow tensor to be used by val_methods\n :param val_labels: the validation label TensorFlow tensor to be used by val_methods\n :param val_method: the BigDL val_method(s) to be used.\n :param clip_norm: float >= 0. Gradients will be clipped when their L2 norm exceeds\n this value.\n :param clip_value: float >= 0. Gradients will be clipped when their absolute value\n exceeds this value.\n :param metrics: a dictionary. The key should be a string representing the metric's name\n and the value should be the corresponding TensorFlow tensor, which should be a scalar.\n :param tensor_with_value: a dictionary. The key is TensorFlow tensor, usually a\n placeholder, the value of the dictionary is a tuple of two elements. The first one of\n the tuple is the value to feed to the tensor in training phase and the second one\n is the value to feed to the tensor in validation phase.\n :return: a TFOptimizer\n \"\"\"\n sess = TFOptimizer._get_or_create_session(session)\n grads, variables = TFOptimizer._get_vars_grads(loss)\n\n if dataset is None and inputs is None:\n dataset = TFOptimizer._get_dataset_from_loss(loss)\n inputs = dataset._original_tensors\n else:\n if inputs is None:\n raise ValueError(\"please specify inputs\")\n _ = dataset.tensors # trigger creating placeholders\n\n if isinstance(inputs, tuple) and len(inputs) == 2:\n inputs, labels = inputs\n else:\n labels = []\n\n inputs = nest.flatten(inputs)\n labels = nest.flatten(labels)\n\n if clip_value is not None:\n if isinstance(clip_value, float) or isinstance(clip_value, int):\n if clip_value <= 0:\n ValueError(\"The clip_value argument should be positive number\")\n clip_value = (-float(clip_value), float(clip_value))\n\n if not isinstance(clip_value, tuple):\n raise ValueError(\"The clip_value argument should be\" +\n \" a positive float/int which clips to\" +\n \" (-clip_value, clip_value); \" +\n \"or a tuple which clips to (min_value, max_value)\")\n\n if val_method is not None:\n val_methods = to_list(val_method)\n if metrics is None:\n metrics = {}\n\n for i, method in enumerate(val_methods):\n metrics['bigdl_metric_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)\n\n return TFOptimizer._from_grads(loss, sess, inputs, labels, grads, variables, dataset,\n optim_method, clip_norm, clip_value,\n metrics, tensor_with_value, session_config,\n model_dir, updates)\n\n @staticmethod\n def export_training_model(export_dir, loss, sess, inputs, labels=None, predictions=None,\n metrics=None, tensor_with_value=None, updates=None):\n\n grads, variables = TFOptimizer._get_vars_grads(loss)\n\n TFModel.export(export_dir, loss, sess, inputs, labels, predictions, grads, variables,\n loss.graph, tensor_with_value, metrics, updates)\n logging.info(\"Exported TensorFlow model in {} for training\".format(export_dir))\n\n @staticmethod\n def _shape_match(model_shape, dataset_shape):\n\n for i in range(len(dataset_shape)):\n if dataset_shape[i].value is None:\n return model_shape[i].value is None\n else:\n return dataset_shape[i].value == model_shape[i].value or \\\n model_shape[i].value is None\n\n @classmethod\n def from_keras(cls, 
keras_model, dataset,\n session_config=None, model_dir=None, metrics=None, optimizer=None):\n \"\"\"\n Create a TFOptimizer from a tensorflow.keras model. The model must be compiled.\n :param keras_model: the tensorflow.keras model, which must be compiled.\n :param dataset: a TFDataset\n :return:\n \"\"\"\n import tensorflow.keras.backend as K\n\n model_inputs = keras_model.inputs\n\n if hasattr(keras_model, \"targets\"):\n model_targets = keras_model.targets\n else:\n model_targets = keras_model._targets\n\n # target can be None if loss is None\n model_targets = list(filter(lambda x: x is not None, model_targets))\n\n check_data_compatible(dataset, keras_model, mode=\"train\")\n # standarize feature, labels to support keras model\n if isinstance(dataset, TFNdarrayDataset):\n dataset = _standarize_feature_label_dataset(dataset, keras_model)\n\n flatten_inputs = nest.flatten(dataset.feature_tensors)\n assert len(model_inputs) == len(flatten_inputs), \\\n (\"the keras model and TFDataset should have the same number of tensors\" +\n \" keras model has {} inputs \" +\n \"while TFDataset has {} inputs\").format(len(model_inputs),\n len(flatten_inputs))\n for i in range(len(flatten_inputs)):\n if not TFOptimizer._shape_match(model_inputs[i].shape, flatten_inputs[i].shape):\n raise ValueError((\"The {}th input in keras model {}\"\n \" does not match the TFDataset\"\n \"input {}\").format(i,\n model_inputs[i],\n flatten_inputs[i]))\n\n flatten_targets = nest.flatten(dataset.label_tensors)\n assert len(model_targets) == len(flatten_targets), \\\n (\"the keras model and TFDataset should have the same number of tensors\" +\n \" keras model has {} targets \" +\n \"while TFDataset has {} labels\").format(len(model_targets),\n len(flatten_inputs))\n # todo check targets shape, currently checking target shape will\n # cause too much false alarm.\n\n loss = keras_model.total_loss\n variables = keras_model._collected_trainable_weights\n variables.sort(key=lambda variable: variable.name)\n keras_optimizer = keras_model.optimizer\n\n from bigdl.orca.tfpark.zoo_optimizer import get_gradients_for_keras\n grads = get_gradients_for_keras(keras_optimizer, loss, variables)\n grads_and_vars = list(zip(grads, variables))\n import tensorflow.python.keras.optimizers as koptimizers\n if isinstance(keras_optimizer, koptimizers.TFOptimizer):\n # work around keras TFOptimzier bug\n train_op = keras_optimizer.optimizer.apply_gradients(grads_and_vars)\n else:\n train_op = keras_optimizer.apply_gradients(grads_and_vars)\n\n sess = K.get_session()\n\n if keras_model.metrics and (dataset.get_validation_data() is not None):\n if isinstance(keras_model.metrics, dict):\n raise ValueError(\n \"different metrics for different outputs are not supported right now\")\n\n if len(keras_model.outputs) > 1:\n if not all([name.endswith(\"loss\") for name in keras_model.metrics_names]):\n raise ValueError(\"metrics (except loss) for multi-head model is not supported\")\n else:\n bigdl_val_methods = [Loss()]\n val_outputs = keras_model.outputs\n val_labels = model_targets\n else:\n bigdl_val_methods = \\\n [to_bigdl_metric(m, keras_model.loss) for m in keras_model.metrics_names]\n val_outputs = keras_model.outputs\n val_labels = model_targets\n else:\n val_outputs = None\n val_labels = None\n bigdl_val_methods = None\n\n tensor_with_value = {\n K.learning_phase(): [True, False]\n }\n\n updates = []\n\n updates += keras_model.get_updates_for(None)\n # Conditional updates relevant to this model\n updates += 
keras_model.get_updates_for(keras_model.inputs)\n\n if bigdl_val_methods is not None:\n val_methods = to_list(bigdl_val_methods)\n bigdl_metrics = {}\n for i, method in enumerate(val_methods):\n bigdl_metrics['bigdl_metric_' + str(i)] = BigDLMetric(method,\n val_outputs,\n val_labels)\n if metrics is None:\n metrics = bigdl_metrics\n else:\n metrics.update(bigdl_metrics)\n\n if optimizer is not None:\n clip_norm = None\n clip_value = None\n if hasattr(keras_optimizer, 'clipnorm'):\n clip_norm = keras_optimizer.clipnorm\n if hasattr(keras_optimizer, 'clipvalue'):\n clip_value = (-keras_optimizer.clipvalue, keras_optimizer.clipvalue)\n tf_model = TFModel.create(loss, sess, model_inputs, model_targets, keras_model.outputs,\n grads, variables, loss.graph,\n tensor_with_value, session_config, metrics,\n updates, model_dir=None)\n\n return cls(tf_model, optimizer, sess=sess, dataset=dataset,\n clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)\n\n return cls.from_train_op(train_op, loss, inputs=model_inputs, labels=model_targets,\n metrics=metrics, updates=updates, sess=sess, dataset=dataset,\n tensor_with_value=tensor_with_value, session_config=session_config,\n model_dir=model_dir)\n\n def set_constant_gradient_clipping(self, min_value, max_value):\n \"\"\"\n Configure constant clipping settings.\n\n :param min_value: the minimum value to clip by\n :param max_value: the maxmimum value to clip by\n \"\"\"\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n def set_gradient_clipping_by_l2_norm(self, clip_norm):\n \"\"\"\n Configure L2 norm clipping settings.\n :param clip_norm: gradient L2-Norm threshold\n \"\"\"\n self.estimator.set_l2_norm_gradient_clipping(clip_norm)\n\n def optimize(self, end_trigger=None, checkpoint_trigger=None):\n \"\"\"\n Run the training loop of the this optimizer\n :param end_trigger: BigDL's Trigger to indicate when to stop the training.\n :param checkpoint_trigger: When to save a checkpoint and evaluate model.\n \"\"\"\n if end_trigger is None:\n end_trigger = MaxEpoch(1)\n\n if checkpoint_trigger is None:\n checkpoint_trigger = EveryEpoch()\n\n if isinstance(self.train_data, FeatureSet):\n if self.train_data.value.getNumOfSlice() != 1:\n if isinstance(checkpoint_trigger, EveryEpoch):\n checkpoint_trigger = ZEveryEpoch()\n elif not isinstance(checkpoint_trigger, ZooTrigger):\n raise Exception(\"Please use a trigger defined in bigdl.dllib.utils.triggers\")\n\n if self.tf_model.val_methods and self.val_data is not None:\n self.estimator.train_minibatch(train_set=self.train_data,\n criterion=self.tf_model.criterion,\n end_trigger=end_trigger,\n checkpoint_trigger=checkpoint_trigger,\n validation_set=self.val_data,\n validation_method=self.tf_model.val_methods)\n else:\n self.estimator.train_minibatch(train_set=self.train_data,\n criterion=self.tf_model.criterion,\n end_trigger=end_trigger,\n checkpoint_trigger=checkpoint_trigger)\n\n self.tf_model.training_helper_layer.get_weights_to_python()\n" ]
[ [ "numpy.concatenate", "torch.no_grad", "torch.Tensor", "torch.cat" ], [ "tensorflow.shape", "tensorflow.get_collection", "tensorflow.keras.backend.get_session", "tensorflow.keras.backend.learning_phase", "tensorflow.assign", "tensorflow.is_numeric_tensor", "tensorflow.placeholder", "tensorflow.identity", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.to_float", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.group", "tensorflow.tables_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
gavinmacaulay/echopype
[ "1698b6076a16506f638e691d4d014c8649cc735d" ]
[ "echopype/convert/set_groups_ad2cp.py" ]
[ "from typing import List, Optional\n\nimport numpy as np\nimport xarray as xr\n\nfrom .parse_ad2cp import Ad2cpDataPacket, Field, HeaderOrDataRecordFormats\nfrom .set_groups_base import SetGroupsBase, set_encodings\n\n\ndef merge_attrs(datasets: List[xr.Dataset]) -> List[xr.Dataset]:\n \"\"\"\n Merges attrs from a list of datasets.\n Prioritizes keys from later datsets.\n \"\"\"\n\n total_attrs = dict()\n for ds in datasets:\n total_attrs.update(ds.attrs)\n for ds in datasets:\n ds.attrs = total_attrs\n return datasets\n\n\nclass SetGroupsAd2cp(SetGroupsBase):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.pulse_compressed = self.parser_obj.get_pulse_compressed()\n self.combine_packets()\n\n def combine_packets(self):\n self.ds = None\n\n # # TODO: where to put string data in output?\n\n # pad raw samples so that \"sample\" dimenion has same length\n max_samples = 0\n for packet in self.parser_obj.echosounder_raw_packets:\n # both _r and _i have same dimensions\n max_samples = max(\n max_samples, packet.data[\"echosounder_raw_samples_i\"].shape[0]\n )\n for packet in self.parser_obj.echosounder_raw_packets:\n packet.data[\"echosounder_raw_samples_i\"] = np.pad(\n packet.data[\"echosounder_raw_samples_i\"],\n ((0, max_samples - packet.data[\"echosounder_raw_samples_i\"].shape[0])),\n )\n packet.data[\"echosounder_raw_samples_q\"] = np.pad(\n packet.data[\"echosounder_raw_samples_q\"],\n ((0, max_samples - packet.data[\"echosounder_raw_samples_q\"].shape[0])),\n )\n\n def make_dataset(\n packets: List[Ad2cpDataPacket], ping_time_dim: str\n ) -> Optional[xr.Dataset]:\n for i in range(len(packets)):\n packet = packets[i]\n data_vars = dict()\n for field_name, field_value in packet.data.items():\n # add dimension names to data vars for xarray\n # TODO might not work with altimeter_spare\n field = HeaderOrDataRecordFormats.data_record_format(\n packet.data_record_type\n ).get_field(field_name)\n if field is not None:\n dims = field.dimensions(packet.data_record_type)\n units = field.units()\n else:\n dims = Field.default_dimensions()\n units = None\n if units:\n data_vars[field_name] = (\n tuple(dim.value for dim in dims),\n [field_value],\n {\"Units\": units},\n )\n else:\n data_vars[field_name] = (\n tuple(dim.value for dim in dims),\n [field_value],\n )\n coords = {\n \"ping_time\": [packet.timestamp],\n ping_time_dim: [packet.timestamp],\n }\n if \"beams\" in packet.data_exclude:\n coords[\"beam\"] = packet.data_exclude[\"beams\"]\n new_packet = xr.Dataset(data_vars=data_vars, coords=coords)\n\n # modify in place to reduce memory consumption\n packets[i] = new_packet\n if len(packets) > 0:\n packets = merge_attrs(packets)\n return xr.combine_by_coords(\n packets,\n data_vars=\"minimal\",\n coords=\"minimal\",\n combine_attrs=\"override\",\n )\n else:\n return None\n\n burst_ds = make_dataset(\n self.parser_obj.burst_packets, ping_time_dim=\"ping_time_burst\"\n )\n average_ds = make_dataset(\n self.parser_obj.average_packets, ping_time_dim=\"ping_time_average\"\n )\n echosounder_ds = make_dataset(\n self.parser_obj.echosounder_packets, ping_time_dim=\"ping_time_echosounder\"\n )\n echosounder_raw_ds = make_dataset(\n self.parser_obj.echosounder_raw_packets,\n ping_time_dim=\"ping_time_echosounder_raw\",\n )\n echosounder_raw_transmit_ds = make_dataset(\n self.parser_obj.echosounder_raw_transmit_packets,\n ping_time_dim=\"ping_time_echosounder_raw_transmit\",\n )\n\n datasets = [\n ds\n for ds in (\n burst_ds,\n average_ds,\n echosounder_ds,\n 
echosounder_raw_ds,\n echosounder_raw_transmit_ds,\n )\n if ds\n ]\n\n for dataset in datasets:\n if \"offset_of_data\" in dataset:\n print(dataset[\"offset_of_data\"])\n\n datasets = merge_attrs(datasets)\n self.ds = xr.merge(datasets)\n\n def set_env(self) -> xr.Dataset:\n ds = xr.Dataset(\n data_vars={\n \"sound_speed_indicative\": self.ds.get(\"speed_of_sound\"),\n \"temperature\": self.ds.get(\"temperature\"),\n \"pressure\": self.ds.get(\"pressure\"),\n },\n coords={\n \"ping_time\": self.ds.get(\"ping_time\"),\n \"ping_time_burst\": self.ds.get(\"ping_time_burst\", []),\n \"ping_time_average\": self.ds.get(\"ping_time_average\", []),\n \"ping_time_echosounder\": self.ds.get(\"ping_time_echosounder\", []),\n },\n )\n\n # FIXME: this is a hack because the current file saving\n # mechanism requires that the env group have ping_time as a dimension,\n # but ping_time might not be a dimension if the dataset is completely\n # empty\n if \"ping_time\" not in ds.dims:\n ds = ds.expand_dims(dim=\"ping_time\")\n\n return set_encodings(ds)\n\n def set_platform(self) -> xr.Dataset:\n ds = xr.Dataset(\n data_vars={\n \"heading\": self.ds.get(\"heading\"),\n \"pitch\": self.ds.get(\"pitch\"),\n \"roll\": self.ds.get(\"roll\"),\n \"magnetometer_raw_x\": self.ds.get(\"magnetometer_raw_x\"),\n \"magnetometer_raw_y\": self.ds.get(\"magnetometer_raw_y\"),\n \"magnetometer_raw_z\": self.ds.get(\"magnetometer_raw_z\"),\n },\n coords={\n \"ping_time\": self.ds.get(\"ping_time\"),\n \"ping_time_burst\": self.ds.get(\"ping_time_burst\"),\n \"ping_time_average\": self.ds.get(\"ping_time_average\"),\n \"ping_time_echosounder\": self.ds.get(\"ping_time_echosounder\"),\n \"beam\": self.ds.get(\"beam\"),\n \"range_bin_burst\": self.ds.get(\"range_bin_burst\"),\n \"range_bin_average\": self.ds.get(\"range_bin_average\"),\n \"range_bin_echosounder\": self.ds.get(\"range_bin_echosounder\"),\n },\n attrs={\n \"platform_name\": self.ui_param[\"platform_name\"],\n \"platform_type\": self.ui_param[\"platform_type\"],\n \"platform_code_ICES\": self.ui_param[\"platform_code_ICES\"],\n },\n )\n return set_encodings(ds)\n\n def set_beam(self) -> xr.Dataset:\n # TODO: should we divide beam into burst/average (e.g., beam_burst, beam_average)\n # like was done for range_bin (we have range_bin_burst, range_bin_average,\n # and range_bin_echosounder)?\n data_vars = {\n \"number_of_beams\": self.ds.get(\"num_beams\"),\n \"coordinate_system\": self.ds.get(\"coordinate_system\"),\n \"number_of_cells\": self.ds.get(\"num_cells\"),\n \"blanking\": self.ds.get(\"blanking\"),\n \"cell_size\": self.ds.get(\"cell_size\"),\n \"velocity_range\": self.ds.get(\"velocity_range\"),\n \"echosounder_frequency\": self.ds.get(\"echosounder_frequency\"),\n \"ambiguity_velocity\": self.ds.get(\"ambiguity_velocity\"),\n \"data_set_description\": self.ds.get(\"dataset_description\"),\n \"transmit_energy\": self.ds.get(\"transmit_energy\"),\n \"velocity_scaling\": self.ds.get(\"velocity_scaling\"),\n \"velocity_burst\": self.ds.get(\"velocity_data_burst\"),\n \"velocity_average\": self.ds.get(\"velocity_data_average\"),\n # \"velocity_echosounder\": self.ds.get(\"velocity_data_echosounder\"),\n \"amplitude_burst\": self.ds.get(\"amplitude_data_burst\"),\n \"amplitude_average\": self.ds.get(\"amplitude_data_average\"),\n # \"amplitude_echosounder\": self.ds.get(\"amplitude_data_echosounder\"),\n \"correlation_burst\": self.ds.get(\"correlation_data_burst\"),\n \"correlation_average\": self.ds.get(\"correlation_data_average\"),\n 
\"correlation_echosounder\": self.ds.get(\"correlation_data_echosounder\"),\n # \"echosounder\": self.ds.get(\"echosounder_data\"),\n \"amplitude_echosounder\": self.ds.get(\"echosounder_data\"),\n \"figure_of_merit\": self.ds.get(\"figure_of_merit_data\"),\n \"altimeter_distance\": self.ds.get(\"altimeter_distance\"),\n \"altimeter_quality\": self.ds.get(\"altimeter_quality\"),\n \"ast_distance\": self.ds.get(\"ast_distance\"),\n \"ast_quality\": self.ds.get(\"ast_quality\"),\n \"ast_offset_100us\": self.ds.get(\"ast_offset_100us\"),\n \"ast_pressure\": self.ds.get(\"ast_pressure\"),\n \"altimeter_spare\": self.ds.get(\"altimeter_spare\"),\n \"altimeter_raw_data_num_samples\": self.ds.get(\n \"altimeter_raw_data_num_samples\"\n ),\n \"altimeter_raw_data_sample_distance\": self.ds.get(\n \"altimeter_raw_data_sample_distance\"\n ),\n \"altimeter_raw_data_samples\": self.ds.get(\"altimeter_raw_data_samples\"),\n }\n\n ds = xr.Dataset(\n data_vars=data_vars,\n coords={\n \"ping_time\": self.ds.get(\"ping_time\"),\n \"ping_time_burst\": self.ds.get(\"ping_time_burst\"),\n \"ping_time_average\": self.ds.get(\"ping_time_average\"),\n \"ping_time_echosounder\": self.ds.get(\"ping_time_echosounder\"),\n \"beam\": self.ds.get(\"beam\"),\n \"range_bin_burst\": self.ds.get(\"range_bin_burst\"),\n \"range_bin_average\": self.ds.get(\"range_bin_average\"),\n \"range_bin_echosounder\": self.ds.get(\"range_bin_echosounder\"),\n \"altimeter_sample_bin\": self.ds.get(\"altimeter_sample_bin\"),\n },\n attrs={\"pulse_compressed\": self.pulse_compressed},\n )\n\n # FIXME: this is a hack because the current file saving\n # mechanism requires that the beam group have ping_time as a dimension,\n # but ping_time might not be a dimension if the dataset is completely\n # empty\n if \"ping_time\" not in ds.dims:\n ds = ds.expand_dims(dim=\"ping_time\")\n\n return set_encodings(ds)\n\n def set_vendor(self) -> xr.Dataset:\n attrs = {\n \"pressure_sensor_valid\": self.ds.get(\"pressure_sensor_valid\"),\n \"temperature_sensor_valid\": self.ds.get(\"temperature_sensor_valid\"),\n \"compass_sensor_valid\": self.ds.get(\"compass_sensor_valid\"),\n \"tilt_sensor_valid\": self.ds.get(\"tilt_sensor_valid\"),\n }\n attrs = {\n field_name: field_value.data[0]\n for field_name, field_value in attrs.items()\n if field_value is not None\n }\n ds = xr.Dataset(\n data_vars={\n \"data_record_version\": self.ds.get(\"version\"),\n \"error\": self.ds.get(\"error\"),\n \"status\": self.ds.get(\"status\"),\n \"status0\": self.ds.get(\"status0\"),\n \"battery_voltage\": self.ds.get(\"battery_voltage\"),\n \"power_level\": self.ds.get(\"power_level\"),\n \"temperature_of_pressure_sensor\": self.ds.get(\n \"temperature_from_pressure_sensor\"\n ),\n \"nominal_correlation\": self.ds.get(\"nominal_correlation\"),\n \"magnetometer_temperature\": self.ds.get(\"magnetometer_temperature\"),\n \"real_ping_time_clock_temperature\": self.ds.get(\n \"real_ping_time_clock_temperature\"\n ),\n \"ensemble_counter\": self.ds.get(\"ensemble_counter\"),\n \"ahrs_rotation_matrix_mij\": (\n (\"mij\", \"ping_time\")\n if \"ahrs_rotation_matrix_m11\" in self.ds\n else \"mij\",\n [\n self.ds.get(\"ahrs_rotation_matrix_m11\"),\n self.ds.get(\"ahrs_rotation_matrix_m12\"),\n self.ds.get(\"ahrs_rotation_matrix_m13\"),\n self.ds.get(\"ahrs_rotation_matrix_m21\"),\n self.ds.get(\"ahrs_rotation_matrix_m22\"),\n self.ds.get(\"ahrs_rotation_matrix_m23\"),\n self.ds.get(\"ahrs_rotation_matrix_m31\"),\n self.ds.get(\"ahrs_rotation_matrix_m32\"),\n 
self.ds.get(\"ahrs_rotation_matrix_m33\"),\n ],\n ),\n \"ahrs_quaternions_wxyz\": (\n (\"wxyz\", \"ping_time\")\n if \"ahrs_quaternions_w\" in self.ds\n else \"wxyz\",\n [\n self.ds.get(\"ahrs_quaternions_w\"),\n self.ds.get(\"ahrs_quaternions_x\"),\n self.ds.get(\"ahrs_quaternions_y\"),\n self.ds.get(\"ahrs_quaternions_z\"),\n ],\n ),\n \"ahrs_gyro_xyz\": (\n (\"xyz\", \"ping_time\") if \"ahrs_gyro_x\" in self.ds else \"xyz\",\n [\n self.ds.get(\"ahrs_gyro_x\"),\n self.ds.get(\"ahrs_gyro_y\"),\n self.ds.get(\"ahrs_gyro_z\"),\n ],\n ),\n \"percentage_good_data\": self.ds.get(\"percentage_good_data\"),\n \"std_dev_pitch\": self.ds.get(\"std_dev_pitch\"),\n \"std_dev_roll\": self.ds.get(\"std_dev_roll\"),\n \"std_dev_heading\": self.ds.get(\"std_dev_heading\"),\n \"std_dev_pressure\": self.ds.get(\"std_dev_pressure\"),\n \"echosounder_raw_samples_i\": self.ds.get(\"echosounder_raw_samples_i\"),\n \"echosounder_raw_samples_q\": self.ds.get(\"echosounder_raw_samples_q\"),\n \"echosounder_raw_transmit_samples_i\": self.ds.get(\n \"echosounder_raw_transmit_samples_i\"\n ),\n \"echosounder_raw_transmit_samples_q\": self.ds.get(\n \"echosounder_raw_transmit_samples_q\"\n ),\n \"echosounder_raw_beam\": self.ds.get(\"echosounder_raw_beam\"),\n \"echosounder_raw_echogram\": self.ds.get(\"echosounder_raw_echogram\"),\n },\n coords={\n \"ping_time\": self.ds.get(\"ping_time\"),\n \"ping_time_burst\": self.ds.get(\"ping_time_burst\"),\n \"ping_time_average\": self.ds.get(\"ping_time_average\"),\n \"ping_time_echosounder\": self.ds.get(\"ping_time_echosounder\"),\n \"ping_time_echosounder_raw\": self.ds.get(\"ping_time_echosounder_raw\"),\n \"ping_time_echosounder_raw_transmit\": self.ds.get(\n \"ping_time_echosounder_raw_transmit\"\n ),\n \"sample\": self.ds.get(\"sample\"),\n \"sample_transmit\": self.ds.get(\"sample_transmit\"),\n \"beam\": self.ds.get(\"beam\"),\n \"range_bin_average\": self.ds.get(\"range_bin_average\"),\n \"range_bin_burst\": self.ds.get(\"range_bin_burst\"),\n \"range_bin_echosounder\": self.ds.get(\"range_bin_echosounder\"),\n },\n attrs={**attrs, \"pulse_compressed\": self.pulse_compressed},\n )\n ds = ds.reindex(\n {\n \"mij\": np.array([\"11\", \"12\", \"13\", \"21\", \"22\", \"23\", \"31\", \"32\", \"33\"]),\n \"wxyz\": np.array([\"w\", \"x\", \"y\", \"z\"]),\n \"xyz\": np.array([\"x\", \"y\", \"z\"]),\n }\n )\n\n # FIXME: this is a hack because the current file saving\n # mechanism requires that the vendor group have ping_time as a dimension,\n # but ping_time might not be a dimension if the dataset is completely\n # empty\n if \"ping_time\" not in ds.dims:\n ds = ds.expand_dims(dim=\"ping_time\")\n\n return set_encodings(ds)\n\n def set_sonar(self) -> xr.Dataset:\n ds = xr.Dataset(\n attrs={\n \"sonar_manufacturer\": \"Nortek\",\n \"sonar_model\": \"AD2CP\",\n \"sonar_serial_number\": \"\",\n \"sonar_software_name\": \"\",\n \"sonar_software_version\": \"\",\n \"sonar_firmware_version\": \"\",\n \"sonar_type\": \"acoustic Doppler current profiler (ADCP)\",\n }\n )\n if \"serial_number\" in self.ds:\n ds.attrs[\"sonar_serial_number\"] = int(self.ds[\"serial_number\"].data[0])\n firmware_version = self.parser_obj.get_firmware_version()\n if firmware_version is not None:\n ds.attrs[\"sonar_firmware_version\"] = \", \".join(\n [f\"{k}:{v}\" for k, v in firmware_version.items()]\n )\n return ds\n" ]
[ [ "numpy.array", "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nenb/cycling_in_france
[ "6cddc433a2136f52be996719db0a1d876fcf5c59" ]
[ "cycling_in_france/helper_func.py" ]
[ "import regionmask\nimport numpy as np\nimport dask\n\n\ndef create_windmax_dict(u, v, names, borders, longitude, latitude):\n \"\"\"Produce a dictionary of masked maximum wind speeds in units of mph.\"\"\"\n\n if u.units != \"m s**-1\":\n raise ValueError(\"U field does not have units m/s\")\n if v.units != \"m s**-1\":\n raise ValueError(\"V field does not have units m/s\")\n metre_to_mile = 3600.0 / 1609.3\n speed = np.sqrt(u ** 2 + v ** 2) * metre_to_mile\n\n windmax_dict = {}\n for i, regname in enumerate(names):\n # Modify index in case any entries have been dropped e.g. Corsica\n idx = names.index[i]\n # Create object from 'borders' for masking gridded data\n regmask = regionmask.Regions(name=regname, outlines=list(borders[idx]))\n # Apply mask to dataset coordinates\n mask_zeros = regmask.mask(longitude, latitude)\n # Replace zeros with ones for matrix multiplication\n mask_ones = mask_zeros.where(np.isnan(mask_zeros.values), 1)\n # Use Dask dataframes for lazy execution\n mask_ones = dask.array.from_array(mask_ones)\n speed_mask = speed * mask_ones\n # Compute maximum over lat-lon grid\n windmax_dict[regname] = speed_mask.max(dim=[\"longitude\", \"latitude\"])\n return windmax_dict\n" ]
[ [ "numpy.isnan", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AldrickF/SlicerRegularizedFastMarching
[ "8a04a594cf5dd6c98e1f9dd93e61af6e6852339f" ]
[ "RegularizedFastMarching/RegularizedFastMarchingLib/Regularization.py" ]
[ "def regularization(InputImage, StructuringElementRadius=3):\n \"\"\"\n Compute the 3D scalar field that will be used to regularize the seeds propagation\n Inputs:\n * InputImage: the 3D image that will be segmented. Must be a 3D numpy array.\n * StructuringElementRadius: A structuring element of size (1+2*StructuringElementRadius) x (1+2*StructuringElementRadius) x (1+2*StructuringElementRadius) will be used\n Outputs:\n * R: The 3D numpy array having the same size as InputImage, used for the regularization\n \"\"\"\n\n from scipy import ndimage\n MSE = StructuringElementRadius\n return ndimage.morphological_gradient(InputImage, size=(MSE, MSE, MSE))" ]
[ [ "scipy.ndimage.morphological_gradient" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
wohlbier/GraphSAINT
[ "cea64e77d97b77d76b05fba17cbfaa0d985d9aa3" ]
[ "graphsaint/setup.py" ]
[ "# cython: language_level=3\nfrom distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy\n# import cython_utils\n\nimport os\nos.environ[\"CC\"] = \"g++\"\nos.environ[\"CXX\"] = \"g++\"\n\nsetup(ext_modules = cythonize([\"graphsaint/cython_sampler.pyx\",\"graphsaint/cython_utils.pyx\",\"graphsaint/norm_aggr.pyx\"]), include_dirs = [numpy.get_include()])\n# to compile: python graphsaint/setup.py build_ext --inplace\n" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YoshimitsuMatsutaIe/ans_2021
[ "a04cd9b9541583aaa8a6dc5ece323ae1cf706c3b" ]
[ "example_py/example_2.py" ]
[ "### SciPyを使った実装 ###\nimport numpy as np\nfrom scipy.integrate import solve_ivp\nimport matplotlib.pyplot as plt\n\n\ndef diff_eq(x, t, a):\n \"\"\"微分方程式\"\"\"\n return a * x\n\ndef do_example_2():\n time_list = np.arange(0.0, 2.0, 0.01) # 時間のリスト\n x_init = [1.0] # 初期値\n \n a = 1\n \n # 解く\n sol = solve_ivp(\n fun = diff_eq,\n y0 = x_init,\n t_span=(0.0, 2.0),\n t_eval = time_list,\n method = 'RK45',\n args = (a,)\n ) # scipy.integrate.odeintソルバー.他のソルバーもある.\n \n # グラフ化\n fig = plt.figure() # figureインスタンスを作成\n ax = fig.add_subplot(111) #figureオブジェクトにaxesを追加\n ax.plot(list(time_list), sol.y[0], label = \"solution\") # プロットを入れる\n ax.set_xlabel('time') # x軸にラベルを追加\n ax.set_ylabel('x') # y軸にラベルを追加\n ax.grid(True) # グリッドを入れる\n ax.legend() # 凡例を入れる\n ax.set_aspect('equal', adjustable='box') # 軸を揃える\n \n plt.show() # プロットを表示\n\n\nif __name__ == '__main__':\n do_example_2()" ]
[ [ "numpy.arange", "scipy.integrate.solve_ivp", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.2", "1.7", "1.0", "1.3", "1.8" ], "tensorflow": [] } ]
LLNL/XNAS
[ "62f90bb29b492a3b993d7a866d229634a2d95057" ]
[ "experiments/mnist/load_data.py" ]
[ "\"\"\"\nMIT License\n\nCopyright (c) 2022, Lawrence Livermore National Security, LLC\nWritten by Zachariah Carmichael et al.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\nfrom xnas.utils import get_logger\n\nlogger = get_logger(__name__)\n\n\ndef preprocess(image, label):\n import tensorflow as tf\n\n mean = [0.13066044]\n std = [0.3081079]\n\n # converting dtype changes uint8 [0..255] to float [0.,1.]\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = (image - tf.reshape(mean, [1, 1, 1])) / tf.reshape(std, [1, 1, 1])\n\n label = tf.one_hot(label, depth=10, dtype=tf.int32)\n\n return image, label\n\n\ndef augment(image, label):\n import tensorflow as tf\n import tensorflow_addons as tfa\n\n pad = 4\n # random crop with zero-padding\n image = tf.image.resize_with_crop_or_pad(image,\n 28 + pad * 2,\n 28 + pad * 2)\n image = tf.image.random_crop(image, size=[28, 28, 1])\n # random LR flip\n image = tf.image.random_flip_left_right(image)\n # cutout\n image = tfa.image.random_cutout(tf.expand_dims(image, 0), (8, 8))\n image = tf.squeeze(image, axis=0)\n return image, label\n\n\ndef load_data():\n def load_train():\n import tensorflow as tf\n import tensorflow_datasets as tfds\n\n ds_train = tfds.load('mnist', as_supervised=True, split='train')\n ds_train = (\n ds_train\n .map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)\n .cache()\n .map(augment, num_parallel_calls=tf.data.AUTOTUNE)\n )\n return ds_train\n\n def load_test():\n import tensorflow as tf\n import tensorflow_datasets as tfds\n\n ds_test = tfds.load('mnist', as_supervised=True, split='test')\n ds_test = (\n ds_test\n .map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)\n .cache()\n )\n return ds_test\n\n train_size, valid_size = 60000, 10000\n\n return {\n 'train_gen': load_train,\n 'train_size': train_size,\n 'valid_gen': load_test,\n 'valid_size': valid_size,\n 'types': ({'input_0': 'float32'}, 'int32'),\n 'shapes': ({'input_0': (28, 28, 1)}, (10,)),\n }\n" ]
[ [ "tensorflow.image.resize_with_crop_or_pad", "tensorflow.image.random_flip_left_right", "tensorflow.reshape", "tensorflow.squeeze", "tensorflow.expand_dims", "tensorflow.image.random_crop", "tensorflow.one_hot", "tensorflow.image.convert_image_dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
EpistasisLab/penn-ml-benchmarks
[ "ac4ae198e62a7828cb9ff957d805bc33197dca28" ]
[ "pmlb/pmlb.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nPMLB was primarily developed at the University of Pennsylvania by:\n - Randal S. Olson ([email protected])\n - William La Cava ([email protected])\n - Weixuan Fu ([email protected])\n - and many more generous open source contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software\nand associated documentation files (the \"Software\"), to deal in the Software without restriction,\nincluding without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial\nportions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\nLIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport pandas as pd\nimport os\nfrom .dataset_lists import (\n dataset_names,\n classification_dataset_names, \n regression_dataset_names)\nimport requests\nimport warnings\nimport subprocess\nimport pathlib\n\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import StandardScaler\nfrom .support_funcs import (\n generate_summarystats, \n get_dataset_stats,\n last_commit_message\n)\nimport numpy as np\n\nGITHUB_URL = 'https://github.com/EpistasisLab/pmlb/raw/master/datasets'\nsuffix = '.tsv.gz'\n\ndef fetch_data(dataset_name, return_X_y=False, local_cache_dir=None, dropna=True):\n \"\"\"Download a data set from the PMLB, (optionally) store it locally, and return the data set.\n\n You must be connected to the internet if you are fetching a data set that is not cached locally.\n\n Parameters\n ----------\n dataset_name: str\n The name of the data set to load from PMLB.\n return_X_y: bool (default: False)\n Whether to return the data in scikit-learn format, with the features \n and labels stored in separate NumPy arrays.\n local_cache_dir: str (default: None)\n The directory on your local machine to store the data files.\n If None, then the local data cache will not be used.\n dropna: bool\n If True, pmlb will drop NAs in exported dataset.\n\n Returns\n ----------\n dataset: pd.DataFrame or (array-like, array-like)\n if return_X_y == False: A pandas DataFrame containing the fetched data set.\n if return_X_y == True: A tuple of NumPy arrays containing (features, labels)\n\n \"\"\"\n\n if local_cache_dir is None:\n if dataset_name not in dataset_names:\n raise ValueError('Dataset not found in PMLB.')\n dataset_url = get_dataset_url(GITHUB_URL,\n dataset_name, suffix)\n dataset = pd.read_csv(dataset_url, sep='\\t', compression='gzip')\n else:\n dataset_path = os.path.join(local_cache_dir, dataset_name,\n dataset_name+suffix)\n\n # Use the local cache if the file already exists there\n if os.path.exists(dataset_path):\n dataset = pd.read_csv(dataset_path, sep='\\t', compression='gzip')\n # Download the data to the local cache if it is not already there\n else:\n if dataset_name not in dataset_names:\n raise ValueError('Dataset not found in PMLB.')\n dataset_url = 
get_dataset_url(GITHUB_URL,\n dataset_name, suffix)\n dataset = pd.read_csv(dataset_url, sep='\\t', compression='gzip')\n dataset_dir = os.path.split(dataset_path)[0]\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n dataset.to_csv(dataset_path, sep='\\t', compression='gzip',\n index=False)\n\n if dropna:\n dataset.dropna(inplace=True)\n if return_X_y:\n X = dataset.drop('target', axis=1).values\n y = dataset['target'].values\n return (X, y)\n else:\n return dataset\n\n\ndef get_dataset_url(GITHUB_URL, dataset_name, suffix):\n dataset_url = '{GITHUB_URL}/{DATASET_NAME}/{DATASET_NAME}{SUFFIX}'.format(\n GITHUB_URL=GITHUB_URL,\n DATASET_NAME=dataset_name,\n SUFFIX=suffix\n )\n\n re = requests.get(dataset_url)\n if re.status_code != 200:\n raise ValueError('Dataset not found in PMLB.')\n return dataset_url\n\ndef get_updated_datasets(local_cache_dir='datasets'):\n \"\"\"Looks at commit and returns a list of datasets that were updated.\"\"\"\n cmd = 'git diff --name-only HEAD HEAD~1'\n res = subprocess.check_output(cmd.split(), universal_newlines=True).rstrip()\n changed_datasets = set()\n changed_metadatas = set()\n for path in res.splitlines():\n path = pathlib.Path(path)\n if path.parts[0] != 'datasets':\n continue\n if path.name.endswith('.tsv.gz'):\n changed_datasets.add(path.parts[-2])\n if path.name == 'metadata.yaml':\n changed_metadatas.add(path.parts[-2])\n \n datasets_remain = [x.name for x in pathlib.Path(local_cache_dir).iterdir()]\n changed_metadatas &= set(datasets_remain)\n changed_datasets &= set(datasets_remain)\n\n changed_datasets = sorted(changed_datasets)\n changed_metadatas = sorted(changed_metadatas)\n print(\n f'changed datasets: {changed_datasets}\\n'\n f'changed metadata: {changed_metadatas}'\n )\n return {'changed_datasets': changed_datasets,\n 'changed_metadatas': changed_metadatas}\n\ndef nearest_datasets(X, y=None, task='classification', n=1, \n dimensions=['n_instances', 'n_features']):\n \"\"\"\n X: numpy array or pandas DataFrame\n an n_samples x n_features array of independent variables\n y: numpy array or None (default: None)\n a n_samples array of dependent variables\n task: 'regression' or 'classification' (default: 'classification')\n specify the task.\n n: int (default: 1)\n the number of dataset names to return\n dimensions: list of str or str (default: ['NumberOfInstances',\n 'NumberOfFeatures'])\n a list of dataset characteristics to include in similarity calculation.\n Dimensions must correspond to columns of datasets/all_summary_stats.csv.\n If 'all', uses all numeric columns.\n \"\"\"\n if isinstance(X, np.ndarray):\n if y == None:\n ValueError('the target (y) must be specified if a np array '\n 'is passed.')\n df = pd.DataFrame({**{'x_'+str(i):x for i,x in enumerate(X.transpose)}\n **{'target':y}})\n elif isinstance(X, pd.DataFrame):\n df = X\n \n return fetch_nearest_dataset_names(df, task, n, dimensions)\n\ndef fetch_nearest_dataset_names(df, task, n, dimensions):\n \"\"\"Returns names of most similar datasets to df, in order of similarity. \n\n Parameters\n ----------\n df: pandas Dataframe \n a dataframe of n_samples x n_features+1 with a target column labeled\n 'target'\n task: str \n specify classification or regression for summary stat generation. 
\n n: int (default: 1)\n the number of dataset names to return\n dimensions: list of str or str (default: ['NumberOfInstances',\n 'NumberOfFeatures'])\n a list of dataset characteristics to include in similarity calculation.\n Dimensions must correspond to columns of datasets/all_summary_stats.csv.\n If 'all', uses all numeric columns.\n\n Returns\n -------\n dataset_names: an n-element list of dataset names in order of most similar \n to least similar.\n \"\"\"\n\n # load pmlb summary stats\n path = pathlib.Path(__file__).parent / \"all_summary_stats.tsv\"\n pmlb_stats = pd.read_csv(path, sep = '\\t')\n # restrict to same task\n pmlb_stats = pmlb_stats.loc[pmlb_stats.task==task]\n all_names = pmlb_stats['dataset'].values\n # restrict to floating point data in stats\n pmlb_stats = pmlb_stats.apply(\n lambda x: pd.to_numeric(x,errors='coerce')).dropna(axis=1,how='all')\n\n if dimensions=='all':\n dimensions = list(pmlb_stats.columns)\n else:\n pmlb_stats = pmlb_stats[dimensions] \n assert(all([d in pmlb_stats.columns for d in dimensions]))\n\n dataset_stats_tmp = get_dataset_stats(df)\n dataset_stats_tmp['yaml_task'] = task\n dataset_stats = generate_summarystats('dataset', dataset_stats_tmp, \n write_summary=False)\n dataset_stats = dataset_stats[dimensions]\n\n\n # #categorical and #continuous features columns\n ss = StandardScaler()\n pmlb_stats_norm = ss.fit_transform(pmlb_stats) \n\n # find nearest neighbors\n nn = NearestNeighbors(n_neighbors=n).fit(pmlb_stats_norm)\n distances, ds = nn.kneighbors(ss.transform(dataset_stats), n_neighbors=n, \n return_distance=True)\n # print([(name, dist) for name, dist in zip(all_names[ds.flatten()],\n # distances.flatten())])\n dataset_names = all_names[ds.flatten()]\n\n return dataset_names\n\ndef get_reviewed_datasets(dataset_names, local_cache_dir = 'datasets/'):\n reviewed_datasets = []\n\n for dataset_name in dataset_names:\n if local_cache_dir != None:\n meta_path = pathlib.Path(f'{local_cache_dir}{dataset_name}/metadata.yaml')\n if meta_path.exists():\n with open(meta_path, 'r') as f:\n header = f.readline()\n else:\n meta_url = '{GITHUB_URL}/{DATASET_NAME}/metadata.yaml'.format(\n GITHUB_URL=GITHUB_URL,\n DATASET_NAME=dataset_name\n )\n header = requests.get(meta_url).text.splitlines()[0] + '\\n'\n\n if header != '# Reviewed by [your name here]\\n':\n reviewed_datasets.append(dataset_name)\n \n return sorted(reviewed_datasets)\n\ndef select_datasets(obs_min = None, obs_max = None, feat_min = None, feat_max = None, class_min = None, class_max = None, endpt = None, max_imbalance = None, task = None):\n \"\"\"Filters existing datasets by given parameters, and returns a list of their names.\n\n Parameters\n ----------\n obs_min: int (default: None)\n The minimum acceptable number of observations/instances in the dataset\n obs_Max: int (default: None)\n The maximum acceptable number of observations/instances in the dataset\n feat_min: int (default: None)\n The minimum acceptable number of features in the dataset\n feat_max: int (default: None)\n The maximum acceptable number of features in the dataset\n class_min: int (default: None)\n The minimum acceptable number of classes in the dataset\n class_max: int (default: None)\n The maximum acceptable number of classes in the dataset\n max_imbalance: float (default: None)\n Maximum acceptable imbalance value for the dataset\n endpt: str (default: None)\n Whether the dataset endpoint type should be discrete, continuous, categorical, or binary\n task: str (default: None)\n Whether the dataset is suited 
for classification or regression problems\n Returns\n ----------\n list (str): \n list of names of datasets within filters. Will return an empty list if no datasets match.\n\n\n \"\"\"\n\n path = pathlib.Path(__file__).parent / \"all_summary_stats.tsv\"\n tempdf = pd.read_csv(path, sep = '\\t')\n if obs_min is not None:\n tempdf = tempdf.loc[tempdf['n_instances'] >= obs_min]\n if obs_max is not None:\n tempdf = tempdf.loc[tempdf['n_instances'] <= obs_max]\n if feat_min is not None:\n tempdf = tempdf.loc[tempdf['n_features'] >= feat_min]\n if feat_max is not None:\n tempdf = tempdf.loc[tempdf['n_features'] <= feat_max]\n if class_min is not None:\n tempdf = tempdf.loc[tempdf['n_classes'] >= class_min]\n if class_max is not None:\n tempdf = tempdf.loc[tempdf['n_classes'] <= class_max]\n if max_imbalance is not None:\n tempdf = tempdf.loc[tempdf['imbalance'] < max_imbalance]\n if endpt is not None:\n tempdf = tempdf.loc[tempdf['endpoint_type'] == endpt]\n if task is not None:\n tempdf = tempdf.loc[tempdf['task'] == task]\n return list(tempdf['dataset'].values)\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "pandas.read_csv", "pandas.to_numeric", "sklearn.neighbors.NearestNeighbors" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
haroldship/SCLPsolver
[ "70b79acb074f51d4a269993f6a1fcf04a8196a89" ]
[ "SCLPsolver/tests/MCQN_test_mpc.py" ]
[ "# Copyright 2020 IBM Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport numpy as np\nimport os\nproj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\nsys.path.append(proj)\nfrom SCLP import SCLP, SCLP_settings\nfrom doe.data_generators.MCQN import generate_MCQN_data\nfrom subroutines.utils import relative_to_project\nfrom doe.results_producer import write_results_to_csv\n\nK = 400\nI = 40\nimport time\nsolver_settings = SCLP_settings(find_alt_line=False, check_intermediate_solution=False, memory_management= False, suppress_printing = False)\n\nsettings = {'alpha_rate': 1, 'cost_scale':2, 'a_rate' : 0.05, 'sum_rate':0.95, 'nz': 0.5,\n 'gamma_rate':0, 'c_scale': 0, 'h_rate': 0.2}\nseed = 1009\nG, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_MCQN_data(seed, K, I, **settings)\nTT = 100\n\n# import cProfile, pstats, io\n# pr = cProfile.Profile()\n#pr.enable()\nresult = {'servers': I, 'buffers': K, 'seed': seed}\nstart_time = time.time()\nsolution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, 3/12 * TT, solver_settings)\nt, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)\n#pr.disable()\nprint(obj, err, maxT)\ntime1 = time.time() - start_time\nprint(\"--- %s seconds ---\" % time1)\nresult['time1'] = time1\nresult['STEPCOUNT1'] = STEPCOUNT\nt0 = 1/12 * TT\nlast_breakpoint = np.where(t<=t0)[0][-1]\ndelta_t = t0 - t[last_breakpoint]\nnew_x0 = x[:, last_breakpoint] + solution._state.dx[:, last_breakpoint] * delta_t + 0.1 * a * t0\nstart_time = time.time()\nSTEPCOUNT, pivot_problem = solution.recalculate(param_line, t0, 4/12 * TT, new_x0, solver_settings, 10E-11, mm = None)\nt, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)\nprint(obj, err, maxT)\ntime2 = time.time() - start_time\nprint(\"--- %s seconds ---\" % time2)\nresult['time2'] = time2\nresult['STEPCOUNT2'] = STEPCOUNT\nalpha = new_x0\nstart_time = time.time()\nsolution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, 3/12 * TT, solver_settings)\nt, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)\n#pr.disable()\nprint(obj, err, maxT)\ntime3 = time.time() - start_time\nprint(\"--- %s seconds ---\" % time3)\nresult['time3'] = time3\nresult['STEPCOUNT3'] = STEPCOUNT\n# start_time = time.time()\n# STEPCOUNT, pivot_problem =solution.recalculate(param_line, 1/12 * TT, 4/12 * TT, None, solver_settings, 10E-11, mm = None)\n# t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)\n# print(obj, err, maxT)\n# time3 = time.time() - start_time\n# print(\"--- %s seconds ---\" % time3)\n# result['time3'] = time3\n# result['STEPCOUNT3'] = STEPCOUNT\n# start_time = time.time()\n# STEPCOUNT, pivot_problem = solution.recalculate(param_line, 1/12 * TT, 4/12 * TT, None, solver_settings, 10E-11, mm = None)\n# t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(True)\n# 
print(obj, err, maxT)\n# time4 = time.time() - start_time\n# print(\"--- %s seconds ---\" % time4)\n# result['time4'] = time4\n# result['STEPCOUNT4'] = STEPCOUNT\n# results = [result]\n# res_file = relative_to_project('online_results.csv')\n# write_results_to_csv(results, res_file)\n# # s = io.StringIO()\n# # ps = pstats.Stats(pr, stream=s)\n# # ps.print_stats()\n# # print(s.getvalue())" ]
[ [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dennis-l/so_pysm_models
[ "da21d51fd09ef409542862a773d22ed1656bb1bb" ]
[ "so_pysm_models/alms.py" ]
[ "import numpy as np\nimport healpy as hp\n\ntry:\n from pixell import curvedsky, enmap\nexcept:\n pass\n\ntry: # PySM >= 3.2.1\n import pysm3.units as u\n import pysm3 as pysm\nexcept ImportError:\n import pysm.units as u\n import pysm\n\n\nclass PrecomputedAlms(object):\n def __init__(\n self,\n filename,\n input_units=\"uK_CMB\",\n input_reference_frequency=None,\n nside=None,\n target_shape=None,\n target_wcs=None,\n from_cl=False,\n from_cl_seed=None,\n precompute_output_map=True,\n has_polarization=True,\n map_dist=None,\n ):\n \"\"\"Generic component based on Precomputed Alms\n\n Load a set of Alms from a FITS file and generate maps at the requested\n resolution and frequency assuming the CMB black body spectrum.\n A single set of Alms is used for all frequencies requested by PySM,\n consider that PySM expects the output of components to be in uK_RJ.\n See more details at https://so-pysm-models.readthedocs.io/en/latest/so_pysm_models/models.html\n\n Also note that the Alms are clipped to 3*nside-1 to avoid\n artifacts from high-ell components which cannot be properly represented\n by a low-nside map.\n\n Parameters\n ----------\n filename : string\n Path to the input Alms in FITS format\n input_units : string\n Input unit strings as defined by pysm.convert_units, e.g. K_CMB, uK_RJ, MJysr\n input_reference_frequency: float\n If input units are K_RJ or Jysr, the reference frequency\n nside : int\n HEALPix NSIDE of the output maps\n from_cl : bool\n If True, the input file contains C_ell instead of a_lm,\n they should provided with the healpy old ordering TT, TE, TB, EE, EB, BB, sorry.\n from_cl_seed : int\n Seed set just before synalm to simulate the alms from the C_ell,\n necessary to set it in order to get the same input map for different runs\n only used if `from_cl` is True\n precompute_output_map : bool\n If True (default), Alms are transformed into a map in the constructor,\n if False, the object only stores the Alms and generate the map at each\n call of the signal method, this is useful to generate maps convolved\n with different beams\n has_polarization : bool\n whether or not to simulate also polarization maps\n Default: True\n \"\"\"\n\n self.nside = nside\n self.shape = target_shape\n self.wcs = target_wcs\n self.filename = filename\n self.input_units = u.Unit(input_units)\n self.has_polarization = has_polarization\n\n if from_cl:\n np.random.seed(from_cl_seed)\n cl = hp.read_cl(self.filename)\n if not self.has_polarization and cl.ndim > 1:\n cl = cl[0]\n # using healpy old ordering TT, TE, TB, EE, EB, BB\n alm = hp.synalm(cl, new=False, verbose=False)\n else:\n alm = np.complex128(\n hp.read_alm(\n self.filename, hdu=(1, 2, 3) if self.has_polarization else 1\n )\n )\n\n self.equivalencies = (\n None\n if input_reference_frequency is None\n else u.cmb_equivalencies(input_reference_frequency)\n )\n if precompute_output_map:\n self.output_map = self.compute_output_map(alm)\n\n else:\n self.alm = alm\n\n def compute_output_map(self, alm):\n\n lmax = hp.Alm.getlmax(alm.shape[-1]) # we assume mmax = lmax\n if self.nside is None:\n assert (self.shape is not None) and (self.wcs is not None)\n n_comp = 3 if self.has_polarization else 1\n output_map = enmap.empty((n_comp,) + self.shape[-2:], self.wcs)\n curvedsky.alm2map(alm, output_map, spin=[0, 2], verbose=True)\n elif self.nside is not None:\n if lmax > 3*self.nside-1:\n clip = np.ones(3*self.nside)\n if alm.ndim == 1:\n alm_clipped = hp.almxfl(alm, clip)\n else:\n alm_clipped = [hp.almxfl(each, clip) for each in alm]\n else:\n 
alm_clipped = alm\n output_map = hp.alm2map(alm_clipped, self.nside)\n else:\n raise ValueError(\"You must specify either nside or both of shape and wcs\")\n return (output_map << self.input_units).to(\n u.uK_CMB, equivalencies=self.equivalencies\n )\n\n @u.quantity_input\n def get_emission(\n self,\n freqs: u.GHz,\n fwhm: [u.arcmin, None] = None,\n weights=None,\n output_units=u.uK_RJ,\n ):\n \"\"\"Return map in uK_RJ at given frequency or array of frequencies\n\n Parameters\n ----------\n freqs : list or ndarray\n Frequency or frequencies in GHz at which compute the signal\n fwhm : float (optional)\n Smooth the input alms before computing the signal, this can only be used\n if the class was initialized with `precompute_output_map` to False.\n output_units : str\n Output units, as defined in `pysm.convert_units`, by default this is\n \"uK_RJ\" as expected by PySM.\n Returns\n -------\n output_maps : ndarray\n Output maps array with the shape (num_freqs, 1 or 3 (I or IQU), npix)\n \"\"\"\n\n freqs = pysm.utils.check_freq_input(freqs)\n weights = pysm.utils.normalize_weights(freqs, weights)\n\n try:\n output_map = self.output_map\n except AttributeError:\n if fwhm is None:\n alm = self.alm\n else:\n alm = hp.smoothalm(\n self.alm, fwhm=fwhm.to_value(u.radian), pol=True, inplace=False\n )\n\n output_map = self.compute_output_map(alm)\n\n output_units = u.Unit(output_units)\n assert output_units in [u.uK_RJ, u.uK_CMB]\n if output_units == u.uK_RJ:\n\n convert_to_uK_RJ = (\n np.ones(len(freqs), dtype=np.double) * u.uK_CMB\n ).to_value(u.uK_RJ, equivalencies=u.cmb_equivalencies(freqs * u.GHz))\n\n if len(freqs) == 1:\n scaling_factor = convert_to_uK_RJ[0]\n else:\n scaling_factor = np.trapz(convert_to_uK_RJ * weights, x=freqs)\n\n return output_map.value * scaling_factor << u.uK_RJ\n elif output_units == output_map.unit:\n return output_map\n" ]
[ [ "numpy.trapz", "numpy.random.seed", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zzpwahaha/DataCrylsis
[ "b4436435d6ead3322ce54d22e048077732e39e57", "b4436435d6ead3322ce54d22e048077732e39e57" ]
[ "ExpFile.py", "fitters/Gaussian/bump2.py" ]
[ "# created by mark brown\nimport h5py as h5\nfrom colorama import Fore, Style\nfrom numpy import array as arr\nimport numpy as np\nimport Miscellaneous as misc\nimport datetime\ndataAddress = None\ncurrentVersion = 1\n\ndef annotate(fileID=None, expFile_version=currentVersion, useBaseA=True):\n #hashNum = int(input(\"Title-Level: \"))\n hashNum = 3\n #titleStr = ''.join('#' for _ in range(hashNum)) + ' ' + title\n with ExpFile(expFile_version=expFile_version) as file:\n print('annotating file ' + str(fileID));\n file.open_hdf5(fileID, openFlag='a', useBase=useBaseA)\n if checkAnnotation(fileID, force=False, expFile_version=expFile_version): \n title, notes, num = getAnnotation(fileID, expFile_version=expFile_version, useBaseA=useBaseA)\n title = input(\"Run Title (\\\"q\\\" to Quit) (prev was \\\"\"+title+\"\\\"):\")\n if title == 'q':\n raise RuntimeError(\"Annotation Quit\")\n notes = input(\"Experiment Notes (\\\"q\\\" to Quit)(prev was \\\"\"+notes+\"\\\"):\")\n if notes == 'q':\n raise RuntimeError(\"Annotation Quit\")\n else:\n title = input(\"Run Title (\\\"q\\\" to Quit):\")\n if title == 'q':\n raise RuntimeError(\"Annotation Quit\")\n notes = input(\"Experiment Notes (\\\"q\\\" to Quit):\")\n if notes == 'q':\n raise RuntimeError(\"Annotation Quit\")\n\n if 'Experiment_Notes' in file.f['Miscellaneous'].keys():\n del file.f['Miscellaneous']['Experiment_Notes']\n dset2 = file.f['Miscellaneous'].create_dataset(\"Experiment_Notes\", shape=(1,), dtype=\"S\"+str(len(notes))) \n dset2[0] = np.string_(notes)\n \n if 'Experiment_Title' in file.f['Miscellaneous'].keys():\n del file.f['Miscellaneous']['Experiment_Title']\n dset3 = file.f['Miscellaneous'].create_dataset(\"Experiment_Title\", shape=(1,), dtype=\"S\"+str(len(title))) \n dset3[0] = np.string_(title)\n \n if 'Experiment_Title_Level' in file.f['Miscellaneous'].keys():\n del file.f['Miscellaneous']['Experiment_Title_Level']\n dset4 = file.f['Miscellaneous'].create_dataset(\"Experiment_Title_Level\", shape=(1,), dtype=\"i8\") \n dset4[0] = hashNum\n\n \ndef checkAnnotation(fileNum, force=True, quiet=False, expFile_version=currentVersion):\n try:\n with ExpFile(fileNum, expFile_version=expFile_version) as f:\n if ( 'Experiment_Notes' not in f.f['Miscellaneous']\n or 'Experiment_Title' not in f.f['Miscellaneous']):\n #pass\n if force:\n raise RuntimeError('HDF5 File number ' + str(fileNum) + ' Has not been annotated. Please call exp.annotate() to annotate the file.')\n else:\n print('HDF5 File number ' + str(fileNum) + ' Has not been annotated. Please call exp.annotate() to annotate the file.')\n return False\n except OSError:\n # failed to open file probably, nothing to annotate.\n return False\n except KeyError:\n # file failed to open, probably a special run\n return False\n return True\n\n\ndef getAnnotation(fid, expFile_version=currentVersion, useBaseA=True):\n with ExpFile() as f:\n f.open_hdf5(fid, useBase=useBaseA)\n f_misc = f.f['Miscellaneous']\n if ( 'Experiment_Notes' not in f_misc\n or 'Experiment_Title' not in f_misc):\n raise RuntimeError('HDF5 File number ' + str(fid) + ' Has not been annotated. 
Please call exp.annotate() to annotate the file.')\n if 'Experiment_Title_Level' not in f_misc:\n expTitleLevel = 0\n else:\n expTitleLevel = f_misc['Experiment_Title_Level'][0]\n return (f_misc['Experiment_Title'][0].decode(\"utf-8\"), \n f_misc['Experiment_Notes'][0].decode(\"utf-8\"),\n expTitleLevel)\n \ndef getConfiguration(fid, expFile_version=currentVersion, useBaseA=True):\n with ExpFile() as file:\n file.open_hdf5(fid, useBase=useBaseA)\n f_MI = file.f['Master-Input']\n if ('Configuration' not in f_MI):\n return \"\"\n return ''.join([char.decode('utf-8') for char in f_MI['Configuration']])\n \n#\"J:\\\\Data repository\\\\New Data Repository\"\ndef setPath(day, month, year, repoAddress=\"\\\\\\\\jilafile.colorado.edu\\\\scratch\\\\regal\\\\common\\\\LabData\\\\NewRb\\\\CryoData\"):\n \"\"\"\n This function sets the location of where all of the data files are stored. It is occasionally called more\n than once in a notebook if the user needs to work past midnight.\n\n :param day: A number string, e.g. '11'.\n :param month: The name of a month, e.g. 'November' (must match file path capitalization).\n :param year: A number string, e.g. '2017'.\n :return:\n \"\"\"\n global dataAddress\n if type(day) == int:\n day = str(day)\n if type(year) == int:\n year = str(year)\n dataAddress = repoAddress + \"\\\\\" + year + \"\\\\\" + month + \"\\\\\" + month + \" \" + day + \"\\\\Raw Data\\\\\"\n #print(\"Setting new data address:\" + dataAddress)\n return dataAddress\n\n\ndef addNote(fileID=None):\n notes = input(\"New Experiment Note:\")\n with ExpFile() as file:\n noteNum = 1\n file.open_hdf5(fileID, openFlag='a')\n while noteNum < 1000:\n if 'Experiment_Note_' + str(noteNum) not in file.f['Miscellaneous'].keys():\n dset2 = file.f['Miscellaneous'].create_dataset(\"Experiment_Note_\" + str(noteNum), shape=(1,), dtype=\"S\"+str(len(notes))) \n dset2[0] = np.string_(notes)\n break\n else:\n noteNum += 1\n\ndef getStartDatetime(fileID):\n with ExpFile() as file:\n file.open_hdf5(fileID)\n file.exp_start_date, file.exp_start_time, file.exp_stop_date, file.exp_stop_time = file.get_experiment_time_and_date()\n dt = datetime.datetime.strptime(file.exp_start_date + \" \" + file.exp_start_time[:-1], '%Y-%m-%d %H:%M:%S')\n return dt\n \n# Exp is short for experiment here.\nclass ExpFile:\n \"\"\"\n a wrapper around an hdf5 file for easier handling and management.\n \"\"\"\n def __init__(self, file_id=None, expFile_version=currentVersion, useBaseA=True, keyParameter=None):\n \"\"\"\n if you give the constructor a file_id, it will automatically fill the relevant member variables.\n \"\"\"\n if expFile_version is None:\n expFile_version = currentVersion\n #print('expfile version:', expFile_version)\n # copy the current value of the address\n self.version = expFile_version\n self.f = None\n self.key_name = None\n self.key = None \n self.pics = None\n self.reps = None\n self.exp_start_time = None\n self.exp_start_date = None\n self.exp_stop_time = None\n self.exp_stop_date = None\n self.data_addr = dataAddress\n if file_id is not None:\n self.f = self.open_hdf5(fileID=file_id, useBase=useBaseA)\n self.key_name, self.key = self.get_key(keyParameter=keyParameter)\n self.pics = self.get_pics()\n self.reps = self.get_reps()\n self.exp_start_date, self.exp_start_time, self.exp_stop_date, self.exp_stop_time = self.get_experiment_time_and_date()\n \n \n def __enter__(self):\n return self\n\n \n def __exit__(self, exc_type, exc_value, traceback):\n try:\n return self.f.close()\n except AttributeError:\n return\n \n 
\n def open_hdf5(self, fileID=None, useBase=True, openFlag='r'): \n \n if type(fileID) == int:\n path = self.data_addr + \"data_\" + str(fileID) + \".h5\"\n elif useBase:\n # assume a file address itself\n path = self.data_addr + fileID + \".h5\"\n else:\n path = fileID\n try:\n file = h5.File(path, openFlag) \n except OSError as err:\n raise OSError(\"Failed to open file! file address was \\\"\" + path + \"\\\". OSError: \" + str(err))\n self.f = file\n return file\n \n def get_reps(self):\n # call this one.\n self.reps = self.f['Master-Runtime']['Repetitions'][0]\n return self.reps \n\n def get_params(self):\n return self.f['Master-Runtime']['Parameters'] \n \n def get_key(self, keyParameter=None):\n \"\"\"\n :param file:\n :return:\n \"\"\"\n keyNames = []\n keyValues = []\n foundOne = False\n nokeyreturn = 'No-Variation', arr([1])\n try:\n params = self.get_params()\n for var in params:\n if not params[var]['Is Constant'][0]:\n foundOne = True\n keyNames.append(''.join([char.decode('utf-8') for char in params[var]['Name']]))\n keyValues.append(arr(params[var]['Key Values']))\n\n if foundOne:\n if len(keyNames) > 1:\n return keyNames, arr(misc.transpose(arr(keyValues)))\n else:\n return keyNames[0], arr(keyValues[0])\n else:\n if keyParameter is None:\n return nokeyreturn\n else:\n for var in params:\n name = ''.join([char.decode('utf-8') for char in params[var]['Name']])\n print(name)\n if name == keyParameter:\n return name , arr(params[var]['Key Values'])\n return \"Key not found!\", arr([1])\n except KeyError:\n return nokeyreturn\n \n def get_pics(self):\n p_t = arr(self.f['Andor']['Pictures'])\n pics = p_t.reshape((p_t.shape[0], p_t.shape[2], p_t.shape[1]))\n return pics\n \n def get_mako_pics(self):\n p_t = arr(self.f['Mako']['Pictures'])\n pics = p_t.reshape((p_t.shape[0], p_t.shape[2], p_t.shape[1]))\n return pics\n \n def get_basler_pics(self):\n p_t = arr(self.f['Basler']['Pictures'])\n pics = p_t.reshape((p_t.shape[0], p_t.shape[2], p_t.shape[1]))\n return pics\n \n def get_avg_pic(self):\n pics = self.get_pics()\n avg_pic = np.zeros(pics[0].shape)\n for p in pics:\n avg_pic += p\n avg_pic /= len(pics)\n return avg_pic\n\n def get_avg_mako_pic(self):\n pics = self.get_mako_pics()\n avg_pic = np.zeros(pics[0].shape)\n for p in pics:\n avg_pic += p\n avg_pic /= len(pics)\n return avg_pic\n\n def get_avg_basler_pic(self):\n pics = self.get_basler_pics()\n avg_pic = np.zeros(pics[0].shape)\n for p in pics:\n avg_pic += p\n avg_pic /= len(pics)\n return avg_pic\n \n def get_binning(self, type):\n if type == 'andor':\n binH = self.f['Andor']['Image-Dimensions']['Horizontal-Binning'][()][0]\n binV = self.f['Andor']['Image-Dimensions']['Vertical-Binning'][()][0]\n elif type == 'mako':\n binH = self.f['Mako']['Image-Dimensions']['Horizontal-Binning'][()][0]\n binV = self.f['Mako']['Image-Dimensions']['Vertical-Binning'][()][0]\n else:\n raise ValueError('Bad value for CameraType.')\n return binH, binV \n\n def print_all(self):\n self.__print_hdf5_obj(self.f,'')\n \n def print_all_groups(self):\n self.__print_groups(self.f,'')\n\n \n def print_parameters(self):\n self.__print_hdf5_obj(self.get_params(),'')\n \n def __print_groups(self, obj, prefix):\n \"\"\"\n Used recursively to print the structure of the file.\n obj can be a single file or a group or dataset within.\n \"\"\"\n for o in obj:\n if o == 'Functions':\n print(prefix, o)\n self.print_functions(prefix=prefix+'\\t')\n elif o == 'Master-Script' or o == \"Seq. 
1 NIAWG-Script\":\n print(prefix,o)\n elif type(obj[o]) == h5._hl.group.Group:\n print(prefix, o)\n self.__print_groups(obj[o], prefix + '\\t')\n elif type(obj[o]) == h5._hl.dataset.Dataset:\n print(prefix, o)\n #else:\n # raise TypeError('???')\n \n def __print_hdf5_obj(self, obj, prefix):\n \"\"\"\n Used recursively in other print functions.\n obj can be a single file or a group or dataset within.\n \"\"\"\n for o in obj:\n if o == 'Functions':\n print(prefix, o)\n self.print_functions(prefix=prefix+'\\t')\n elif o == 'Master-Script' or o == \"Seq. 1 NIAWG-Script\":\n print(prefix,o)\n self.print_script(obj[o])\n elif type(obj[o]) == h5._hl.group.Group:\n print(prefix, o)\n self.__print_hdf5_obj(obj[o], prefix + '\\t')\n elif type(obj[o]) == h5._hl.dataset.Dataset:\n print(prefix, o, ':',end='')\n self.__print_ds(obj[o],prefix+'\\t')\n else:\n raise TypeError('???')\n \n def print_functions(self, brief=True, prefix='', which=None):\n \"\"\"\n print the list of all functions which were created at the time of the experiment.\n if not brief, print the contents of every function.\n \"\"\"\n funcList = self.f['Master-Input']['Functions']\n for func in funcList:\n if which is not None:\n if func != which:\n print(func)\n continue\n print(prefix,'-',func,end='')\n if not brief:\n print(': \\n---------------------------------------')\n # I think it's a bug that this is nested like this.\n indvFunc = funcList[func]\n for x in indvFunc:\n for y in indvFunc[x]:\n # print(Style.DIM, y.decode('utf-8'), end='') for some reason the \n # DIM isn't working at the moment on the data analysis comp...\n print(y.decode('utf-8'), end='')\n print('\\n---------------------------------------\\ncount=')\n print('')\n\n def print_master_script(self):\n # A shortcut\n self.print_script(self.f['Master-Input']['Master-Script'])\n\n def print_niawg_script(self):\n # A shortcut\n self.print_script(self.f['NIAWG']['Seq. 
1 NIAWG-Script'])\n\n \n def print_script(self, script):\n \"\"\"\n special formatting used for printing long scripts which are stored as normal numpy bytes.\n \"\"\"\n print(Fore.GREEN,'\\n--------------------------------------------')\n for x in script:\n print(x.decode('UTF-8'),end='')\n print('\\n--------------------------------------------\\n\\n', Style.RESET_ALL)\n \n def __print_ds(self, ds, prefix):\n \"\"\"\n Print dataset\n \"\"\"\n if type(ds) != h5._hl.dataset.Dataset:\n raise TypeError('Tried to print non dataset as dataset.')\n else:\n if len(ds) > 0:\n if type(ds[0]) == np.bytes_:\n print(' \"',end='')\n for x in ds:\n print(x.decode('UTF-8'),end='')\n print(' \"',end='')\n elif type(ds[0]) in [np.uint8, np.uint16, np.uint32, np.uint64, \n np.int8, np.int16, np.int32, np.int64, \n np.float32, np.float64]:\n for x in ds:\n print(x,end=' ')\n else:\n print(' type:', type(ds[0]), ds[0])\n print('')\n \n def get_pic_info(self):\n infoStr = 'Number of Pictures: ' + str(self.pics.shape[0]) + '; '\n infoStr += 'Picture Dimensions: ' + str(self.pics.shape[1]) + ' x ' + str(self.pics.shape[2]) + '\\n'\n return infoStr\n \n \n def get_basic_info(self):\n \"\"\"\n Some quick easy to read summary info\n \"\"\"\n infoStr = self.get_pic_info()\n \n infoStr += 'Variations: ' + str(len(self.key)) + ';\\t'\n infoStr += 'Repetitions: ' + str(self.reps) + ';\\tExp File Version: ' + str(self.version) + ';''\\n'\n infoStr += 'Experiment started at (H:M:S) ' + str(self.exp_start_time) + ' on (Y-M-D) ' + str(self.exp_start_date) + ', '\n infoStr += 'And ended at ' + str(self.exp_stop_time) + ' on ' + str(self.exp_stop_date) + '\\n'\n if 'Experiment_Notes' in self.f['Miscellaneous'].keys():\n infoStr += 'Experiment Notes: ' + str(self.f['Miscellaneous']['Experiment_Notes'][0].decode(\"utf-8\")) + '\\n'\n else:\n infoStr += 'Experiment Notes: HDF5 NOT ANNOTATED: please call exp.Annotate() to annotate this file.\\n'\n if 'Experiment_Rationale' in self.f['Miscellaneous'].keys():\n infoStr += '(Old Notes format:) Experiment Rationale: ' + str(self.f['Miscellaneous']['Experiment_Rationale'][0].decode(\"utf-8\")) + '\\n'\n if 'Experiment_Result' in self.f['Miscellaneous'].keys():\n infoStr += '(Old Notes format:) Experiment Result: ' + str(self.f['Miscellaneous']['Experiment_Result'][0].decode(\"utf-8\")) + '\\n'\n expNoteNum = 1\n while expNoteNum < 1000:\n if 'Experiment_Note_' + str(expNoteNum) in self.f['Miscellaneous'].keys():\n infoStr += \"Extra Experiment Note #\" + str(expNoteNum) + \": \" + str(self.f['Miscellaneous']['Experiment_Note_' + str(expNoteNum)][0].decode(\"utf-8\")) + '\\n'\n expNoteNum += 1\n else: \n break\n print(infoStr)\n return infoStr\n\n def get_experiment_time_and_date(self):\n start_date, stop_date, start_time, stop_time = '','','',''\n try:\n start_date = ''.join([x.decode('UTF-8') for x in self.f['Miscellaneous']['Start-Date']])\n except KeyError:\n pass\n try:\n start_time = ''.join([x.decode('UTF-8') for x in self.f['Miscellaneous']['Start-Time']])\n except KeyError:\n pass\n try:\n stop_date = ''.join([x.decode('UTF-8') for x in self.f['Miscellaneous']['Stop-Date']])\n except KeyError:\n pass\n try:\n stop_time = ''.join([x.decode('UTF-8') for x in self.f['Miscellaneous']['Stop-Time']])\n except KeyError:\n pass\n return start_date, start_time, stop_date, stop_time\n #return \"\",\"\",\"\",\"\"\n\nif __name__ == \"__main__\":\n print(\"I am expfile\")\nif __name__==\"ExpFile\":\n print(\"I am imported expfile\")", "import numpy as np\nimport uncertainties.unumpy as 
unp\nfrom . import arb_1d_sum\n\nnumGauss = 2\n\ndef getExp(val):\n if val == 0:\n return 0\n return np.floor(np.log10(np.abs(val)))\n\ndef round_sig_str(x, sig=3):\n \"\"\"\n round a float to some number of significant digits\n :param x: the numebr to round\n :param sig: the number of significant digits to use in the rounding\n :return the rounded number, as a string.\n \"\"\"\n if sig<=0:\n return \"0\"\n if np.isnan(x):\n x = 0\n try:\n num = round(x, sig-int(np.floor(np.log10(abs(x)+2*np.finfo(float).eps)))-1)\n decimals = sig-getExp(num)-1\n if decimals == float('inf'):\n decimals = 3\n if decimals <= 0:\n decimals = 0\n result = (\"{0:.\"+str(int(decimals))+\"f}\").format(num)\n # make sure result has the correct number of significant digits given the precision.\n return result\n except ValueError:\n print(abs(x))\n\n\ndef fitCharacter(params):\n # for raman spectra, return the nbar, assuming correct orientation of the 2 gaussians\n r = params[4]/params[1]\n return r/(1-r) if not (r>=1) else np.inf\n # return the diff/2\n #return (params[5] + params[2])/2\n\ndef fitCharacterErr(params, errs):\n # sqrt(f_x'^2 sig_x^2 + f_y'^2 sig_y^2)\n # error in r:\n # sqrt(1/b^2 sig_r^2 + (-r/b^2)^2 sig_b^2)\n r = params[4]/params[1]\n errR = np.sqrt(errs[4]**2/params[1]**2 + errs[1]**2 * (r**2/params[1]**2) )\n # error in nbar: \n # sigma_r*((1-r)+r)/(1-r)**2 = 1/(1-r)**2 sigma_r\n return errR/(1-r)**2\n \ndef getFitCharacterString():\n return r'$\\bar{n}$'\n\n\ndef args():\n arglist = ['Offset']\n for i in range(numGauss):\n j = i+1\n arglist += [r'$A_'+str(j)+'$', r'$x_'+str(j)+'$',r'$\\sigma_'+str(j)+'$']\n return arglist\n\ndef f(x, *params):\n \"\"\"\n The normal function call for this function. Performs checks on valid arguments, then calls the \"raw\" function.\n :return:\n \"\"\"\n if len(params) != 3*numGauss+1:\n raise ValueError('the bump2 fitting function expects '+str(3*numGauss+1) + ' parameters and got ' + str(len(params)))\n penalty = 10**10 * np.ones(len(x))\n for i in range(numGauss):\n if params[3*i+3] < 3:\n # Penalize super-narrow fits\n return penalty\n if params[3*i+1] < 0:\n # Penalize negative amplitude fits.\n return penalty\n if not (min(x) < params[3*i+2] < max(x)):\n # penalize fit centers outside of the data range (assuming if you want to see these that you've\n # at least put the gaussian in the scan)\n return penalty\n if params[0] < 0:\n # penalize negative offset\n return penalty\n return f_raw(x, *params)\n\n\ndef f_raw(x, *params):\n \"\"\"\n The raw function call, performs no checks on valid parameters..\n :return:\n \"\"\"\n return arb_1d_sum.f(x, *params)\n\n\ndef f_unc(x, *params):\n \"\"\"\n similar to the raw function call, but uses unp instead of np for uncertainties calculations.\n :return:\n \"\"\"\n return arb_1d_sum.f_unc(x, *params)\n\ndef guess(key, values):\n \"\"\"\n Returns guess values for the parameters of this function class based on the input. Used for fitting using this class.\n \"\"\"\n return sbcGuess()[0]\n #return [min(values),\n # 0.2, -150, 5,\n # 0.1, 95, 5]\n\ndef sbcGuess():\n return [[0,0.3,-150,10, 0.3, 150, 10]]\n \n \ndef areas(A1, x01, sig1, A2, x02, sig2):\n return np.array([A1*sig1,A2*sig2])*np.sqrt(2*np.pi)" ]
[ [ "numpy.array", "numpy.string_", "numpy.zeros" ], [ "numpy.abs", "numpy.sqrt", "numpy.isnan", "numpy.finfo", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yotammarton/TransformDF2Numpy
[ "3528fe2f207089186865290b9f5cbd14d91e8c82" ]
[ "tests/test_one_hot_encode.py" ]
[ "import unittest\nimport numpy as np\nimport pandas as pd\nimport df2numpy\nfrom df2numpy import TransformDF2Numpy, one_hot_encode, NAN_CATEGORY, DROPPED_CATEGORY\nfrom df2numpy.errors import *\n\n\ndf = pd.DataFrame({\n \"A\": [\"Aa\", \"Ab\", \"Ac\", \"Aa\", \"Ac\", \"Aa\", \"Aa\", \"Aa\"], # uniques: 3, to_be_thresholded: \"Ab\"\n \"B\": [1., -3., 0., 2, 3, 0, -1.3, 0.192],\n \"C\": [\"Ca\", np.nan, \"Cc\", \"Ca\", \"Cc\", \"Ca\", \"Cc\", \"Cc\"], # uniques: 2, nan: 1, (must be numerical)\n \"D\": [\"Da\", \"Db\", \"Dc\", \"Db\", \"Dc\", \"Da\", np.nan, \"Dc\"], # uniques: 3, nan: 1\n \"E\": [1., -3., np.nan, 2, np.nan, 0, -16.9, 20],\n \"Drop\": [\"x\", \"x\", \"x\", \"x\", \"x\", \"x\", \"x\", \"x\"], # must be dropped\n \"F\": [\"Fa\", \"Fb\", \"Fc\", \"Fd\", \"Fa\", \"Fb\", \"Fc\", \"Fd\"], # uniques: 4\n})\n\ntest_df = pd.DataFrame({\n \"A\": [\"Ac\", \"Aa\"],\n \"B\": [1.4, 0.],\n \"C\": [\"Cc\", \"Ca\"],\n \"D\": [\"Dc\", \"Db\"],\n \"E\": [4.3, 2],\n \"Drop\": [\"x\", \"x\"],\n \"F\": [\"Fd\", \"Fc\"]\n})\n\ntest_df_only1data = pd.DataFrame({\n \"A\": [\"Ac\"],\n \"B\": [1.4],\n \"C\": [\"Cc\"],\n \"D\": [\"Dc\"],\n \"E\": [4.3],\n \"Drop\": [\"x\"],\n \"F\": [\"Fd\"]\n})\n\ntest_df_with_nan = pd.DataFrame({\n \"A\": [\"Ac\", np.nan],\n \"B\": [np.nan, 1.4],\n \"C\": [np.nan, \"Cc\"],\n \"D\": [\"Dc\", np.nan],\n \"E\": [4.3, np.nan],\n \"Drop\": [\"x\", np.nan],\n \"F\": [np.nan, \"Fd\"]\n})\n\ntest_df_with_new_category = pd.DataFrame({\n \"A\": [\"Ac\", \"Anew\"], # should be in DROPPED_CATEGORY\n \"B\": [1.4, 0.],\n \"C\": [\"Cc\", \"Ca\"],\n \"D\": [\"Dnew\", \"Db\"], # should be in NAN_CATEGORY\n \"E\": [4.3, 2],\n \"Drop\": [\"x\", \"x\"],\n \"F\": [\"Fd\", \"Fnew\"] # should be in the most frequent category 'Fd'\n})\n\ntest_df_wrong_const1 = pd.DataFrame({\n \"A\": [\"Ac\", \"Aa\"],\n \"B\": [1.4, 0.],\n \"Wrong\": [\"wtf\", \"???\"],\n \"D\": [\"Dc\", \"Db\"],\n \"E\": [4.3, 2],\n \"Drop\": [\"x\", \"x\"],\n \"F\": [\"Fd\", \"Fc\"]\n})\n\ntest_df_wrong_const2 = pd.DataFrame({\n \"A\": [\"Ac\", \"Aa\"],\n \"C\": [\"Cc\", \"Ca\"],\n \"B\": [1.4, 0.],\n \"D\": [\"Dc\", \"Db\"],\n \"E\": [4.3, 2],\n \"Drop\": [\"x\", \"x\"],\n \"F\": [\"Fd\", \"Fc\"]\n})\n\ntest_df_wrong_const3 = pd.DataFrame({\n \"A\": [\"Ac\", \"Aa\"],\n \"B\": [1.4, 0.],\n \"D\": [\"Dc\", \"Db\"],\n \"E\": [4.3, 2],\n \"Drop\": [\"x\", \"x\"],\n \"F\": [\"Fd\", \"Fc\"]\n})\n\n\nclass TestOneHotEncode(unittest.TestCase):\n\n def setUp(self) -> None:\n pass\n\n def test_one_hot_encode_scaled(self):\n t = TransformDF2Numpy(min_category_count=2,\n numerical_scaling=True,\n fillnan=True,\n objective_col=\"B\")\n\n x, y = t.fit_transform(df)\n\n x_one_hot, var_names = one_hot_encode(t, x)\n\n self.assertTrue(x_one_hot.shape == (8, 13))\n\n self.assertListEqual(var_names, ['A_Aa', 'A_TransformDF2Numpy_dropped_category', 'A_Ac', 'D_Da',\n 'D_Db', 'D_Dc', 'D_TransformDF2Numpy_NaN_category', 'F_Fa', 'F_Fb',\n 'F_Fc', 'F_Fd', 'C', 'E'])\n\n for i, name in enumerate(var_names):\n self.assertTrue(-0.00001 < x_one_hot[:, i].mean() < 0.00001)\n self.assertTrue(0.9999 < x_one_hot[:, i].std() < 1.00001)\n\n def test_one_hot_encode_fillnan_false(self):\n t = TransformDF2Numpy(min_category_count=2,\n fillnan=False,\n objective_col=\"B\")\n\n x, y = t.fit_transform(df)\n\n x_one_hot, var_names = one_hot_encode(t, x)\n\n self.assertListEqual(var_names, ['A_Aa', 'A_TransformDF2Numpy_dropped_category', 'A_Ac', 'D_Da', 'D_Db',\n 'D_Dc', 'F_Fa', 'F_Fb', 'F_Fc', 'F_Fd', 'C', 'E'])\n\n self.assertTrue(x_one_hot.shape == (8, 
12))\n\n self.assertListEqual(list(x_one_hot[6, 3:6]), [0., 0., 0.])\n\n def test_one_hot_encode_eliminate_verbose_feature(self):\n t = TransformDF2Numpy(min_category_count=2,\n fillnan=False,\n objective_col=\"B\")\n\n x, y = t.fit_transform(df)\n\n x_one_hot, var_names = one_hot_encode(t, x, elim_verbose=True)\n\n self.assertListEqual(var_names, ['A_Aa', 'A_TransformDF2Numpy_dropped_category', 'D_Da', 'D_Db',\n 'F_Fa', 'F_Fb', 'F_Fc', 'C', 'E'])\n\n self.assertTrue(x_one_hot.shape == (8, 9))\n\n x_one_hot_verbose, _ = one_hot_encode(t, x)\n self.assertTrue(np.alltrue(x_one_hot[:, 0:-2] == x_one_hot_verbose[:, [0,1,3,4,6,7,8]]))\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.alltrue", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
chrisjuniorli/pytorch-image-models
[ "bb815fa90c46b1f5f2f59a0dcddab8ce69f91dcf", "bb815fa90c46b1f5f2f59a0dcddab8ce69f91dcf" ]
[ "validate.py", "timm/models/layers/mlp.py" ]
[ "#!/usr/bin/env python3\n\"\"\" ImageNet Validation Script\n\nThis is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained\nmodels or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes\ncanonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.\n\nHacked together by Ross Wightman (https://github.com/rwightman)\n\"\"\"\nimport argparse\nimport os\nimport csv\nimport glob\nimport time\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nfrom collections import OrderedDict\nfrom contextlib import suppress\n\nfrom timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models\nfrom timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet\nfrom timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_legacy\nfrom ptflops import get_model_complexity_info\nimport pdb\nhas_apex = False\ntry:\n from apex import amp\n has_apex = True\nexcept ImportError:\n pass\n\nhas_native_amp = False\ntry:\n if getattr(torch.cuda.amp, 'autocast') is not None:\n has_native_amp = True\nexcept AttributeError:\n pass\n\ntorch.backends.cudnn.benchmark = True\n_logger = logging.getLogger('validate')\n\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('--dataset', '-d', metavar='NAME', default='',\n help='dataset type (default: ImageFolder/ImageTar if empty)')\nparser.add_argument('--split', metavar='NAME', default='validation',\n help='dataset split (default: validation)')\nparser.add_argument('--model', '-m', metavar='NAME', default='dpn92',\n help='model architecture (default: dpn92)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 2)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--img-size', default=None, type=int,\n metavar='N', help='Input image dimension, uses model default if empty')\nparser.add_argument('--input-size', default=None, nargs=3, type=int,\n metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')\nparser.add_argument('--crop-pct', default=None, type=float,\n metavar='N', help='Input image center crop pct')\nparser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override mean pixel value of dataset')\nparser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',\n help='Override std deviation of of dataset')\nparser.add_argument('--interpolation', default='', type=str, metavar='NAME',\n help='Image resize interpolation type (overrides model)')\nparser.add_argument('--num-classes', type=int, default=None,\n help='Number classes in dataset')\nparser.add_argument('--class-map', default='', type=str, metavar='FILENAME',\n help='path to class to idx mapping file (default: \"\")')\nparser.add_argument('--gp', default=None, type=str, metavar='POOL',\n help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). 
Model default if None.')\nparser.add_argument('--log-freq', default=10, type=int,\n metavar='N', help='batch logging frequency (default: 10)')\nparser.add_argument('--checkpoint', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--num-gpu', type=int, default=1,\n help='Number of GPUS to use')\nparser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',\n help='disable test time pool')\nparser.add_argument('--no-prefetcher', action='store_true', default=False,\n help='disable fast prefetcher')\nparser.add_argument('--pin-mem', action='store_true', default=False,\n help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')\nparser.add_argument('--channels-last', action='store_true', default=False,\n help='Use channels_last memory layout')\nparser.add_argument('--amp', action='store_true', default=False,\n help='Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.')\nparser.add_argument('--apex-amp', action='store_true', default=False,\n help='Use NVIDIA Apex AMP mixed precision')\nparser.add_argument('--native-amp', action='store_true', default=False,\n help='Use Native Torch AMP mixed precision')\nparser.add_argument('--tf-preprocessing', action='store_true', default=False,\n help='Use Tensorflow preprocessing pipeline (require CPU TF installed')\nparser.add_argument('--use-ema', dest='use_ema', action='store_true',\n help='use ema version of weights if present')\nparser.add_argument('--torchscript', dest='torchscript', action='store_true',\n help='convert model torchscript for inference')\nparser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true',\n help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance')\nparser.add_argument('--results-file', default='', type=str, metavar='FILENAME',\n help='Output csv file for validation results (summary)')\nparser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',\n help='Real labels JSON file for imagenet evaluation')\nparser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',\n help='Valid label indices txt file for validation of partial label space')\nparser.add_argument('--params', action='store_true', default=False,\n help='only caculate params')\n\ndef validate(args):\n # might as well try to validate something\n args.pretrained = args.pretrained or not args.checkpoint\n args.prefetcher = not args.no_prefetcher\n amp_autocast = suppress # do nothing\n if args.amp:\n if has_native_amp:\n args.native_amp = True\n elif has_apex:\n args.apex_amp = True\n else:\n _logger.warning(\"Neither APEX or Native Torch AMP is available.\")\n assert not args.apex_amp or not args.native_amp, \"Only one AMP mode should be set.\"\n if args.native_amp:\n amp_autocast = torch.cuda.amp.autocast\n _logger.info('Validating in mixed precision with native PyTorch AMP.')\n elif args.apex_amp:\n _logger.info('Validating in mixed precision with NVIDIA APEX AMP.')\n else:\n _logger.info('Validating in float32. 
AMP not enabled.')\n\n if args.legacy_jit:\n set_jit_legacy()\n\n # create model\n model = create_model(\n args.model,\n pretrained=args.pretrained,\n num_classes=args.num_classes,\n in_chans=3,\n global_pool=args.gp,\n scriptable=args.torchscript)\n if args.num_classes is None:\n assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'\n args.num_classes = model.num_classes\n\n if args.checkpoint:\n load_checkpoint(model, args.checkpoint, args.use_ema)\n\n param_count = sum([m.numel() for m in model.parameters()])\n _logger.info('Model %s created, param count: %d' % (args.model, param_count))\n\n data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True)\n test_time_pool = False\n if not args.no_test_pool:\n model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True)\n\n if args.torchscript:\n torch.jit.optimized_execution(True)\n model = torch.jit.script(model)\n\n model = model.cuda()\n if args.apex_amp:\n model = amp.initialize(model, opt_level='O1')\n\n if args.channels_last:\n model = model.to(memory_format=torch.channels_last)\n\n if args.num_gpu > 1:\n model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))\n\n criterion = nn.CrossEntropyLoss().cuda()\n\n dataset = create_dataset(\n root=args.data, name=args.dataset, split=args.split,\n load_bytes=args.tf_preprocessing, class_map=args.class_map)\n\n if args.valid_labels:\n with open(args.valid_labels, 'r') as f:\n valid_labels = {int(line.rstrip()) for line in f}\n valid_labels = [i in valid_labels for i in range(args.num_classes)]\n else:\n valid_labels = None\n\n if args.real_labels:\n real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)\n else:\n real_labels = None\n\n crop_pct = 1.0 if test_time_pool else data_config['crop_pct']\n #pdb.set_trace()\n loader = create_loader(\n dataset,\n input_size=data_config['input_size'],\n batch_size=args.batch_size,\n use_prefetcher=args.prefetcher,\n interpolation=data_config['interpolation'],\n mean=data_config['mean'],\n std=data_config['std'],\n num_workers=args.workers,\n crop_pct=crop_pct,\n pin_memory=args.pin_mem,\n tf_preprocessing=args.tf_preprocessing)\n\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n model.eval()\n with torch.no_grad():\n # warmup, reduce variability of first batch time, especially for comparing torchscript vs non\n input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda()\n if args.channels_last:\n input = input.contiguous(memory_format=torch.channels_last)\n model(input)\n end = time.time()\n macs, params = get_model_complexity_info(model, data_config['input_size'], as_strings=False, print_per_layer_stat=True, verbose=True)\n if args.params:\n _logger.info('Params ({:}) Macs ({:})'.format(params, macs))\n return\n for batch_idx, (input, target) in enumerate(loader):\n if args.no_prefetcher:\n target = target.cuda()\n input = input.cuda()\n if args.channels_last:\n input = input.contiguous(memory_format=torch.channels_last)\n\n # compute output\n with amp_autocast():\n output = model(input)\n\n if valid_labels is not None:\n output = output[:, valid_labels]\n loss = criterion(output, target)\n\n if real_labels is not None:\n real_labels.add_result(output)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n 
top1.update(acc1.item(), input.size(0))\n top5.update(acc5.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if batch_idx % args.log_freq == 0:\n _logger.info(\n 'Test: [{0:>4d}/{1}] '\n 'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '\n 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '\n 'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '\n 'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(\n batch_idx, len(loader), batch_time=batch_time,\n rate_avg=input.size(0) / batch_time.avg,\n loss=losses, top1=top1, top5=top5))\n \n #macs, params = get_model_complexity_info(model, (3,224,224), as_strings=False, print_per_layer_stat=True, verbose=True)\n\n if real_labels is not None:\n # real labels mode replaces topk values at the end\n top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)\n else:\n top1a, top5a = top1.avg, top5.avg\n results = OrderedDict(\n top1=round(top1a, 4), top1_err=round(100 - top1a, 4),\n top5=round(top5a, 4), top5_err=round(100 - top5a, 4),\n param_count=round(param_count / 1e6, 2),\n img_size=data_config['input_size'][-1],\n cropt_pct=crop_pct,\n interpolation=data_config['interpolation'])\n #pdb.set_trace()\n _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f}) Params ({:}) Macs ({:})'.format(results['top1'], results['top1_err'], results['top5'], results['top5_err'], params, macs))\n\n return results\n\n\ndef main():\n setup_default_logging()\n args = parser.parse_args()\n model_cfgs = []\n model_names = []\n if os.path.isdir(args.checkpoint):\n # validate all checkpoints in a path with same model\n checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')\n checkpoints += glob.glob(args.checkpoint + '/*.pth')\n model_names = list_models(args.model)\n model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]\n else:\n if args.model == 'all':\n # validate all models in a list of names with pretrained checkpoints\n args.pretrained = True\n model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k'])\n model_cfgs = [(n, '') for n in model_names]\n elif not is_model(args.model):\n # model name doesn't exist, try as wildcard filter\n model_names = list_models(args.model)\n model_cfgs = [(n, '') for n in model_names]\n\n if len(model_cfgs):\n results_file = args.results_file or './results-all.csv'\n _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))\n results = []\n try:\n start_batch_size = args.batch_size\n for m, c in model_cfgs:\n batch_size = start_batch_size\n args.model = m\n args.checkpoint = c\n result = OrderedDict(model=args.model)\n r = {}\n while not r and batch_size >= args.num_gpu:\n torch.cuda.empty_cache()\n try:\n args.batch_size = batch_size\n print('Validating with batch size: %d' % args.batch_size)\n r = validate(args)\n except RuntimeError as e:\n if batch_size <= args.num_gpu:\n print(\"Validation failed with no ability to reduce batch size. 
Exiting.\")\n raise e\n batch_size = max(batch_size // 2, args.num_gpu)\n print(\"Validation failed, reducing batch size by 50%\")\n result.update(r)\n if args.checkpoint:\n result['checkpoint'] = args.checkpoint\n results.append(result)\n except KeyboardInterrupt as e:\n pass\n results = sorted(results, key=lambda x: x['top1'], reverse=True)\n if len(results):\n write_results(results_file, results)\n else:\n validate(args)\n\n\ndef write_results(results_file, results):\n with open(results_file, mode='w') as cf:\n dw = csv.DictWriter(cf, fieldnames=results[0].keys())\n dw.writeheader()\n for r in results:\n dw.writerow(r)\n cf.flush()\n\n\nif __name__ == '__main__':\n main()\n", "\"\"\" MLP module w/ dropout and configurable activation layer\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nfrom torch import nn as nn\n\n\nclass Mlp(nn.Module):\n \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n \"\"\"\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\nclass GluMlp(nn.Module):\n \"\"\" MLP w/ GLU style gating\n See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202\n \"\"\"\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n assert hidden_features % 2 == 0\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features // 2, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x, gates = x.chunk(2, dim=-1)\n x = x * self.act(gates)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass GatedMlp(nn.Module):\n \"\"\" MLP as used in gMLP\n \"\"\"\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,\n gate_layer=None, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n if gate_layer is not None:\n assert hidden_features % 2 == 0\n self.gate = gate_layer(hidden_features)\n hidden_features = hidden_features // 2 # FIXME base reduction on gate property?\n else:\n self.gate = nn.Identity()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.gate(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass ConvMlp(nn.Module):\n \"\"\" MLP using 1x1 convs that keeps spatial dims\n \"\"\"\n def __init__(\n self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)\n self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()\n self.act = 
act_layer()\n self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.norm(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n return x\n" ]
[ [ "torch.jit.script", "torch.nn.CrossEntropyLoss", "torch.jit.optimized_execution", "torch.cuda.empty_cache", "torch.no_grad" ], [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Identity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mcnoat/pymc3
[ "8b1f64cce32db3357301b88bbe9f7108733ac70a", "8b1f64cce32db3357301b88bbe9f7108733ac70a" ]
[ "pymc3/step_methods/metropolis.py", "pymc3/plots/posteriorplot.py" ]
[ "# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport numpy.random as nr\nimport scipy.linalg\nimport theano\n\nimport pymc3 as pm\n\nfrom pymc3.distributions import draw_values\nfrom pymc3.step_methods.arraystep import (\n ArrayStep,\n ArrayStepShared,\n Competence,\n PopulationArrayStepShared,\n metrop_select,\n)\nfrom pymc3.theanof import floatX\n\n__all__ = [\n \"Metropolis\",\n \"DEMetropolis\",\n \"DEMetropolisZ\",\n \"BinaryMetropolis\",\n \"BinaryGibbsMetropolis\",\n \"CategoricalGibbsMetropolis\",\n \"NormalProposal\",\n \"CauchyProposal\",\n \"LaplaceProposal\",\n \"PoissonProposal\",\n \"MultivariateNormalProposal\",\n]\n\n# Available proposal distributions for Metropolis\n\n\nclass Proposal:\n def __init__(self, s):\n self.s = s\n\n\nclass NormalProposal(Proposal):\n def __call__(self):\n return nr.normal(scale=self.s)\n\n\nclass UniformProposal(Proposal):\n def __call__(self):\n return nr.uniform(low=-self.s, high=self.s, size=len(self.s))\n\n\nclass CauchyProposal(Proposal):\n def __call__(self):\n return nr.standard_cauchy(size=np.size(self.s)) * self.s\n\n\nclass LaplaceProposal(Proposal):\n def __call__(self):\n size = np.size(self.s)\n return (nr.standard_exponential(size=size) - nr.standard_exponential(size=size)) * self.s\n\n\nclass PoissonProposal(Proposal):\n def __call__(self):\n return nr.poisson(lam=self.s, size=np.size(self.s)) - self.s\n\n\nclass MultivariateNormalProposal(Proposal):\n def __init__(self, s):\n n, m = s.shape\n if n != m:\n raise ValueError(\"Covariance matrix is not symmetric.\")\n self.n = n\n self.chol = scipy.linalg.cholesky(s, lower=True)\n\n def __call__(self, num_draws=None):\n if num_draws is not None:\n b = np.random.randn(self.n, num_draws)\n return np.dot(self.chol, b).T\n else:\n b = np.random.randn(self.n)\n return np.dot(self.chol, b)\n\n\nclass Metropolis(ArrayStepShared):\n \"\"\"\n Metropolis-Hastings sampling step\n\n Parameters\n ----------\n vars: list\n List of variables for sampler\n S: standard deviation or covariance matrix\n Some measure of variance to parameterize proposal distribution\n proposal_dist: function\n Function that returns zero-mean deviates when parameterized with\n S (and n). Defaults to normal.\n scaling: scalar or array\n Initial scale factor for proposal. Defaults to 1.\n tune: bool\n Flag for tuning. Defaults to True.\n tune_interval: int\n The frequency of tuning. Defaults to 100 iterations.\n model: PyMC Model\n Optional model for sampling step. 
Defaults to None (taken from context).\n mode: string or `Mode` instance.\n compilation mode passed to Theano functions\n \"\"\"\n\n name = \"metropolis\"\n\n default_blocked = False\n generates_stats = True\n stats_dtypes = [\n {\n \"accept\": np.float64,\n \"accepted\": np.bool,\n \"tune\": np.bool,\n \"scaling\": np.float64,\n }\n ]\n\n def __init__(\n self,\n vars=None,\n S=None,\n proposal_dist=None,\n scaling=1.0,\n tune=True,\n tune_interval=100,\n model=None,\n mode=None,\n **kwargs\n ):\n\n model = pm.modelcontext(model)\n\n if vars is None:\n vars = model.vars\n vars = pm.inputvars(vars)\n\n if S is None:\n S = np.ones(sum(v.dsize for v in vars))\n\n if proposal_dist is not None:\n self.proposal_dist = proposal_dist(S)\n elif S.ndim == 1:\n self.proposal_dist = NormalProposal(S)\n elif S.ndim == 2:\n self.proposal_dist = MultivariateNormalProposal(S)\n else:\n raise ValueError(\"Invalid rank for variance: %s\" % S.ndim)\n\n self.scaling = np.atleast_1d(scaling).astype(\"d\")\n self.tune = tune\n self.tune_interval = tune_interval\n self.steps_until_tune = tune_interval\n self.accepted = 0\n\n # Determine type of variables\n self.discrete = np.concatenate(\n [[v.dtype in pm.discrete_types] * (v.dsize or 1) for v in vars]\n )\n self.any_discrete = self.discrete.any()\n self.all_discrete = self.discrete.all()\n\n # remember initial settings before tuning so they can be reset\n self._untuned_settings = dict(\n scaling=self.scaling, steps_until_tune=tune_interval, accepted=self.accepted\n )\n\n self.mode = mode\n\n shared = pm.make_shared_replacements(vars, model)\n self.delta_logp = delta_logp(model.logpt, vars, shared)\n super().__init__(vars, shared)\n\n def reset_tuning(self):\n \"\"\"Resets the tuned sampler parameters to their initial values.\"\"\"\n for attr, initial_value in self._untuned_settings.items():\n setattr(self, attr, initial_value)\n return\n\n def astep(self, q0):\n if not self.steps_until_tune and self.tune:\n # Tune scaling parameter\n self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))\n # Reset counter\n self.steps_until_tune = self.tune_interval\n self.accepted = 0\n\n delta = self.proposal_dist() * self.scaling\n\n if self.any_discrete:\n if self.all_discrete:\n delta = np.round(delta, 0).astype(\"int64\")\n q0 = q0.astype(\"int64\")\n q = (q0 + delta).astype(\"int64\")\n else:\n delta[self.discrete] = np.round(delta[self.discrete], 0)\n q = q0 + delta\n else:\n q = floatX(q0 + delta)\n\n accept = self.delta_logp(q, q0)\n q_new, accepted = metrop_select(accept, q, q0)\n self.accepted += accepted\n\n self.steps_until_tune -= 1\n\n stats = {\n \"tune\": self.tune,\n \"scaling\": self.scaling,\n \"accept\": np.exp(accept),\n \"accepted\": accepted,\n }\n\n return q_new, [stats]\n\n @staticmethod\n def competence(var, has_grad):\n return Competence.COMPATIBLE\n\n\ndef tune(scale, acc_rate):\n \"\"\"\n Tunes the scaling parameter for the proposal distribution\n according to the acceptance rate over the last tune_interval:\n\n Rate Variance adaptation\n ---- -------------------\n <0.001 x 0.1\n <0.05 x 0.5\n <0.2 x 0.9\n >0.5 x 1.1\n >0.75 x 2\n >0.95 x 10\n\n \"\"\"\n if acc_rate < 0.001:\n # reduce by 90 percent\n return scale * 0.1\n elif acc_rate < 0.05:\n # reduce by 50 percent\n return scale * 0.5\n elif acc_rate < 0.2:\n # reduce by ten percent\n return scale * 0.9\n elif acc_rate > 0.95:\n # increase by factor of ten\n return scale * 10.0\n elif acc_rate > 0.75:\n # increase by double\n return scale * 2.0\n elif acc_rate > 0.5:\n # 
increase by ten percent\n return scale * 1.1\n\n return scale\n\n\nclass BinaryMetropolis(ArrayStep):\n \"\"\"Metropolis-Hastings optimized for binary variables\n\n Parameters\n ----------\n vars: list\n List of variables for sampler\n scaling: scalar or array\n Initial scale factor for proposal. Defaults to 1.\n tune: bool\n Flag for tuning. Defaults to True.\n tune_interval: int\n The frequency of tuning. Defaults to 100 iterations.\n model: PyMC Model\n Optional model for sampling step. Defaults to None (taken from context).\n\n \"\"\"\n\n name = \"binary_metropolis\"\n\n generates_stats = True\n stats_dtypes = [\n {\n \"accept\": np.float64,\n \"tune\": np.bool,\n \"p_jump\": np.float64,\n }\n ]\n\n def __init__(self, vars, scaling=1.0, tune=True, tune_interval=100, model=None):\n\n model = pm.modelcontext(model)\n\n self.scaling = scaling\n self.tune = tune\n self.tune_interval = tune_interval\n self.steps_until_tune = tune_interval\n self.accepted = 0\n\n if not all([v.dtype in pm.discrete_types for v in vars]):\n raise ValueError(\"All variables must be Bernoulli for BinaryMetropolis\")\n\n super().__init__(vars, [model.fastlogp])\n\n def astep(self, q0, logp):\n\n # Convert adaptive_scale_factor to a jump probability\n p_jump = 1.0 - 0.5 ** self.scaling\n\n rand_array = nr.random(q0.shape)\n q = np.copy(q0)\n # Locations where switches occur, according to p_jump\n switch_locs = rand_array < p_jump\n q[switch_locs] = True - q[switch_locs]\n\n accept = logp(q) - logp(q0)\n q_new, accepted = metrop_select(accept, q, q0)\n self.accepted += accepted\n\n stats = {\n \"tune\": self.tune,\n \"accept\": np.exp(accept),\n \"p_jump\": p_jump,\n }\n\n return q_new, [stats]\n\n @staticmethod\n def competence(var):\n \"\"\"\n BinaryMetropolis is only suitable for binary (bool)\n and Categorical variables with k=1.\n \"\"\"\n distribution = getattr(var.distribution, \"parent_dist\", var.distribution)\n if isinstance(distribution, pm.Bernoulli) or (var.dtype in pm.bool_types):\n return Competence.COMPATIBLE\n elif isinstance(distribution, pm.Categorical) and (distribution.k == 2):\n return Competence.COMPATIBLE\n return Competence.INCOMPATIBLE\n\n\nclass BinaryGibbsMetropolis(ArrayStep):\n \"\"\"A Metropolis-within-Gibbs step method optimized for binary variables\n\n Parameters\n ----------\n vars: list\n List of variables for sampler\n order: list or 'random'\n List of integers indicating the Gibbs update order\n e.g., [0, 2, 1, ...]. Default is random\n transit_p: float\n The diagonal of the transition kernel. A value > .5 gives anticorrelated proposals,\n which resulting in more efficient antithetical sampling.\n model: PyMC Model\n Optional model for sampling step. 
Defaults to None (taken from context).\n\n \"\"\"\n\n name = \"binary_gibbs_metropolis\"\n\n def __init__(self, vars, order=\"random\", transit_p=0.8, model=None):\n\n model = pm.modelcontext(model)\n\n # transition probabilities\n self.transit_p = transit_p\n\n self.dim = sum(v.dsize for v in vars)\n\n if order == \"random\":\n self.shuffle_dims = True\n self.order = list(range(self.dim))\n else:\n if sorted(order) != list(range(self.dim)):\n raise ValueError(\"Argument 'order' has to be a permutation\")\n self.shuffle_dims = False\n self.order = order\n\n if not all([v.dtype in pm.discrete_types for v in vars]):\n raise ValueError(\"All variables must be binary for BinaryGibbsMetropolis\")\n\n super().__init__(vars, [model.fastlogp])\n\n def astep(self, q0, logp):\n order = self.order\n if self.shuffle_dims:\n nr.shuffle(order)\n\n q = np.copy(q0)\n logp_curr = logp(q)\n\n for idx in order:\n # No need to do metropolis update if the same value is proposed,\n # as you will get the same value regardless of accepted or reject\n if nr.rand() < self.transit_p:\n curr_val, q[idx] = q[idx], True - q[idx]\n logp_prop = logp(q)\n q[idx], accepted = metrop_select(logp_prop - logp_curr, q[idx], curr_val)\n if accepted:\n logp_curr = logp_prop\n\n return q\n\n @staticmethod\n def competence(var):\n \"\"\"\n BinaryMetropolis is only suitable for Bernoulli\n and Categorical variables with k=2.\n \"\"\"\n distribution = getattr(var.distribution, \"parent_dist\", var.distribution)\n if isinstance(distribution, pm.Bernoulli) or (var.dtype in pm.bool_types):\n return Competence.IDEAL\n elif isinstance(distribution, pm.Categorical) and (distribution.k == 2):\n return Competence.IDEAL\n return Competence.INCOMPATIBLE\n\n\nclass CategoricalGibbsMetropolis(ArrayStep):\n \"\"\"A Metropolis-within-Gibbs step method optimized for categorical variables.\n This step method works for Bernoulli variables as well, but it is not\n optimized for them, like BinaryGibbsMetropolis is. Step method supports\n two types of proposals: A uniform proposal and a proportional proposal,\n which was introduced by Liu in his 1996 technical report\n \"Metropolized Gibbs Sampler: An Improvement\".\n \"\"\"\n\n name = \"categorical_gibbs_metropolis\"\n\n def __init__(self, vars, proposal=\"uniform\", order=\"random\", model=None):\n\n model = pm.modelcontext(model)\n vars = pm.inputvars(vars)\n\n dimcats = []\n # The above variable is a list of pairs (aggregate dimension, number\n # of categories). 
For example, if vars = [x, y] with x being a 2-D\n # variable with M categories and y being a 3-D variable with N\n # categories, we will have dimcats = [(0, M), (1, M), (2, N), (3, N), (4, N)].\n for v in vars:\n distr = getattr(v.distribution, \"parent_dist\", v.distribution)\n if isinstance(distr, pm.Categorical):\n k = draw_values([distr.k])[0]\n elif isinstance(distr, pm.Bernoulli) or (v.dtype in pm.bool_types):\n k = 2\n else:\n raise ValueError(\n \"All variables must be categorical or binary\" + \"for CategoricalGibbsMetropolis\"\n )\n start = len(dimcats)\n dimcats += [(dim, k) for dim in range(start, start + v.dsize)]\n\n if order == \"random\":\n self.shuffle_dims = True\n self.dimcats = dimcats\n else:\n if sorted(order) != list(range(len(dimcats))):\n raise ValueError(\"Argument 'order' has to be a permutation\")\n self.shuffle_dims = False\n self.dimcats = [dimcats[j] for j in order]\n\n if proposal == \"uniform\":\n self.astep = self.astep_unif\n elif proposal == \"proportional\":\n # Use the optimized \"Metropolized Gibbs Sampler\" described in Liu96.\n self.astep = self.astep_prop\n else:\n raise ValueError(\"Argument 'proposal' should either be 'uniform' or 'proportional'\")\n\n super().__init__(vars, [model.fastlogp])\n\n def astep_unif(self, q0, logp):\n dimcats = self.dimcats\n if self.shuffle_dims:\n nr.shuffle(dimcats)\n\n q = np.copy(q0)\n logp_curr = logp(q)\n\n for dim, k in dimcats:\n curr_val, q[dim] = q[dim], sample_except(k, q[dim])\n logp_prop = logp(q)\n q[dim], accepted = metrop_select(logp_prop - logp_curr, q[dim], curr_val)\n if accepted:\n logp_curr = logp_prop\n return q\n\n def astep_prop(self, q0, logp):\n dimcats = self.dimcats\n if self.shuffle_dims:\n nr.shuffle(dimcats)\n\n q = np.copy(q0)\n logp_curr = logp(q)\n\n for dim, k in dimcats:\n logp_curr = self.metropolis_proportional(q, logp, logp_curr, dim, k)\n\n return q\n\n def metropolis_proportional(self, q, logp, logp_curr, dim, k):\n given_cat = int(q[dim])\n log_probs = np.zeros(k)\n log_probs[given_cat] = logp_curr\n candidates = list(range(k))\n for candidate_cat in candidates:\n if candidate_cat != given_cat:\n q[dim] = candidate_cat\n log_probs[candidate_cat] = logp(q)\n probs = softmax(log_probs)\n prob_curr, probs[given_cat] = probs[given_cat], 0.0\n probs /= 1.0 - prob_curr\n proposed_cat = nr.choice(candidates, p=probs)\n accept_ratio = (1.0 - prob_curr) / (1.0 - probs[proposed_cat])\n if not np.isfinite(accept_ratio) or nr.uniform() >= accept_ratio:\n q[dim] = given_cat\n return logp_curr\n q[dim] = proposed_cat\n return log_probs[proposed_cat]\n\n @staticmethod\n def competence(var):\n \"\"\"\n CategoricalGibbsMetropolis is only suitable for Bernoulli and\n Categorical variables.\n \"\"\"\n distribution = getattr(var.distribution, \"parent_dist\", var.distribution)\n if isinstance(distribution, pm.Categorical):\n if distribution.k > 2:\n return Competence.IDEAL\n return Competence.COMPATIBLE\n elif isinstance(distribution, pm.Bernoulli) or (var.dtype in pm.bool_types):\n return Competence.COMPATIBLE\n return Competence.INCOMPATIBLE\n\n\nclass DEMetropolis(PopulationArrayStepShared):\n \"\"\"\n Differential Evolution Metropolis sampling step.\n\n Parameters\n ----------\n lamb: float\n Lambda parameter of the DE proposal mechanism. 
Defaults to 2.38 / sqrt(2 * ndim)\n vars: list\n List of variables for sampler\n S: standard deviation or covariance matrix\n Some measure of variance to parameterize proposal distribution\n proposal_dist: function\n Function that returns zero-mean deviates when parameterized with\n S (and n). Defaults to Uniform(-S,+S).\n scaling: scalar or array\n Initial scale factor for epsilon. Defaults to 0.001\n tune: str\n Which hyperparameter to tune. Defaults to None, but can also be 'scaling' or 'lambda'.\n tune_interval: int\n The frequency of tuning. Defaults to 100 iterations.\n model: PyMC Model\n Optional model for sampling step. Defaults to None (taken from context).\n mode: string or `Mode` instance.\n compilation mode passed to Theano functions\n\n References\n ----------\n .. [Braak2006] Cajo C.F. ter Braak (2006).\n A Markov Chain Monte Carlo version of the genetic algorithm\n Differential Evolution: easy Bayesian computing for real parameter spaces.\n Statistics and Computing\n `link <https://doi.org/10.1007/s11222-006-8769-1>`__\n \"\"\"\n\n name = \"DEMetropolis\"\n\n default_blocked = True\n generates_stats = True\n stats_dtypes = [\n {\n \"accept\": np.float64,\n \"accepted\": np.bool,\n \"tune\": np.bool,\n \"scaling\": np.float64,\n \"lambda\": np.float64,\n }\n ]\n\n def __init__(\n self,\n vars=None,\n S=None,\n proposal_dist=None,\n lamb=None,\n scaling=0.001,\n tune=None,\n tune_interval=100,\n model=None,\n mode=None,\n **kwargs\n ):\n\n model = pm.modelcontext(model)\n\n if vars is None:\n vars = model.cont_vars\n vars = pm.inputvars(vars)\n\n if S is None:\n S = np.ones(model.ndim)\n\n if proposal_dist is not None:\n self.proposal_dist = proposal_dist(S)\n else:\n self.proposal_dist = UniformProposal(S)\n\n self.scaling = np.atleast_1d(scaling).astype(\"d\")\n if lamb is None:\n # default to the optimal lambda for normally distributed targets\n lamb = 2.38 / np.sqrt(2 * model.ndim)\n self.lamb = float(lamb)\n if tune not in {None, \"scaling\", \"lambda\"}:\n raise ValueError('The parameter \"tune\" must be one of {None, scaling, lambda}')\n self.tune = tune\n self.tune_interval = tune_interval\n self.steps_until_tune = tune_interval\n self.accepted = 0\n\n self.mode = mode\n\n shared = pm.make_shared_replacements(vars, model)\n self.delta_logp = delta_logp(model.logpt, vars, shared)\n super().__init__(vars, shared)\n\n def astep(self, q0):\n if not self.steps_until_tune and self.tune:\n if self.tune == \"scaling\":\n self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))\n elif self.tune == \"lambda\":\n self.lamb = tune(self.lamb, self.accepted / float(self.tune_interval))\n # Reset counter\n self.steps_until_tune = self.tune_interval\n self.accepted = 0\n\n epsilon = self.proposal_dist() * self.scaling\n\n # differential evolution proposal\n # select two other chains\n ir1, ir2 = np.random.choice(self.other_chains, 2, replace=False)\n r1 = self.bij.map(self.population[ir1])\n r2 = self.bij.map(self.population[ir2])\n # propose a jump\n q = floatX(q0 + self.lamb * (r1 - r2) + epsilon)\n\n accept = self.delta_logp(q, q0)\n q_new, accepted = metrop_select(accept, q, q0)\n self.accepted += accepted\n\n self.steps_until_tune -= 1\n\n stats = {\n \"tune\": self.tune,\n \"scaling\": self.scaling,\n \"lambda\": self.lamb,\n \"accept\": np.exp(accept),\n \"accepted\": accepted,\n }\n\n return q_new, [stats]\n\n @staticmethod\n def competence(var, has_grad):\n if var.dtype in pm.discrete_types:\n return Competence.INCOMPATIBLE\n return 
Competence.COMPATIBLE\n\n\nclass DEMetropolisZ(ArrayStepShared):\n \"\"\"\n Adaptive Differential Evolution Metropolis sampling step that uses the past to inform jumps.\n\n Parameters\n ----------\n lamb: float\n Lambda parameter of the DE proposal mechanism. Defaults to 2.38 / sqrt(2 * ndim)\n vars: list\n List of variables for sampler\n S: standard deviation or covariance matrix\n Some measure of variance to parameterize proposal distribution\n proposal_dist: function\n Function that returns zero-mean deviates when parameterized with\n S (and n). Defaults to Uniform(-S,+S).\n scaling: scalar or array\n Initial scale factor for epsilon. Defaults to 0.001\n tune: str\n Which hyperparameter to tune. Defaults to 'lambda', but can also be 'scaling' or None.\n tune_interval: int\n The frequency of tuning. Defaults to 100 iterations.\n tune_drop_fraction: float\n Fraction of tuning steps that will be removed from the samplers history when the tuning ends.\n Defaults to 0.9 - keeping the last 10% of tuning steps for good mixing while removing 90% of\n potentially unconverged tuning positions.\n model: PyMC Model\n Optional model for sampling step. Defaults to None (taken from context).\n mode: string or `Mode` instance.\n compilation mode passed to Theano functions\n\n References\n ----------\n .. [Braak2006] Cajo C.F. ter Braak (2006).\n Differential Evolution Markov Chain with snooker updater and fewer chains.\n Statistics and Computing\n `link <https://doi.org/10.1007/s11222-008-9104-9>`__\n \"\"\"\n\n name = \"DEMetropolisZ\"\n\n default_blocked = True\n generates_stats = True\n stats_dtypes = [\n {\n \"accept\": np.float64,\n \"accepted\": np.bool,\n \"tune\": np.bool,\n \"scaling\": np.float64,\n \"lambda\": np.float64,\n }\n ]\n\n def __init__(\n self,\n vars=None,\n S=None,\n proposal_dist=None,\n lamb=None,\n scaling=0.001,\n tune=\"lambda\",\n tune_interval=100,\n tune_drop_fraction: float = 0.9,\n model=None,\n mode=None,\n **kwargs\n ):\n model = pm.modelcontext(model)\n\n if vars is None:\n vars = model.cont_vars\n vars = pm.inputvars(vars)\n\n if S is None:\n S = np.ones(model.ndim)\n\n if proposal_dist is not None:\n self.proposal_dist = proposal_dist(S)\n else:\n self.proposal_dist = UniformProposal(S)\n\n self.scaling = np.atleast_1d(scaling).astype(\"d\")\n if lamb is None:\n # default to the optimal lambda for normally distributed targets\n lamb = 2.38 / np.sqrt(2 * model.ndim)\n self.lamb = float(lamb)\n if tune not in {None, \"scaling\", \"lambda\"}:\n raise ValueError('The parameter \"tune\" must be one of {None, scaling, lambda}')\n self.tune = True\n self.tune_target = tune\n self.tune_interval = tune_interval\n self.tune_drop_fraction = tune_drop_fraction\n self.steps_until_tune = tune_interval\n self.accepted = 0\n\n # cache local history for the Z-proposals\n self._history = []\n # remember initial settings before tuning so they can be reset\n self._untuned_settings = dict(\n scaling=self.scaling,\n lamb=self.lamb,\n steps_until_tune=tune_interval,\n accepted=self.accepted,\n )\n\n self.mode = mode\n\n shared = pm.make_shared_replacements(vars, model)\n self.delta_logp = delta_logp(model.logpt, vars, shared)\n super().__init__(vars, shared)\n\n def reset_tuning(self):\n \"\"\"Resets the tuned sampler parameters and history to their initial values.\"\"\"\n # history can't be reset via the _untuned_settings dict because it's a list\n self._history = []\n for attr, initial_value in self._untuned_settings.items():\n setattr(self, attr, initial_value)\n return\n\n def 
astep(self, q0):\n # same tuning scheme as DEMetropolis\n if not self.steps_until_tune and self.tune:\n if self.tune_target == \"scaling\":\n self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))\n elif self.tune_target == \"lambda\":\n self.lamb = tune(self.lamb, self.accepted / float(self.tune_interval))\n # Reset counter\n self.steps_until_tune = self.tune_interval\n self.accepted = 0\n\n epsilon = self.proposal_dist() * self.scaling\n\n it = len(self._history)\n # use the DE-MCMC-Z proposal scheme as soon as the history has 2 entries\n if it > 1:\n # differential evolution proposal\n # select two other chains\n iz1 = np.random.randint(it)\n iz2 = np.random.randint(it)\n while iz2 == iz1:\n iz2 = np.random.randint(it)\n\n z1 = self._history[iz1]\n z2 = self._history[iz2]\n # propose a jump\n q = floatX(q0 + self.lamb * (z1 - z2) + epsilon)\n else:\n # propose just with noise in the first 2 iterations\n q = floatX(q0 + epsilon)\n\n accept = self.delta_logp(q, q0)\n q_new, accepted = metrop_select(accept, q, q0)\n self.accepted += accepted\n self._history.append(q_new)\n\n self.steps_until_tune -= 1\n\n stats = {\n \"tune\": self.tune,\n \"scaling\": self.scaling,\n \"lambda\": self.lamb,\n \"accept\": np.exp(accept),\n \"accepted\": accepted,\n }\n\n return q_new, [stats]\n\n def stop_tuning(self):\n \"\"\"At the end of the tuning phase, this method removes the first x% of the history\n so future proposals are not informed by unconverged tuning iterations.\n \"\"\"\n it = len(self._history)\n n_drop = int(self.tune_drop_fraction * it)\n self._history = self._history[n_drop:]\n return super().stop_tuning()\n\n @staticmethod\n def competence(var, has_grad):\n if var.dtype in pm.discrete_types:\n return Competence.INCOMPATIBLE\n return Competence.COMPATIBLE\n\n\ndef sample_except(limit, excluded):\n candidate = nr.choice(limit - 1)\n if candidate >= excluded:\n candidate += 1\n return candidate\n\n\ndef softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / np.sum(e_x, axis=0)\n\n\ndef delta_logp(logp, vars, shared):\n [logp0], inarray0 = pm.join_nonshared_inputs([logp], vars, shared)\n\n tensor_type = inarray0.type\n inarray1 = tensor_type(\"inarray1\")\n\n logp1 = pm.CallableTensor(logp0)(inarray1)\n\n f = theano.function([inarray1, inarray0], logp1 - logp0)\n f.trust_input = True\n return f\n", "# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable, Optional, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom pymc3.backends.base import MultiTrace\n\nif TYPE_CHECKING:\n from arviz.data.inference_data import InferenceData\n\n\ndef plot_posterior_predictive_glm(\n trace: Union[InferenceData, MultiTrace],\n eval: Optional[np.ndarray] = None,\n lm: Optional[Callable] = None,\n samples: int = 30,\n **kwargs: Any\n) -> None:\n \"\"\"Plot posterior predictive of a linear model.\n :Arguments:\n trace: InferenceData or MultiTrace\n 
Output of pm.sample()\n eval: <array>\n Array over which to evaluate lm\n lm: function <default: linear function>\n Function mapping parameters at different points\n to their respective outputs.\n input: point, sample\n output: estimated value\n samples: int <default=30>\n How many posterior samples to draw.\n Additional keyword arguments are passed to pylab.plot().\n \"\"\"\n if lm is None:\n lm = lambda x, sample: sample[\"Intercept\"] + sample[\"x\"] * x\n\n if eval is None:\n eval = np.linspace(0, 1, 100)\n\n # Set default plotting arguments\n if \"lw\" not in kwargs and \"linewidth\" not in kwargs:\n kwargs[\"lw\"] = 0.2\n if \"c\" not in kwargs and \"color\" not in kwargs:\n kwargs[\"c\"] = \"k\"\n\n if not isinstance(trace, MultiTrace):\n trace = trace.posterior.to_dataframe().to_dict(orient=\"records\")\n\n for rand_loc in np.random.randint(0, len(trace), samples):\n rand_sample = trace[rand_loc]\n plt.plot(eval, lm(eval, rand_sample), **kwargs)\n # Make sure to not plot label multiple times\n kwargs.pop(\"label\", None)\n\n plt.title(\"Posterior predictive\")\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.concatenate", "numpy.max", "numpy.random.standard_exponential", "numpy.round", "numpy.random.randn", "numpy.exp", "numpy.random.randint", "numpy.atleast_1d", "numpy.size", "numpy.copy", "numpy.zeros", "numpy.random.choice", "numpy.random.rand", "numpy.sum", "numpy.random.random", "numpy.isfinite", "numpy.random.shuffle", "numpy.ones", "numpy.random.normal", "numpy.random.uniform" ], [ "matplotlib.pyplot.title", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wodxyj/plpp
[ "cd74916536cf180a37b088ec61ea2a12a63719f2" ]
[ "src/main.py" ]
[ "import os\nimport shutil\nimport time\n\nimport configargparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom tensorboardX import SummaryWriter\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom tqdm import tqdm\n\nimport disp_models\nimport logger\nimport models\nimport utils_func\nfrom dataloader import KITTILoader3D\nfrom dataloader import KITTILoader_dataset3d\nfrom dataloader import SceneFlowLoader\nfrom dataloader import listflowfile\n\nparser = configargparse.ArgParser(description='PSMNet')\nparser.add('-c', '--config', required=True,\n is_config_file=True, help='config file')\n\nparser.add_argument('--save_path', type=str, default='',\n help='path to save the log, tensorbaord and checkpoint')\n# network\nparser.add_argument('--data_type', default='depth', choices=['disparity', 'depth'],\n help='the network can predict either disparity or depth')\nparser.add_argument('--arch', default='SDNet', choices=['SDNet', 'PSMNet'],\n help='Model Name, default: SDNet.')\nparser.add_argument('--maxdisp', type=int, default=192,\n help='maxium disparity, the range of the disparity cost volume: [0, maxdisp-1]')\nparser.add_argument('--down', type=float, default=2,\n help='reduce x times resolution when build the depth cost volume')\nparser.add_argument('--maxdepth', type=int, default=80,\n help='the range of the depth cost volume: [1, maxdepth]')\n# dataset\nparser.add_argument('--kitti2015', action='store_true',\n help='If false, use 3d kitti dataset. If true, use kitti stereo 2015, default: False')\nparser.add_argument('--dataset', default='kitti', choices=['sceneflow', 'kitti'],\n help='train with sceneflow or kitti')\nparser.add_argument('--datapath', default='',\n help='root folder of the dataset')\nparser.add_argument('--split_train', default='Kitti/object/train.txt',\n help='data splitting file for training')\nparser.add_argument('--split_val', default='Kitti/object/subval.txt',\n help='data splitting file for validation')\nparser.add_argument('--epochs', type=int, default=300,\n help='number of training epochs')\nparser.add_argument('--btrain', type=int, default=3,\n help='training batch size')\nparser.add_argument('--bval', type=int, default=1,\n help='validation batch size')\nparser.add_argument('--workers', type=int, default=8,\n help='number of dataset workers')\n# learning rate\nparser.add_argument('--lr', type=float, default=0.001,\n help='learning rate')\nparser.add_argument('--lr_stepsize', nargs='+', type=int, default=[200],\n help='drop lr in each step')\nparser.add_argument('--lr_gamma', default=0.1, type=float,\n help='gamma of the learning rate scheduler')\n# resume\nparser.add_argument('--resume', default=None,\n help='path to a checkpoint')\nparser.add_argument('--pretrain', default=None,\n help='path to pretrained model')\nparser.add_argument('--start_epoch', type=int, default=0,\n help='start epoch')\n# evaluate\nparser.add_argument('--evaluate', action='store_true',\n help='do evaluation')\nparser.add_argument('--calib_value', type=float, default=1017,\n help='manually define focal length. (sceneflow does not have configuration)')\nparser.add_argument('--dynamic_bs', action='store_true',\n help='If true, dynamically calculate baseline from calibration file. 
If false, use 0.54')\nparser.add_argument('--eval_interval', type=int, default=50,\n help='evaluate model every n epochs')\nparser.add_argument('--checkpoint_interval', type=int, default=5,\n help='save checkpoint every n epoch.')\nparser.add_argument('--generate_depth_map', action='store_true',\n help='if true, generate depth maps and save the in save_path/depth_maps/{data_tag}/')\nparser.add_argument('--data_list', default=None,\n help='generate depth maps for all the data in this list')\nparser.add_argument('--data_tag', default=None,\n help='the suffix of the depth maps folder')\nargs = parser.parse_args()\nbest_RMSE = 1e10\n\n\ndef main():\n global best_RMSE\n\n # set logger\n log = logger.setup_logger(os.path.join(args.save_path, 'training.log'))\n for key, value in sorted(vars(args).items()):\n log.info(str(key) + ': ' + str(value))\n\n # set tensorboard\n writer = SummaryWriter(args.save_path + '/tensorboardx')\n\n # Data Loader\n if args.generate_depth_map:\n TrainImgLoader = None\n import dataloader.KITTI_submission_loader as KITTI_submission_loader\n TestImgLoader = torch.utils.data.DataLoader(\n KITTI_submission_loader.SubmiteDataset(args.datapath, args.data_list, args.dynamic_bs),\n batch_size=args.bval, shuffle=False, num_workers=args.workers, drop_last=False)\n elif args.dataset == 'kitti':\n train_data, val_data = KITTILoader3D.dataloader(args.datapath, args.split_train, args.split_val,\n kitti2015=args.kitti2015)\n TrainImgLoader = torch.utils.data.DataLoader(\n KITTILoader_dataset3d.myImageFloder(train_data, True, kitti2015=args.kitti2015, dynamic_bs=args.dynamic_bs),\n batch_size=args.btrain, shuffle=True, num_workers=args.workers, drop_last=False, pin_memory=True)\n TestImgLoader = torch.utils.data.DataLoader(\n KITTILoader_dataset3d.myImageFloder(val_data, False, kitti2015=args.kitti2015, dynamic_bs=args.dynamic_bs),\n batch_size=args.bval, shuffle=False, num_workers=args.workers, drop_last=False, pin_memory=True)\n else:\n train_data, val_data = listflowfile.dataloader(args.datapath)\n TrainImgLoader = torch.utils.data.DataLoader(\n SceneFlowLoader.myImageFloder(train_data, True, calib=args.calib_value),\n batch_size=args.btrain, shuffle=True, num_workers=args.workers, drop_last=False)\n TestImgLoader = torch.utils.data.DataLoader(\n SceneFlowLoader.myImageFloder(val_data, False, calib=args.calib_value),\n batch_size=args.bval, shuffle=False, num_workers=args.workers, drop_last=False)\n\n # Load Model\n if args.data_type == 'disparity':\n model = disp_models.__dict__[args.arch](maxdisp=args.maxdisp)\n elif args.data_type == 'depth':\n model = models.__dict__[args.arch](maxdepth=args.maxdepth, maxdisp=args.maxdisp, down=args.down)\n else:\n log.info('Model is not implemented')\n assert False\n\n # Number of parameters\n log.info('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))\n model = nn.DataParallel(model).cuda()\n torch.backends.cudnn.benchmark = True\n\n # Optimizer\n optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))\n scheduler = MultiStepLR(optimizer, milestones=args.lr_stepsize, gamma=args.lr_gamma)\n\n if args.pretrain:\n if os.path.isfile(args.pretrain):\n log.info(\"=> loading pretrain '{}'\".format(args.pretrain))\n checkpoint = torch.load(args.pretrain)\n model.load_state_dict(checkpoint['state_dict'])\n else:\n log.info('[Attention]: Can not find checkpoint {}'.format(args.pretrain))\n\n if args.resume:\n if os.path.isfile(args.resume):\n log.info(\"=> loading checkpoint 
'{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n args.start_epoch = checkpoint['epoch']\n optimizer.load_state_dict(checkpoint['optimizer'])\n best_RMSE = checkpoint['best_RMSE']\n scheduler.load_state_dict(checkpoint['scheduler'])\n log.info(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n log.info('[Attention]: Can not find checkpoint {}'.format(args.resume))\n\n if args.generate_depth_map:\n os.makedirs(args.save_path + '/depth_maps/' + args.data_tag, exist_ok=True)\n\n tqdm_eval_loader = tqdm(TestImgLoader, total=len(TestImgLoader))\n for batch_idx, (imgL_crop, imgR_crop, calib, H, W, filename) in enumerate(tqdm_eval_loader):\n pred_disp = inference(imgL_crop, imgR_crop, calib, model)\n for idx, name in enumerate(filename):\n np.save(args.save_path + '/depth_maps/' + args.data_tag + '/' + name, pred_disp[idx][-H[idx]:, :W[idx]])\n import sys\n sys.exit()\n\n # evaluation\n if args.evaluate:\n evaluate_metric = utils_func.Metric()\n ## training ##\n for batch_idx, (imgL_crop, imgR_crop, disp_crop_L, calib) in enumerate(TestImgLoader):\n start_time = time.time()\n test(imgL_crop, imgR_crop, disp_crop_L, calib, evaluate_metric, model)\n\n log.info(evaluate_metric.print(batch_idx, 'EVALUATE') + ' Time:{:.3f}'.format(time.time() - start_time))\n import sys\n sys.exit()\n\n for epoch in range(args.start_epoch, args.epochs):\n scheduler.step()\n\n ## training ##\n train_metric = utils_func.Metric()\n tqdm_train_loader = tqdm(TrainImgLoader, total=len(TrainImgLoader))\n for batch_idx, (imgL_crop, imgR_crop, disp_crop_L, calib) in enumerate(tqdm_train_loader):\n # start_time = time.time()\n train(imgL_crop, imgR_crop, disp_crop_L, calib, train_metric, optimizer, model)\n # log.info(train_metric.print(batch_idx, 'TRAIN') + ' Time:{:.3f}'.format(time.time() - start_time))\n log.info(train_metric.print(0, 'TRAIN Epoch' + str(epoch)))\n train_metric.tensorboard(writer, epoch, token='TRAIN')\n # lw.update(train_metric.get_info(), epoch, 'Train')\n\n ## testing ##\n is_best = False\n if (epoch % args.eval_interval) == 0:\n test_metric = utils_func.Metric()\n tqdm_test_loader = tqdm(TestImgLoader, total=len(TestImgLoader))\n for batch_idx, (imgL_crop, imgR_crop, disp_crop_L, calib) in enumerate(tqdm_test_loader):\n # start_time = time.time()\n test(imgL_crop, imgR_crop, disp_crop_L, calib, test_metric, model)\n # log.info(test_metric.print(batch_idx, 'TEST') + ' Time:{:.3f}'.format(time.time() - start_time))\n log.info(test_metric.print(0, 'TEST Epoch' + str(epoch)))\n test_metric.tensorboard(writer, epoch, token='TEST')\n\n # SAVE\n is_best = test_metric.RMSELIs.avg < best_RMSE\n best_RMSE = min(test_metric.RMSELIs.avg, best_RMSE)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_RMSE': best_RMSE,\n 'scheduler': scheduler.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, is_best, epoch, folder=args.save_path)\n # lw.done()\n\n\ndef save_checkpoint(state, is_best, epoch, filename='checkpoint.pth.tar', folder='result/default'):\n torch.save(state, folder + '/' + filename)\n if is_best:\n shutil.copyfile(folder + '/' + filename, folder + '/model_best.pth.tar')\n if args.checkpoint_interval > 0 and (epoch + 1) % args.checkpoint_interval == 0:\n shutil.copyfile(folder + '/' + filename, folder + '/checkpoint_{}.pth.tar'.format(epoch + 1))\n\n\ndef train(imgL, imgR, depth, calib, metric_log, optimizer, model):\n model.train()\n 
calib = calib.float()\n\n imgL, imgR, depth, calib = imgL.cuda(), imgR.cuda(), depth.cuda(), calib.cuda()\n\n # ---------\n mask = (depth >= 1) * (depth <= 80)\n mask.detach_()\n # ----\n\n optimizer.zero_grad()\n\n output1, output2, output3 = model(imgL, imgR, calib)\n output1 = torch.squeeze(output1, 1)\n output2 = torch.squeeze(output2, 1)\n output3 = torch.squeeze(output3, 1)\n if args.data_type == 'disparity':\n output1 = disp2depth(output1, calib)\n output2 = disp2depth(output2, calib)\n output3 = disp2depth(output3, calib)\n loss = 0.5 * F.smooth_l1_loss(output1[mask], depth[mask], size_average=True) + 0.7 * F.smooth_l1_loss(\n output2[mask], depth[mask], size_average=True) + F.smooth_l1_loss(output3[mask], depth[mask],\n size_average=True)\n\n metric_log.calculate(depth, output3, loss=loss.item())\n loss.backward()\n optimizer.step()\n\n\ndef inference(imgL, imgR, calib, model):\n model.eval()\n imgL, imgR, calib = imgL.cuda(), imgR.cuda(), calib.float().cuda()\n\n with torch.no_grad():\n output = model(imgL, imgR, calib)\n if args.data_type == 'disparity':\n output = disp2depth(output, calib)\n pred_disp = output.data.cpu().numpy()\n\n return pred_disp\n\n\ndef test(imgL, imgR, depth, calib, metric_log, model):\n model.eval()\n calib = calib.float()\n imgL, imgR, calib, depth = imgL.cuda(), imgR.cuda(), calib.cuda(), depth.cuda()\n\n mask = (depth >= 1) * (depth <= 80)\n mask.detach_()\n with torch.no_grad():\n output3 = model(imgL, imgR, calib)\n output3 = torch.squeeze(output3, 1)\n\n if args.data_type == 'disparity':\n output3 = disp2depth(output3, calib)\n loss = F.smooth_l1_loss(output3[mask], depth[mask], size_average=True)\n\n metric_log.calculate(depth, output3, loss=loss.item())\n\n torch.cuda.empty_cache()\n return\n\n\ndef disp2depth(disp, calib):\n depth = calib[:, None, None] / disp.clamp(min=1e-8)\n return depth\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.load", "torch.cuda.empty_cache", "numpy.save", "torch.no_grad", "torch.nn.functional.smooth_l1_loss", "torch.nn.DataParallel", "torch.squeeze", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wangyuan249/Mymmt767
[ "6b9bb566d290bd3157350f6496fcb5df8c2b515c" ]
[ "mmt/models/resnet.py" ]
[ "from __future__ import absolute_import\n\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn import init\nimport torchvision\nimport torch\nimport pdb\nfrom .layers import (\n SpatialAttention2d,\n WeightedSum2d)\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nclass ResNet(nn.Module):\n __factory = {\n 18: torchvision.models.resnet18,\n 34: torchvision.models.resnet34,\n 50: torchvision.models.resnet50,\n 101: torchvision.models.resnet101,\n 152: torchvision.models.resnet152,\n }\n\n def __init__(self, depth, pretrained=True, cut_at_pooling=False, is_select=False,\n num_features=0, norm=False, dropout=0, num_classes=0):\n super(ResNet, self).__init__()\n self.pretrained = pretrained\n self.depth = depth\n self.cut_at_pooling = cut_at_pooling\n self.is_select = is_select\n # Construct base (pretrained) resnet\n if depth not in ResNet.__factory:\n raise KeyError(\"Unsupported depth:\", depth)\n resnet = ResNet.__factory[depth](pretrained=pretrained)\n resnet.layer4[0].conv2.stride = (1,1)\n resnet.layer4[0].downsample[0].stride = (1,1)\n self.base = nn.Sequential(\n resnet.conv1, resnet.bn1, resnet.maxpool, # no relu\n resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4)\n self.gap = nn.AdaptiveAvgPool2d(1)\n\n if not self.cut_at_pooling:\n self.num_features = num_features\n self.norm = norm\n self.dropout = dropout\n self.has_embedding = num_features > 0 # false\n self.num_classes = num_classes\n\n out_planes = resnet.fc.in_features\n\n # Append new layers\n if self.has_embedding: # false\n self.feat = nn.Linear(out_planes, self.num_features)\n self.feat_bn = nn.BatchNorm1d(self.num_features)\n init.kaiming_normal_(self.feat.weight, mode='fan_out')\n init.constant_(self.feat.bias, 0)\n else: # 进入这里\n # Change the num_features to CNN output channels\n self.num_features = out_planes # out_planes = 2048 num_features 重新被赋值 2048\n self.num_features_delg = 512\n self.feat_bn = nn.BatchNorm1d(self.num_features_delg)\n self.feat_bn.bias.requires_grad_(False)\n if self.dropout > 0:\n self.drop = nn.Dropout(self.dropout)\n if self.num_classes > 0:\n self.classifier = nn.Linear(self.num_features_delg, self.num_classes, bias=False)\n init.normal_(self.classifier.weight, std=0.001)\n\n ## wangzy add attention\n self.attention = SpatialAttention2d(in_c=self.num_features, act_fn='relu')\n self.weightSum = WeightedSum2d()\n\n init.constant_(self.feat_bn.weight, 1)\n init.constant_(self.feat_bn.bias, 0)\n\n if not pretrained:\n self.reset_params()\n\n def forward(self, x, feature_withbn=False):\n x = self.base(x) # b x c x H x w C = 2048 即:32 2048 16 8\n # 1*1 conv 512\n original_fea = x\n # x = self.gap(x)\n # x = x.view(x.size(0), -1)\n '''wangzy add attention'''\n\n x, att_score = self.attention(x) # 32 1 16 8 比如说取前64个\n # x torch.Size([32, 512, 16, 8]) att_score torch.Size([32, 1, 16, 8])\n # print(att_score)\n # x = self.weightSum([x,att_score])#回乘att_score分数\n x = self.gap(x) # 32*512*1*1\n # print('------------------------------------------------------------')\n # print(x)\n x = x.view(-1, x.size()[1]) # 32 512\n features = x\n # print(\"features:\",features.shape)\n # pdb.set_trace()\n\n if self.cut_at_pooling: # False\n return features\n if self.has_embedding: # false\n bn_x = self.feat_bn(self.feat(features))\n else: # 进入这里\n bn_x = self.feat_bn(features)\n\n # print(\"training:\", self.training) ### 不确定!\n if self.training is False: ## 分情况 pretrain的时候 应该是 true target finetune 确定是 false\n prob = self.classifier(bn_x)\n 
bn_x = F.normalize(bn_x)\n return bn_x, prob, original_fea, att_score ### !!!! finetune 的时候从这里 return\n # return bn_x, self.feat_bn(original_fea), att_score ### !!!! finetune 的时候从这里 return\n\n if self.norm: # False\n bn_x = F.normalize(bn_x)\n elif self.has_embedding:\n bn_x = F.relu(bn_x)\n\n if self.dropout > 0: # False\n bn_x = self.drop(bn_x)\n\n if self.num_classes > 0: # True\n prob = self.classifier(bn_x)\n else:\n return x, bn_x\n\n if feature_withbn: # False\n return bn_x, prob\n\n return features, prob, original_fea, att_score\n #att_score (16,1,16,8)\n #original_fea(16,2048,16,8)\n #prob (16,12936)\n #features (16,2048)\n\n\n def reset_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n resnet = ResNet.__factory[self.depth](pretrained=self.pretrained)\n self.base[0].load_state_dict(resnet.conv1.state_dict())\n self.base[1].load_state_dict(resnet.bn1.state_dict())\n self.base[2].load_state_dict(resnet.maxpool.state_dict())\n self.base[3].load_state_dict(resnet.layer1.state_dict())\n self.base[4].load_state_dict(resnet.layer2.state_dict())\n self.base[5].load_state_dict(resnet.layer3.state_dict())\n self.base[6].load_state_dict(resnet.layer4.state_dict())\n\ndef resnet18(**kwargs):\n return ResNet(18, **kwargs)\n\n\ndef resnet34(**kwargs):\n return ResNet(34, **kwargs)\n\n\ndef resnet50(**kwargs):\n return ResNet(50, **kwargs)\n\n\ndef resnet101(**kwargs):\n return ResNet(101, **kwargs)\n\n\ndef resnet152(**kwargs):\n return ResNet(152, **kwargs)\n" ]
[ [ "torch.nn.functional.normalize", "torch.nn.Sequential", "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.nn.init.constant_", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.normal_", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KyGao/pygame_tracker
[ "c1c4cc4a74c478e6655aa02aa4950d2ab97f6eae" ]
[ "shmup_detect/fhog.py" ]
[ "# -*- coding: utf-8 -*\n\nimport numpy as np \nimport cv2\nfrom numba import njit\nimport time\n\nNUM_SECTOR = 9\nFLT_EPSILON = 1e-07\n\n\n@njit\ndef func1(dx, dy, boundary_x, boundary_y, height, width, numChannels):\n r = np.zeros((height, width), dtype=np.float32)\n alfa = np.zeros((height, width, 2), np.int32)\n\n for j in range(1, height-1):\n for i in range(1, width-1):\n c = 0\n x = dx[j, i, c]\n y = dy[j, i, c]\n r[j, i] = np.sqrt(x*x + y*y)\n\n for ch in range(1, numChannels):\n tx = dx[j, i, ch]\n ty = dy[j, i, ch]\n magnitude = np.sqrt(tx*tx + ty*ty)\n if(magnitude > r[j, i]):\n r[j, i] = magnitude\n c = ch\n x = tx\n y = ty\n\n mmax = boundary_x[0]*x + boundary_y[0]*y\n maxi = 0\n\n for kk in range(0, NUM_SECTOR):\n dotProd = boundary_x[kk]*x + boundary_y[kk]*y\n if(dotProd > mmax):\n mmax = dotProd\n maxi = kk\n elif(-dotProd > mmax):\n mmax = -dotProd\n maxi = kk + NUM_SECTOR\n\n alfa[j, i, 0] = maxi % NUM_SECTOR\n alfa[j, i, 1] = maxi\n return r, alfa\n#梯度计算,参考了https://blog.csdn.net/bisheng250/article/details/53672247\n@njit\ndef func2(dx, dy, boundary_x, boundary_y, r, alfa, nearest, w, k, height, width, sizeX, sizeY, p, stringSize):\n mapp = np.zeros((sizeX*sizeY*p), np.float32)\n for i in range(sizeY):\n for j in range(sizeX):\n for ii in range(k):\n for jj in range(k):\n if((i * k + ii > 0) and (i * k + ii < height - 1) and (j * k + jj > 0) and (j * k + jj < width - 1)):\n mapp[i*stringSize + j*p + alfa[k*i+ii,j*k+jj,0]] += r[k*i+ii,j*k+jj] * w[ii,0] * w[jj,0]\n mapp[i*stringSize + j*p + alfa[k*i+ii,j*k+jj,1] + NUM_SECTOR] += r[k*i+ii,j*k+jj] * w[ii,0] * w[jj,0]\n if((i + nearest[ii] >= 0) and (i + nearest[ii] <= sizeY - 1)):\n mapp[(i+nearest[ii])*stringSize + j*p + alfa[k*i+ii,j*k+jj,0]] += r[k*i+ii,j*k+jj] * w[ii,1] * w[jj,0]\n mapp[(i+nearest[ii])*stringSize + j*p + alfa[k*i+ii,j*k+jj,1] + NUM_SECTOR] += r[k*i+ii,j*k+jj] * w[ii,1] * w[jj,0]\n if((j + nearest[jj] >= 0) and (j + nearest[jj] <= sizeX - 1)):\n mapp[i*stringSize + (j+nearest[jj])*p + alfa[k*i+ii,j*k+jj,0]] += r[k*i+ii,j*k+jj] * w[ii,0] * w[jj,1]\n mapp[i*stringSize + (j+nearest[jj])*p + alfa[k*i+ii,j*k+jj,1] + NUM_SECTOR] += r[k*i+ii,j*k+jj] * w[ii,0] * w[jj,1]\n if((i + nearest[ii] >= 0) and (i + nearest[ii] <= sizeY - 1) and (j + nearest[jj] >= 0) and (j + nearest[jj] <= sizeX - 1)):\n mapp[(i+nearest[ii])*stringSize + (j+nearest[jj])*p + alfa[k*i+ii,j*k+jj,0]] += r[k*i+ii,j*k+jj] * w[ii,1] * w[jj,1]\n mapp[(i+nearest[ii])*stringSize + (j+nearest[jj])*p + alfa[k*i+ii,j*k+jj,1] + NUM_SECTOR] += r[k*i+ii,j*k+jj] * w[ii,1] * w[jj,1]\n return mapp\n\n@njit\n#计算四邻域的归一化\ndef func3(partOfNorm, mappmap, sizeX, sizeY, p, xp, pp):\n\tnewData = np.zeros((sizeY*sizeX*pp), np.float32)\n\tfor i in range(1, sizeY+1):\n\t\tfor j in range(1, sizeX+1):\n\t\t\tpos1 = i * (sizeX+2) * xp + j * xp\n\t\t\tpos2 = (i-1) * sizeX * pp + (j-1) * pp\n\n\t\t\tvalOfNorm = np.sqrt(partOfNorm[(i )*(sizeX + 2) + (j )] +\n \t\t\t\tpartOfNorm[(i )*(sizeX + 2) + (j + 1)] +\n \t\t\t\tpartOfNorm[(i + 1)*(sizeX + 2) + (j )] +\n \t\t\t\tpartOfNorm[(i + 1)*(sizeX + 2) + (j + 1)]) + FLT_EPSILON\n\t\t\tnewData[pos2:pos2+p] = mappmap[pos1:pos1+p] / valOfNorm\n\t\t\tnewData[pos2+4*p:pos2+6*p] = mappmap[pos1+p:pos1+3*p] / valOfNorm\n\n\t\t\tvalOfNorm = np.sqrt(partOfNorm[(i )*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i )*(sizeX + 2) + (j + 1)] +\n\t\t\t\t partOfNorm[(i - 1)*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i - 1)*(sizeX + 2) + (j + 1)]) + FLT_EPSILON\n\t\t\tnewData[pos2+p:pos2+2*p] = mappmap[pos1:pos1+p] / 
valOfNorm\n\t\t\tnewData[pos2+6*p:pos2+8*p] = mappmap[pos1+p:pos1+3*p] / valOfNorm\n\n\t\t\tvalOfNorm = np.sqrt(partOfNorm[(i )*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i )*(sizeX + 2) + (j - 1)] +\n\t\t\t\t partOfNorm[(i + 1)*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i + 1)*(sizeX + 2) + (j - 1)]) + FLT_EPSILON\n\t\t\tnewData[pos2+2*p:pos2+3*p] = mappmap[pos1:pos1+p] / valOfNorm\n\t\t\tnewData[pos2+8*p:pos2+10*p] = mappmap[pos1+p:pos1+3*p] / valOfNorm\n\n\t\t\tvalOfNorm = np.sqrt(partOfNorm[(i )*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i )*(sizeX + 2) + (j - 1)] +\n\t\t\t\t partOfNorm[(i - 1)*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i - 1)*(sizeX + 2) + (j - 1)]) + FLT_EPSILON\n\t\t\tnewData[pos2+3*p:pos2+4*p] = mappmap[pos1:pos1+p] / valOfNorm\n\t\t\tnewData[pos2+10*p:pos2+12*p] = mappmap[pos1+p:pos1+3*p] / valOfNorm\n\treturn newData\n\n@njit\ndef func4(mappmap, p, sizeX, sizeY, pp, yp, xp, nx, ny):\n\tnewData = np.zeros((sizeX*sizeY*pp), np.float32)\n\tfor i in range(sizeY):\n\t\tfor j in range(sizeX):\n\t\t\tpos1 = (i*sizeX + j) * p\n\t\t\tpos2 = (i*sizeX + j) * pp\n\n\t\t\tfor jj in range(2 * xp): # 2*9的有符号梯度\n\t\t\t\tnewData[pos2 + jj] = np.sum(mappmap[pos1 + yp*xp + jj : pos1 + 3*yp*xp + jj : 2*xp]) * ny\n\t\t\tfor jj in range(xp): # 9无符号\n\t\t\t\tnewData[pos2 + 2*xp + jj] = np.sum(mappmap[pos1 + jj : pos1 + jj + yp*xp : xp]) * ny\n\t\t\tfor ii in range(yp): # 4无符号\n\t\t\t\tnewData[pos2 + 3*xp + ii] = np.sum(mappmap[pos1 + yp*xp + ii*xp*2 : pos1 + yp*xp + ii*xp*2 + 2*xp]) * nx\n\treturn newData\n\n\n\ndef getFeatureMaps(image, k, mapp):#k为cell大小,返回map为特征\n\tkernel = np.array([[-1., 0., 1.]], np.float32)\n\n\theight = image.shape[0]\n\twidth = image.shape[1]\n\t#要求为3通道\n\tassert(image.ndim==3 and image.shape[2])\n\tnumChannels = 3\n\n\tsizeX = width // k\n\tsizeY = height // k\n\tpx = 3 * NUM_SECTOR\n\tp = px\n\tstringSize = sizeX * p\n\n\tmapp['sizeX'] = sizeX\n\tmapp['sizeY'] = sizeY\n\tmapp['numFeatures'] = p\n\tmapp['map'] = np.zeros((mapp['sizeX']*mapp['sizeY']*mapp['numFeatures']), np.float32)\n\t#两个方向梯度\n\tdx = cv2.filter2D(np.float32(image), -1, kernel)\n\tdy = cv2.filter2D(np.float32(image), -1, kernel.T)\n #初始化cos和sin函数\n\targ_vector = np.arange(NUM_SECTOR+1).astype(np.float32) * np.pi / NUM_SECTOR\n\tboundary_x = np.cos(arg_vector) \n\tboundary_y = np.sin(arg_vector)\n\n\t\n\t#计算像素梯度的大小和方向\n\tstime=time.time()\n\tr, alfa = func1(dx, dy, boundary_x, boundary_y, height, width, numChannels) #with @jit\n\tdtime=time.time()-stime\n\t# print('func1:{}s'.format(dtime))\n\tnearest = np.ones((k), np.int)\n\tnearest[0:k//2] = -1\n\n\tw = np.zeros((k, 2), np.float32)\n\ta_x = np.concatenate((k/2 - np.arange(k/2) - 0.5, np.arange(k/2,k) - k/2 + 0.5)).astype(np.float32)\n\tb_x = np.concatenate((k/2 + np.arange(k/2) + 0.5, -np.arange(k/2,k) + k/2 - 0.5 + k)).astype(np.float32)\n\tw[:, 0] = 1.0 / a_x * ((a_x*b_x) / (a_x+b_x))\n\tw[:, 1] = 1.0 / b_x * ((a_x*b_x) / (a_x+b_x))\n #梯度计算准备\n\tmapp['map'] = func2(dx, dy, boundary_x, boundary_y, r, alfa, nearest, w, k, height, width, sizeX, sizeY, p, stringSize) #with @jit\n\n\treturn mapp\n\n#归一化截断\ndef normalizeAndTruncate(mapp, alfa):\n\tsizeX = mapp['sizeX']\n\tsizeY = mapp['sizeY']\n\n\tp = NUM_SECTOR\n\txp = NUM_SECTOR * 3\n\tpp = NUM_SECTOR * 12\n\n\t'''\n\t### \n\tpartOfNorm = np.zeros((sizeY*sizeX), np.float32)\n\n\tfor i in range(sizeX*sizeY):\n\t\tpos = i * mapp['numFeatures']\n\t\tpartOfNorm[i] = np.sum(mapp['map'][pos:pos+p]**2) ###\n\t'''\n\t### \n\tidx = np.arange(0, sizeX*sizeY*mapp['numFeatures'], 
mapp['numFeatures']).reshape((sizeX*sizeY, 1)) + np.arange(p)\n\tpartOfNorm = np.sum(mapp['map'][idx] ** 2, axis=1) ### \n\n\tsizeX, sizeY = sizeX-2, sizeY-2\n\t\n\n\t\n\t### \n\tnewData = func3(partOfNorm, mapp['map'], sizeX, sizeY, p, xp, pp) \n\t###\n\n\t# \n\tnewData[newData > alfa] = alfa\n\n\tmapp['numFeatures'] = pp\n\tmapp['sizeX'] = sizeX\n\tmapp['sizeY'] = sizeY\n\tmapp['map'] = newData\n\n\treturn mapp\n\n\ndef PCAFeatureMaps(mapp):\n\tsizeX = mapp['sizeX']\n\tsizeY = mapp['sizeY']\n\n\tp = mapp['numFeatures']\n\tpp = NUM_SECTOR * 3 + 4\n\typ = 4\n\txp = NUM_SECTOR\n\n\tnx = 1.0 / np.sqrt(xp*2)\n\tny = 1.0 / np.sqrt(yp)\n\n\tnewData = func4(mapp['map'], p, sizeX, sizeY, pp, yp, xp, nx, ny) \n\t###\n\n\tmapp['numFeatures'] = pp\n\tmapp['map'] = newData\n\n\treturn mapp\n" ]
[ [ "numpy.sqrt", "numpy.arange", "numpy.cos", "numpy.ones", "numpy.sin", "numpy.float32", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wylloong/TDMOA-USV
[ "3a8690559f6df76f0ed457a89dcec2ec268db34f" ]
[ "COLREGsBasedRiskDomain/RiskSubjection.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\nimport math\r\nfrom matplotlib import pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport numpy as np\r\nimport GeoCommonBase as CoordinateBase\r\n\r\n# 危险隶属度函数,障碍物周围隶属度函数随着方向和速度逐渐变小\r\n# 改进:隶属度范围和速度大小有关\r\ndef riskSubjection(boatPnt,currPnt,boatVelo,boatOrien,evaluTime,impactFactor):\r\n #考虑求解半径\r\n evaluDistance=boatVelo*evaluTime\r\n #评估点与船的距离\r\n realDistance=CoordinateBase.Distance(boatPnt,currPnt)\r\n if(realDistance<=evaluDistance):\r\n # 评估点与船只在范围内,返回隶属函数\r\n # currPnt相对于boatPnt的向量\r\n relativeVector = CoordinateBase.Point(currPnt.x - boatPnt.x, currPnt.y - boatPnt.y)\r\n # currPnt与船行驶方向的夹角\r\n interAngle=CoordinateBase.IntersectionAngle(boatOrien,relativeVector) # 返回角度和向量的夹角\r\n # 方向影响量\r\n orienFactor = velocityDirectionFactor(interAngle,impactFactor)\r\n return 1-realDistance/(evaluDistance*orienFactor)\r\n else:\r\n return 0\r\n\r\n# 速度方向影响因子\r\ndef velocityDirectionFactor(interangle,impactFactor):\r\n # 方向影响量\r\n delta = math.cos(CoordinateBase.angle2radian(interangle))\r\n orienFactor = 1 + impactFactor * (1 / (1 + math.e ** (-delta * 3.5))) ** 3.5 * (1 + delta) # Sigmoid函数\r\n return orienFactor\r\n\r\n#求解隶属度函数值,不考虑速度方向\r\ndef subordinateFunctionWithoutOri(basePnt,currPnt,semidiameter):\r\n #考虑求解半径 semidiameter\r\n #判断是否在范围semidiameter内\r\n if(CoordinateBase.Distance(basePnt,currPnt)<=semidiameter):\r\n #在一定范围内,调用隶属函数,currPnt相对于basePnt的向量\r\n return 1-CoordinateBase.Distance(basePnt,currPnt)/semidiameter\r\n else:\r\n return 0\r\n\r\n# 绕点旋转公式\r\ndef RotationWayPnt(rotIni_x,rotIni_y,edit_x,edit_y,rotaAngle):\r\n #点绕点旋转公式,逆时针 旋转原点rotIni,待计算点edit,逆时针旋转角度rotaAngle\r\n Rotaradian=rotaAngle*math.pi/180\r\n newX=(edit_x-rotIni_x)*math.cos(Rotaradian)-(edit_y-rotIni_y)*math.sin(Rotaradian)+rotIni_x\r\n newY=(edit_x-rotIni_x)*math.sin(Rotaradian)+(edit_y-rotIni_y)*math.cos(Rotaradian)+rotIni_y\r\n return CoordinateBase.Point(newX,newY)\r\n\r\nif __name__==\"__main__\":\r\n boatLocation=CoordinateBase.Point(0,0)\r\n currLocation=CoordinateBase.Point(10,10)\r\n boatVelo=10\r\n boatOrien=45\r\n evaluTime=10\r\n impactFactor=0.7\r\n subjection=riskSubjection(boatLocation,currLocation,boatVelo,boatOrien,evaluTime,impactFactor)\r\n print (subjection)\r\n # 绘制等高线\r\n fig = plt.figure(1) # 创建图表1\r\n ax = Axes3D(fig)\r\n X = np.arange(-150, 150, 2)\r\n Y = np.arange(-150, 150, 2)\r\n X, Y = np.meshgrid(X, Y)\r\n zs = np.array([riskSubjection(boatLocation,CoordinateBase.Point(x, y),boatVelo,boatOrien,evaluTime,impactFactor) for x, y in zip(np.ravel(X), np.ravel(Y))])\r\n Z = zs.reshape(X.shape)\r\n surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='hot')\r\n ax.set_xlabel('X')\r\n ax.set_ylabel('Y')\r\n ax.set_zlabel('Z')\r\n\r\n contourfig = plt.figure(2) # 创建图表2,等高线图\r\n coutax = contourfig.add_subplot(1, 1, 1)\r\n #plt.text(15, -13, \"V\", fontsize=15, verticalalignment=\"bottom\", horizontalalignment=\"left\")\r\n plt.contour(X, Y, Z,20)\r\n # coutax.set_xlabel('X Label')\r\n # coutax.set_ylabel('Y Label')\r\n\r\n ratioPlt = plt.figure(3) # 创建图表2,等高线图\r\n ax2 = ratioPlt.add_subplot(3, 3, 3)\r\n x=0\r\n while x<math.pi:\r\n orienFactor = velocityDirectionFactor(x*180/math.pi,impactFactor)\r\n ax2.scatter(x, orienFactor, c='r', marker='.') # 航路\r\n x+=math.pi/100\r\n plt.show()\r\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.contour", "numpy.ravel", "numpy.meshgrid", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nchlis/image_captioning
[ "e1ac6ae9ed9b398417a91563f1cfe316705c8e58" ]
[ "train_model_LSTM.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 3 19:22:59 2018\n\n@author: nikos\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom skimage.transform import rescale, resize, downscale_local_mean\nfrom lmfit.models import GaussianModel, ConstantModel\nfrom keras.preprocessing import image\n#from keras.applications.imagenet_utils import preprocess_input\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions, ResNet50\n\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Embedding, Dense, Activation, LSTM, GRU, Dropout\nfrom keras.layers.merge import concatenate\nfrom keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical, plot_model\n\nfrom sklearn.model_selection import train_test_split\nimport random\nimport sys\nimport time\nfrom keras.preprocessing.text import Tokenizer#map words to integers\nfrom keras.backend import clear_session\n#clear_session();print('Cleared Keras session to load new model')\nimport pickle\n\n#%% load the data\nfilenames = np.load('Flickr8k_images_filenames.npy')#8091 filenames\nimages = np.load('Flickr8k_images_encoded.npy')#8091 images\ncaptions = np.load('captions.npy').item()#5 captions per image\nassert np.array_equal(np.sort(filenames),np.sort(np.array(list(captions.keys()))))\n\n#%% Tokenize the captions: map each word/token to an integer\nfilenames_tr = pd.read_csv('./Flickr8k_text/Flickr_8k.trainImages.txt',header=None)\nfilenames_tr = np.array(filenames_tr.values.tolist())#convert to array with dtype='<U25'\ncaptions_per_image=5\n\n#find the training captions to fit the tokenizer on\ncaptions_tr = list()\nfor f in filenames_tr:\n #captions_tr.append(captions[f[0]])\n captions_tr=captions_tr+captions[f[0]]\nassert len(captions_tr) == len(filenames_tr)*captions_per_image\n#max caption length in training data set\nmax_caption_length=max([len(x.split()) for x in captions_tr])\nprint('Maximum caption length:',max_caption_length,'words/tokens.')\n#consider removing '.' 
from the filters\ntokenizer = Tokenizer(num_words=None,filters='!\"#$%&()*+,-./:;=?@[\\]^_`{|}~',\n lower=False, split=' ', char_level=False)\ntokenizer.fit_on_texts(captions_tr)\nvocab_size = len(tokenizer.word_index.keys())+1\nprint('Vocabulary size after tokenizer:',vocab_size,'unique words.')\n\n#%% set up a generator function to train on one image at a time (conserve RAM)\n\ndef data_generator(input_filenames=None):\n '''\n Generate online training data, one image at a time.\n Note: one image produces several \"datapoints\", since every token of each\n caption is a different output target.\n Yields:\n X_img: (#timesteps,#imagefeatures):image feature input\n X_txt: (#timesteps,#max_caption_length):text input, each word is an integer\n y: (#timesteps,#vocab_size):one-hot encoded output word to predict\n '''\n #filenames_gen = pd.read_csv(input_filepath,header=None)\n #filenames_gen = np.array(filenames_gen.values.tolist())#convert to array with dtype='<U25'\n #print('Generator for:',input_filepath)\n filenames_gen = input_filenames\n print('files total:',len(filenames_gen))\n while True:\n for f in filenames_gen:\n X_img, X_txt, y = list(), list(), list()#new list for every image\n ix = np.where(filenames==f)[0][0]#find the index of the image\n img = images[ix,:]#load the image features using the index\n img_captions = captions[f[0]]#load the captions of the image\n for c in img_captions:\n # encode the sequence\n seq = tokenizer.texts_to_sequences([c])[0]\n # split one sequence into multiple X,y pairs\n for i in range(1, len(seq)):\n # split into input and output pair\n in_seq, out_seq = seq[:i], seq[i]\n # pad input sequence\n in_seq = pad_sequences([in_seq], maxlen=max_caption_length)[0]\n # encode output sequence\n out_seq = to_categorical([out_seq], num_classes=vocab_size)#[0]\n # store\n X_img.append(img)#append the image features\n X_txt.append(in_seq)\n y.append(out_seq)\n yield([[np.array(X_img), np.array(X_txt)], np.array(y)]) \n \n#%% Specify the model\n\nnembedding = 128\nndense = 128\nnlstm = 128\ndropout_rate=0.0\n#dropout_rate=0.25\n# feature extractor model\ninput_img = Input(shape=(2048,))\nx_img = Dropout(dropout_rate)(input_img)\nx_img = Dense(ndense, activation='relu')(x_img)\n\n# sequence model\ninput_txt = Input(shape=(max_caption_length,))\nx_txt = Embedding(vocab_size, nembedding, mask_zero=True)(input_txt)\nx_txt = Dropout(dropout_rate)(x_txt)\nx_txt = LSTM(nlstm)(x_txt)\n\n# decoder model\nx_merge = concatenate([x_img, x_txt])\nx_merge = Dropout(dropout_rate)(x_merge)\nx_merge = Dense(ndense, activation='relu')(x_merge)\n#x_merge = Dropout(dropout_rate)(x_merge)\noutput = Dense(vocab_size, activation='softmax')(x_merge)\n# tie it together [image, seq] [word]\nmodel = Model(inputs=[input_img, input_txt], outputs=output)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n# summarize model\nprint(model.summary())\n\n#%% train the model\n#generator for training data\nfilenames_tr = pd.read_csv('./Flickr8k_text/Flickr_8k.trainImages.txt',header=None)\nfilenames_tr = np.array(filenames_tr.values.tolist())#convert to array with dtype='<U25'\ngen_train = data_generator(input_filenames=filenames_tr)\nsteps_per_epoch_tr = len(filenames_tr)\n#generator for validation data\nfilenames_val = pd.read_csv('./Flickr8k_text/Flickr_8k.devImages.txt',header=None)\nfilenames_val = np.array(filenames_val.values.tolist())#convert to array with dtype='<U25'\ngen_val = data_generator(input_filenames=filenames_val)\nsteps_per_epoch_val = 
len(filenames_val)\n\nfilepath='./saved_models/model128_LSTM_dropout'+str(dropout_rate) #to save the weights\n#save model architecture as a .png file\nplot_model(model, to_file=filepath+'.png', show_shapes=True)\n#save tokenizer to use on new datasets\nwith open(filepath+'_tokenizer.pkl', 'wb') as handle:\n pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n##how to load the tokenizer\n#with open('tokenizer.pkl', 'rb') as handle:\n# tokenizer = pickle.load(handle)\n\ncheckpoint = ModelCheckpoint(filepath+'.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto')\ncsvlog = CSVLogger(filepath+'_train_log.csv',append=True)\nearly_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5)\n\ntic=time.time()\nmodel.fit_generator(generator=gen_train,steps_per_epoch=steps_per_epoch_tr,\n validation_data=gen_val,validation_steps=steps_per_epoch_val,\n epochs=10, verbose=2,\n initial_epoch=0,callbacks=[checkpoint, csvlog, early_stopping])\ntoc=time.time()\nmodel.save(filepath+'_model.hdf5')\nfile = open(filepath+'_time.txt','w')\nfile.write('training time:'+format(toc-tic, '.2f')+'seconds')\nfile.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "numpy.sort", "numpy.load", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
cda-group/baloo
[ "f6e05e35b73a75e8a300754c6bdc575e5f2d53b9" ]
[ "tests/core/indexes/test_base.py" ]
[ "import numpy as np\nimport pytest\n\nfrom baloo import Index\n\n\ndef assert_index_equal(actual, expected, sort=False):\n actual = actual.evaluate()\n expected = expected.evaluate()\n\n actual_values = actual.values\n expected_values = expected.values\n if sort:\n actual_values = np.sort(actual_values)\n expected_values = np.sort(expected_values)\n np.testing.assert_array_equal(actual_values, expected_values)\n\n assert actual.dtype.char == expected.dtype.char\n assert actual._length == expected._length\n # might seem redundant but testing the __len__ function\n assert len(actual) == len(expected)\n assert actual.name == expected.name\n\n\nclass TestBaseIndex(object):\n def test_init_list(self):\n data = [1, 2, 3]\n actual = Index(data)\n expected = Index(np.array(data))\n\n assert_index_equal(actual, expected)\n\n def test_evaluate(self, data_i64):\n actual = Index(data_i64)\n expected = Index(data_i64, np.dtype(np.int64), None)\n\n assert_index_equal(actual, expected)\n\n def test_len_raw(self, data_i64):\n ind = Index(data_i64, np.dtype(np.int64))\n\n actual = len(ind)\n expected = 5\n\n assert actual == expected\n\n def test_len_lazy(self, data_i64_lazy):\n ind = Index(data_i64_lazy, np.dtype(np.int64))\n\n actual = len(ind)\n expected = 5\n\n assert actual == expected\n\n def test_comparison(self, index_i64):\n actual = index_i64 < 3\n expected = Index(np.array([True, True, True, False, False]))\n\n assert_index_equal(actual, expected)\n\n def test_filter(self, index_i64):\n actual = index_i64[Index(np.array([False, True, True, False, False]))]\n expected = Index(np.array([1, 2]), np.dtype(np.int64))\n\n assert_index_equal(actual, expected)\n\n def test_slice(self, index_i64):\n actual = index_i64[1:3]\n expected = Index(np.array([1, 2]), np.dtype(np.int64))\n\n assert_index_equal(actual, expected)\n\n def test_head(self, index_i64):\n actual = index_i64.head(2)\n expected = Index(np.array([0, 1]), np.dtype(np.int64))\n\n assert_index_equal(actual, expected)\n\n def test_tail(self, index_i64):\n actual = index_i64.tail(2)\n expected = Index(np.array([3, 4]), np.dtype(np.int64))\n\n assert_index_equal(actual, expected)\n\n # implicitly tests if one can apply operation with Series too\n @pytest.mark.parametrize('operation, expected_data', [\n ('+', np.arange(3, 8, dtype=np.float32)),\n ('-', np.arange(-1, 4, dtype=np.float32)),\n ('*', np.arange(2, 11, 2, dtype=np.float32)),\n ('/', np.array([0.5, 1, 1.5, 2, 2.5], dtype=np.float32)),\n ('**', np.array([1, 4, 9, 16, 25], dtype=np.float32))\n ])\n def test_op_array(self, operation, expected_data, data_f32, op_array_other):\n data = Index(data_f32)\n\n actual = eval('data {} op_array_other'.format(operation))\n expected = Index(expected_data, np.dtype(np.float32))\n\n assert_index_equal(actual, expected)\n\n @pytest.mark.parametrize('operation, expected_data', [\n ('+', np.arange(3, 8, dtype=np.float32)),\n ('-', np.arange(-1, 4, dtype=np.float32)),\n ('*', np.arange(2, 11, 2, dtype=np.float32)),\n ('/', np.array([0.5, 1, 1.5, 2, 2.5], dtype=np.float32)),\n ('**', np.array([1, 4, 9, 16, 25], dtype=np.float32))\n ])\n def test_op_scalar(self, operation, expected_data, data_f32):\n ind = Index(data_f32)\n\n actual = eval('ind {} 2'.format(operation))\n expected = Index(expected_data, np.dtype(np.float32))\n\n assert_index_equal(actual, expected)\n\n def test_isna(self):\n ind = Index([3, 2, -999, 4, -999])\n\n actual = ind.isna()\n expected = Index([False, False, True, False, True], np.dtype(np.bool))\n\n assert_index_equal(actual, 
expected)\n\n def test_dropna(self):\n ind = Index([3, 2, -999, 4, -999])\n\n actual = ind.dropna()\n expected = Index([3, 2, 4], np.dtype(np.int64))\n\n assert_index_equal(actual, expected)\n\n def test_fillna(self):\n ind = Index([3, 2, -999, 4, -999])\n\n actual = ind.fillna(15)\n expected = Index([3, 2, 15, 4, 15], np.dtype(np.int64))\n\n assert_index_equal(actual, expected)\n" ]
[ [ "numpy.arange", "numpy.dtype", "numpy.sort", "numpy.testing.assert_array_equal", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EdgarLefevre/wnet_pytorch
[ "ba8fa5465f72351f349c18fe7df20a60a7c7f3c5" ]
[ "wnet/utils/soft_n_cut_loss.py" ]
[ "# Some methods in this file ported to Pytorch from https://github.com/Ashish77IITM/W-Net/\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom scipy.stats import norm\nfrom torch import Tensor\n\n# The weight matrix w is a measure of the weight between each pixel and\n# every other pixel. so w[u][v] is a measure of\n# (a) Distance between the brightness of the two pixels.\n# (b) Distance in positon between the two pixels\n\n# The NCut loss metric is then:\n# (a) association: sum(weight_connection) * P(both pixels in the connection are in the class)\n# (b) disassociation: sum(weight_connection) * P(first pixel is in the class)\n# N Cut loss = disassociation / association\n\ndef soft_n_cut_loss(inputs, segmentations):\n # We don't do n_cut_loss batch wise -- split it up and do it instance wise\n loss = 0\n for i in range(inputs.shape[0]):\n flatten_image = torch.mean(inputs[i], dim=0)\n flatten_image = flatten_image.reshape(flatten_image.shape[0] ** 2)\n loss += soft_n_cut_loss_(flatten_image, segmentations[i], 2, 512,\n 512) # last 3 = k, size, size -> take from args\n loss /= inputs.shape[0]\n return loss\n\n\ndef soft_n_cut_loss_(flatten_image, prob, k, rows, cols):\n '''\n Inputs:\n prob : (rows*cols*k) tensor\n k : number of classes (integer)\n flatten_image : 1 dim tf array of the row flattened image ( intensity is the average of the three channels)\n rows : number of the rows in the original image\n cols : number of the cols in the original image\n Output :\n soft_n_cut_loss tensor for a single image\n '''\n\n soft_n_cut_loss = k\n weights = edge_weights(flatten_image, rows, cols)\n\n for t in range(k):\n soft_n_cut_loss = soft_n_cut_loss - (numerator(prob[t, :, ], weights) / denominator(prob[t, :, :], weights))\n\n return soft_n_cut_loss\n\n\ndef edge_weights(flatten_image, rows, cols, std_intensity=3, std_position=1, radius=5):\n '''\n Inputs :\n flatten_image : 1 dim tf array of the row flattened image ( intensity is the average of the three channels)\n std_intensity : standard deviation for intensity\n std_position : standard devistion for position\n radius : the length of the around the pixel where the weights\n is non-zero\n rows : rows of the original image (unflattened image)\n cols : cols of the original image (unflattened image)\n Output :\n weights : 2d tf array edge weights in the pixel graph\n Used parameters :\n n : number of pixels\n '''\n ones = torch.ones_like(flatten_image, dtype=torch.float)\n if torch.cuda.is_available():\n ones = ones.cuda()\n\n A = outer_product(flatten_image, ones)\n A_T = torch.t(A)\n d = torch.div((A - A_T), std_intensity)\n intensity_weight = torch.exp(-1 * torch.mul(d, d))\n\n xx, yy = torch.meshgrid(torch.arange(rows, dtype=torch.float), torch.arange(cols, dtype=torch.float))\n xx = xx.reshape(rows * cols)\n yy = yy.reshape(rows * cols)\n if torch.cuda.is_available():\n xx = xx.cuda()\n yy = yy.cuda()\n ones_xx = torch.ones_like(xx, dtype=torch.float)\n ones_yy = torch.ones_like(yy, dtype=torch.float)\n if torch.cuda.is_available():\n ones_yy = ones_yy.cuda()\n ones_xx = ones_xx.cuda()\n A_x = outer_product(xx, ones_xx)\n A_y = outer_product(yy, ones_yy)\n\n xi_xj = A_x - torch.t(A_x)\n yi_yj = A_y - torch.t(A_y)\n\n sq_distance_matrix = torch.mul(xi_xj, xi_xj) + torch.mul(yi_yj, yi_yj)\n\n # Might have to consider casting as float32 instead of creating meshgrid as float32\n\n dist_weight = torch.exp(-torch.div(sq_distance_matrix, std_position ** 2))\n # ele_diff = tf.reshape(ele_diff, (rows, 
cols))\n # w = ele_diff + distance_matrix\n return torch.mul(intensity_weight, dist_weight)\n\n\ndef outer_product(v1, v2):\n '''\n Inputs:\n v1 : m*1 tf array\n v2 : m*1 tf array\n Output :\n v1 x v2 : m*m array\n '''\n v1 = v1.reshape(-1)\n v2 = v2.reshape(-1)\n v1 = torch.unsqueeze(v1, dim=0)\n v2 = torch.unsqueeze(v2, dim=0)\n return torch.matmul(torch.t(v1), v2)\n\n\ndef numerator(k_class_prob, weights):\n '''\n Inputs :\n k_class_prob : k_class pixelwise probability (rows*cols) tensor\n weights : edge weights n*n tensor\n '''\n k_class_prob = k_class_prob.reshape(-1)\n a = torch.mul(weights, outer_product(k_class_prob, k_class_prob))\n return torch.sum(a)\n\n\ndef denominator(k_class_prob, weights):\n '''\n Inputs:\n k_class_prob : k_class pixelwise probability (rows*cols) tensor\n weights : edge weights\tn*n tensor\n '''\n k_class_prob = k_class_prob.view(-1)\n return torch.sum(\n torch.mul(\n weights,\n outer_product(\n k_class_prob,\n torch.ones_like(k_class_prob)\n )\n )\n )\n\n\ndef gaussian_kernel(radius: int = 3, sigma: float = 4, device='cpu'):\n x_2 = np.linspace(-radius, radius, 2 * radius + 1) ** 2\n dist = np.sqrt(x_2.reshape(-1, 1) + x_2.reshape(1, -1)) / sigma\n kernel = norm.pdf(dist) / norm.pdf(0)\n kernel = torch.from_numpy(kernel.astype(np.float32))\n kernel = kernel.view((1, 1, kernel.shape[0], kernel.shape[1]))\n\n if device == 'cuda':\n kernel = kernel.cuda()\n\n return kernel\n\n\nclass NCutLoss2D(nn.Module):\n r\"\"\"Implementation of the continuous N-Cut loss, as in:\n 'W-Net: A Deep Model for Fully Unsupervised Image Segmentation', by Xia, Kulis (2017)\"\"\"\n\n def __init__(self, radius: int = 4, sigma_1: float = 5, sigma_2: float = 1):\n r\"\"\"\n :param radius: Radius of the spatial interaction term\n :param sigma_1: Standard deviation of the spatial Gaussian interaction\n :param sigma_2: Standard deviation of the pixel value Gaussian interaction\n \"\"\"\n super(NCutLoss2D, self).__init__()\n self.radius = radius\n self.sigma_1 = sigma_1 # Spatial standard deviation\n self.sigma_2 = sigma_2 # Pixel value standard deviation\n\n def forward(self, inputs: Tensor, labels: Tensor) -> Tensor:\n r\"\"\"Computes the continuous N-Cut loss, given a set of class probabilities (labels) and raw images (inputs).\n Small modifications have been made here for efficiency -- specifically, we compute the pixel-wise weights\n relative to the class-wide average, rather than for every individual pixel.\n :param labels: Predicted class probabilities\n :param inputs: Raw images\n :return: Continuous N-Cut loss\n \"\"\"\n num_classes = labels.shape[1]\n kernel = gaussian_kernel(radius=self.radius, sigma=self.sigma_1, device=labels.device.type)\n loss = 0\n\n for k in range(num_classes):\n # Compute the average pixel value for this class, and the difference from each pixel\n class_probs = labels[:, k].unsqueeze(1)\n class_mean = torch.mean(inputs * class_probs, dim=(2, 3), keepdim=True) / \\\n torch.add(torch.mean(class_probs, dim=(2, 3), keepdim=True), 1e-5)\n diff = (inputs - class_mean).pow(2).sum(dim=1).unsqueeze(1)\n\n # Weight the loss by the difference from the class average.\n weights = torch.exp(diff.pow(2).mul(-1 / self.sigma_2 ** 2))\n\n # Compute N-cut loss, using the computed weights matrix, and a Gaussian spatial filter\n numerator = torch.sum(class_probs * F.conv2d(class_probs * weights, kernel, padding=self.radius))\n denominator = torch.sum(class_probs * F.conv2d(weights, kernel, padding=self.radius))\n loss += nn.L1Loss()(numerator / torch.add(denominator, 1e-6), 
torch.zeros_like(numerator))\n\n return num_classes - loss\n" ]
[ [ "torch.div", "torch.mean", "torch.add", "numpy.linspace", "scipy.stats.norm.pdf", "torch.nn.functional.conv2d", "torch.sum", "torch.zeros_like", "torch.unsqueeze", "torch.mul", "torch.cuda.is_available", "torch.arange", "torch.nn.L1Loss", "torch.t", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LCOGT/image_align
[ "12ddfb924301039a3cba5007106f6f8ff27f925e" ]
[ "quad.py" ]
[ "import numpy as np\nimport os\nfrom astropy.io import fits\nimport operator\nimport itertools\n\nclass ImgCat:\n \"\"\"\n Represent an individual image and its associated catalog, starlist, quads etc.\n \"\"\"\n\n def __init__(self, filepath, hdu=0, cat=None):\n \"\"\"\n\n :param filepath: Path to the FITS file, or alternatively just a string to identify the image.\n :type filepath: string\n\n :param cat: Catalog generated by SExtractor (if available -- if not, we'll make our own)\n :type cat: asciidata catalog\n\n :param hdu: The hdu containing the science data from which I should build the catalog. 0 is primary. If multihdu, 1 is usually science.\n\n \"\"\"\n self.filepath = filepath\n\n (imgdir, filename) = os.path.split(filepath)\n (common, ext) = os.path.splitext(filename)\n self.name = common\n\n self.hdu = hdu\n self.cat = cat\n self.starlist = []\n self.mindist = 0.0\n self.xlim = (0.0, 0.0) # Will be set using the catalog -- no need for the FITS image.\n self.ylim = (0.0, 0.0)\n\n self.quadlist = []\n self.quadlevel = 0 # encodes what kind of quads have already been computed\n\n def makestarlist(self, skipsaturated=False, n=200):\n if skipsaturated:\n maxflag = 3\n else:\n maxflag = 7\n hdu = fits.open(self.filepath)\n cats = hdu[2].data\n self.starlist = sortstarlistbyflux(cats)[:n]\n (xmin, xmax, ymin, ymax) = area(cats, border=0.01)\n self.xlim = (xmin, xmax)\n self.ylim = (ymin, ymax)\n\n # Given this starlists, what is a good minimal distance for stars in quads ?\n self.mindist = min(min(xmax - xmin, ymax - ymin) / 10.0, 30.0)\n\n def makemorequads(self, verbose=True):\n \"\"\"\n We add more quads, following the quadlevel.\n \"\"\"\n #if not add:\n # self.quadlist = []\n if verbose:\n print(\"Making more quads, from quadlevel %i ...\" % self.quadlevel)\n if self.quadlevel == 0:\n self.quadlist.extend(makequads1(self.starlist, n=7, d=self.mindist, verbose=verbose))\n elif self.quadlevel == 1:\n self.quadlist.extend(makequads2(self.starlist, f=3, n=5, d=self.mindist, verbose=verbose))\n elif self.quadlevel == 2:\n self.quadlist.extend(makequads2(self.starlist, f=6, n=5, d=self.mindist, verbose=verbose))\n elif self.quadlevel == 3:\n self.quadlist.extend(makequads2(self.starlist, f=12, n=5, d=self.mindist, verbose=verbose))\n elif self.quadlevel == 4:\n self.quadlist.extend(makequads2(self.starlist, f=10, n=6, s=3, d=self.mindist, verbose=verbose))\n\n else:\n return False\n\n self.quadlist = removeduplicates(self.quadlist, verbose=verbose)\n self.quadlevel += 1\n return True\n\nclass Quad:\n \"\"\"\n A geometric \"hash\", or asterism, as used in Astrometry.net :\n http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:0910.2233\n It is made out of 4 stars, and it is shift / scale / rotation invariant\n \"\"\"\n\n def __init__(self, fourstars):\n \"\"\"\n fourstars is a list of four stars\n\n We make the following attributes :\n self.hash\n self.stars (in the order A, B, C, D)\n\n \"\"\"\n assert len(fourstars) == 4\n\n tests = [(0,1), (0,2), (0,3), (1,2), (1,3), (2,3)]\n other = [(2,3), (1,3), (1,2), (0,3), (0,2), (0,1)]\n dists = np.array([np.linalg.norm(np.array(fourstars[0]['x'], fourstars[0]['y']) - np.array(fourstars[1]['x'], fourstars[1]['y'])) for (i,j) in tests])\n assert np.min(dists) > 1.0\n\n maxindex = np.argmax(dists)\n (Ai, Bi) = tests[maxindex] # Indexes of stars A and B\n (Ci, Di) = other[maxindex] # Indexes of stars C and D\n A = fourstars[Ai]\n B = fourstars[Bi]\n C = fourstars[Ci]\n D = fourstars[Di]\n\n # We look for matrix transform [[a -b], [b a]] + [c d] 
that brings A and B to 00 11 :\n x = B['x'] - A['x']\n y = B['y'] - A['y']\n b = (x-y)/(x*x + y*y)\n a = (1.0/x) * (1.0 + b*y)\n c = b*A['y'] - a*A['x']\n d = - (b*A['x'] + a*A['y'])\n\n t = SimpleTransform((a, b, c, d))\n\n # Test\n #print(t.apply((A['x'], A['y'])))\n #print(t.apply((B.x, B['y'])))\n\n (xC, yC) = t.apply(x = C['x'], y = C['y'])\n (xD, yD) = t.apply(x = D['x'], y = D['y'])\n\n # Normal case\n self.hash = (xC, yC, xD, yD)\n\n # Break symmetries :\n testa = xC > xD\n testb = xC + xD > 1\n\n if testa and not testb: # we switch C and D\n #print(\"a\")\n self.hash = (xD, yD, xC, yC)\n (C, D) = (D, C)\n\n if testb and not testa: # We switch A and B\n #print(\"b\")\n self.hash = (1.0-xD, 1.0-yD, 1.0-xC, 1.0-yC)\n (A, B) = (B, A)\n (C, D) = (D, C)\n\n if testa and testb:\n #print(\"a + b\")\n self.hash = (1.0-xC, 1.0-yC, 1.0-xD, 1.0-yD)\n (A, B) = (B, A)\n\n # Checks :\n assert self.hash[0] <= self.hash[2]\n assert self.hash[0] + self.hash[2] <= 1\n\n self.stars = [A, B, C, D] # Order might be different from the fourstars !\n\n\n def __str__(self):\n return \"Hash : %6.3f %6.3f %6.3f %6.3f / IDs : (%s, %s, %s, %s)\" % (\n self.hash[0], self.hash[1], self.hash[2], self.hash[3],\n self.stars[0].name, self.stars[1].name, self.stars[2].name, self.stars[3].name)\n\nclass SimpleTransform:\n \"\"\"\n Represents an affine transformation consisting of rotation, isotropic scaling, and shift.\n [x', y'] = [[a -b], [b a]] * [x, y] + [c d]\n \"\"\"\n\n def __init__(self, v = (1, 0, 0, 0)):\n \"\"\"\n v = (a, b, c, d)\n \"\"\"\n self.v = np.asarray(v)\n\n def getscaling(self):\n return math.sqrt(self.v[0]*self.v[0] + self.v[1]*self.v[1])\n\n def getrotation(self):\n \"\"\"\n The CCW rotation angle, in degrees\n \"\"\"\n return math.atan2(self.v[1], self.v[0]) * (180.0/math.pi)# % 360.0\n\n def __str__(self):\n return \"Rotation %+11.6f [deg], scale %8.6f\" % (self.getrotation(), self.getscaling())\n\n\n def inverse(self):\n \"\"\"\n Returns the inverse transform !\n \"\"\"\n\n # To represent affine transformations with matrices, we can use homogeneous coordinates.\n homo = np.array([\n [self.v[0], -self.v[1], self.v[2]],\n [self.v[1], self.v[0], self.v[3]],\n [0.0, 0.0, 1.0]\n ])\n\n inv = np.linalg.inv(homo)\n #print(inv)\n\n return SimpleTransform((inv[0,0], inv[1,0], inv[0,2], inv[1,2]))\n\n\n\n def matrixform(self):\n \"\"\"\n Special output for scipy.ndimage.interpolation.affine_transform\n Returns (matrix, offset)\n \"\"\"\n\n return (np.array([[self.v[0], -self.v[1]], [self.v[1], self.v[0]]]), self.v[2:4])\n\n\n def apply(self, x, y):\n \"\"\"\n Applies the transform to a point (x, y)\n \"\"\"\n xn = self.v[0]*x -self.v[1]*y + self.v[2]\n yn = self.v[1]*x +self.v[0]*y + self.v[3]\n return (xn, yn)\n\n def applystar(self, star):\n transstar = star.copy()\n (transstar.x, transstar.y) = self.apply((transstar.x, transstar.y))\n return transstar\n\n def applystarlist(self, starlist):\n return [self.applystar(star) for star in starlist]\n\ndef sortstarlistbyflux(starlist):\n \"\"\"\n We sort starlist according to flux : highest flux first !\n \"\"\"\n sortedstarlist = sorted(starlist, key=operator.itemgetter('flux'))\n sortedstarlist.reverse()\n return sortedstarlist\n\ndef area(stars, border=0.01):\n \"\"\"\n Returns the area covered by the stars.\n Border is relative to max-min\n \"\"\"\n if len(stars) == 0:\n return np.array([0, 1, 0, 1])\n\n if len(stars) == 1:\n star = stars[0]\n return np.array([star['x'] - 0.5, star['x'] + 0.5, star['y'] - 0.5, star['y'] + 0.5])\n\n (xmin, xmax) = 
(np.min(stars['x']), np.max(stars['x']))\n (ymin, ymax) = (np.min(stars['y']), np.max(stars['y']))\n xw = xmax - xmin\n yw = ymax - ymin\n xmin = xmin - border*xw\n xmax = xmax + border*xw\n ymin = ymin - border*yw\n ymax = ymax + border*yw\n return np.array([xmin, xmax, ymin, ymax])\n\ndef makequads1(starlist, n=7, s=0, d=50.0, verbose=True):\n \"\"\"\n First trivial quad maker.\n Makes combis of the n brightest stars.\n\n :param n: number of stars to consider (brightest ones).\n :type n: int\n :param s: how many of the brightest stars should I skip ?\n This feature is useful to avoid building quads with nearly saturated stars that are not\n available in other exposures.\n :type s: int\n :param d: minimal distance between stars\n :type d: float\n\n \"\"\"\n quadlist = []\n sortedstars = sortstarlistbyflux(starlist)\n\n for fourstars in itertools.combinations(sortedstars[s:s+n], 4):\n if mindist(fourstars) > d:\n quadlist.append(Quad(fourstars))\n\n if verbose:\n print(\"Made %4i quads from %4i stars (combi n=%i s=%i d=%.1f)\" % (len(quadlist), len(starlist), n, s, d))\n\n return quadlist\n\ndef mindist(cats):\n \"\"\"\n Function that tests if 4 stars are suitable to make a good quad...\n \"\"\"\n tests = [(0,1), (0,2), (0,3), (1,2), (1,3), (2,3)]\n dists = np.array([np.linalg.norm(np.array(cats[0]['x'], cats[0]['y']) - np.array(cats[1]['x'], cats[1]['y'])) for (i,j) in tests])\n return np.min(dists)\n\ndef removeduplicates(quadlist, verbose=True):\n \"\"\"\n Returns a quadlist without quads with identical hashes...\n \"\"\"\n # To avoid crash in lexsort if quadlist is too small :\n if len(quadlist) < 2:\n return quadlist\n hasharray = np.array([q.hash for q in quadlist])\n\n order = np.lexsort(hasharray.T)\n hasharray = hasharray[order]\n #diff = np.diff(hasharray, axis=0)\n diff = np.fabs(np.diff(hasharray, axis=0))\n #diff = np.sum(diff, axis=1)\n ui = np.ones(len(hasharray), 'bool')\n ui[1:] = (diff >= 0.000001).any(axis=1)\n #print(hasharray[ui==False])\n if verbose:\n print(\"Removing %i/%i duplicates\" % (len(quadlist) - np.sum(ui), len(quadlist)))\n\n return [quad for (quad, u) in zip(quadlist, ui) if u == True]\n" ]
[ [ "numpy.min", "numpy.asarray", "numpy.linalg.inv", "numpy.lexsort", "numpy.max", "numpy.argmax", "numpy.diff", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ANCL/QuadPPO
[ "b7ed0574467bd321f4259175621a12ff7aeb7d12", "b7ed0574467bd321f4259175621a12ff7aeb7d12" ]
[ "spinup/algos/tf1/td3/core.py", "spinup/algos/pytorch/ppo/ppo_quad.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\n\ndef placeholder(dim=None):\n return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,))\n\ndef placeholders(*args):\n return [placeholder(dim) for dim in args]\n\ndef mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):\n for h in hidden_sizes[:-1]:\n x = tf.layers.dense(x, units=h, activation=activation)\n return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)\n\ndef get_vars(scope):\n return [x for x in tf.global_variables() if scope in x.name]\n\ndef count_vars(scope):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])\n\n\"\"\"\nActor-Critics\n\"\"\"\ndef mlp_actor_critic(x, a, hidden_sizes=(256,256), activation=tf.nn.relu, \n output_activation=tf.tanh, action_space=None):\n act_dim = a.shape.as_list()[-1]\n act_limit = action_space.high[0]\n with tf.variable_scope('pi'):\n pi = act_limit * mlp(x, list(hidden_sizes)+[act_dim], activation, output_activation)\n with tf.variable_scope('q1'):\n q1 = tf.squeeze(mlp(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)\n with tf.variable_scope('q2'):\n q2 = tf.squeeze(mlp(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)\n with tf.variable_scope('q1', reuse=True):\n q1_pi = tf.squeeze(mlp(tf.concat([x,pi], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)\n return pi, q1, q2, q1_pi\n", "import numpy as np\nimport torch\nfrom torch.optim import Adam\nimport gym\nimport time\nimport spinup.algos.pytorch.ppo.core as core\nfrom spinup.utils.logx import EpochLogger\nfrom spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads\nfrom spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs\nfrom rlschool import make_env\n\nclass PPOBuffer:\n \"\"\"\n A buffer for storing trajectories experienced by a PPO agent interacting\n with the environment, and using Generalized Advantage Estimation (GAE-Lambda)\n for calculating the advantages of state-action pairs.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):\n self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)\n self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)\n self.adv_buf = np.zeros(size, dtype=np.float32)\n self.rew_buf = np.zeros(size, dtype=np.float32)\n self.ret_buf = np.zeros(size, dtype=np.float32) # reward to go\n self.val_buf = np.zeros(size, dtype=np.float32)\n self.logp_buf = np.zeros(size, dtype=np.float32)\n self.gamma, self.lam = gamma, lam\n self.ptr, self.path_start_idx, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, val, logp):\n \"\"\"\n Append one timestep of agent-environment interaction to the buffer.\n \"\"\"\n assert self.ptr < self.max_size # buffer has to have room so you can store\n self.obs_buf[self.ptr] = obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.val_buf[self.ptr] = val\n self.logp_buf[self.ptr] = logp\n self.ptr += 1\n\n def finish_path(self, last_val=0):\n \"\"\"\n Call this at the end of a trajectory, or when one gets cut off\n by an epoch ending. 
This looks back in the buffer to where the\n trajectory started, and uses rewards and value estimates from\n the whole trajectory to compute advantage estimates with GAE-Lambda,\n as well as compute the rewards-to-go for each state, to use as\n the targets for the value function.\n\n The \"last_val\" argument should be 0 if the trajectory ended\n because the agent reached a terminal state (died), and otherwise\n should be V(s_T), the value function estimated for the last state.\n This allows us to bootstrap the reward-to-go calculation to account\n for timesteps beyond the arbitrary episode horizon (or epoch cutoff).\n \"\"\"\n\n path_slice = slice(self.path_start_idx, self.ptr)\n rews = np.append(self.rew_buf[path_slice], last_val)\n vals = np.append(self.val_buf[path_slice], last_val)\n\n # the next two lines implement GAE-Lambda advantage calculation\n deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]\n self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)\n\n # the next line computes rewards-to-go, to be targets for the value function\n self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]\n\n self.path_start_idx = self.ptr\n\n def get(self):\n \"\"\"\n Call this at the end of an epoch to get all of the data from\n the buffer, with advantages appropriately normalized (shifted to have\n mean zero and std one). Also, resets some pointers in the buffer.\n \"\"\"\n assert self.ptr == self.max_size # buffer has to be full before you can get\n self.ptr, self.path_start_idx = 0, 0\n # the next two lines implement the advantage normalization trick\n adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)\n self.adv_buf = (self.adv_buf - adv_mean) / adv_std\n data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,\n adv=self.adv_buf, logp=self.logp_buf)\n return {k: torch.as_tensor(v, dtype=torch.float32) for k, v in data.items()}\n\ndef ppo(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0,\n steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4,\n vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=1000,\n target_kl=0.01, logger_kwargs=dict(), save_freq=10):\n\n # Special function to avoid certain slowdowns from PyTorch + MPI combo.\n setup_pytorch_for_mpi()\n\n # Set up logger and save configuration\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n # Random seed\n seed += 10000 * proc_id()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n # Instantiate environment\n # env = env_fn()\n # obs_dim = env.observation_space.shape\n # act_dim = env.action_space.shape\n\n env = make_env(\"Quadrotor\", task=\"hovering_control\")\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n # Create actor-critic module\n ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)\n\n # Sync params across processes\n sync_params(ac)\n\n # Count variables\n var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v])\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t v: %d\\n' % var_counts)\n\n # Set up experience buffer\n local_steps_per_epoch = int(steps_per_epoch / num_procs())\n buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)\n\n # Set up function for computing PPO policy loss\n def compute_loss_pi(data):\n obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']\n\n # Policy loss\n pi, logp = ac.pi(obs, act)\n ratio = torch.exp(logp - logp_old)\n clip_adv = 
torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * adv\n loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()\n\n # Useful extra info\n approx_kl = (logp_old - logp).mean().item()\n ent = pi.entropy().mean().item()\n clipped = ratio.gt(1 + clip_ratio) | ratio.lt(1 - clip_ratio)\n clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()\n pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)\n\n return loss_pi, pi_info\n\n # Set up function for computing value loss\n def compute_loss_v(data):\n obs, ret = data['obs'], data['ret']\n return ((ac.v(obs) - ret) ** 2).mean()\n\n # Set up optimizers for policy and value function\n pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)\n vf_optimizer = Adam(ac.v.parameters(), lr=vf_lr)\n\n # Set up model saving\n logger.setup_pytorch_saver(ac)\n\n def update():\n data = buf.get()\n\n pi_l_old, pi_info_old = compute_loss_pi(data)\n pi_l_old = pi_l_old.item()\n v_l_old = compute_loss_v(data).item()\n\n # Train policy with multiple steps of gradient descent\n for i in range(train_pi_iters):\n pi_optimizer.zero_grad()\n loss_pi, pi_info = compute_loss_pi(data)\n kl = mpi_avg(pi_info['kl'])\n if kl > 1.5 * target_kl:\n logger.log('Early stopping at step %d due to reaching max kl.' % i)\n break\n loss_pi.backward()\n mpi_avg_grads(ac.pi) # average grads across MPI processes\n pi_optimizer.step()\n\n logger.store(StopIter=i)\n\n # Value function learning\n for i in range(train_v_iters):\n vf_optimizer.zero_grad()\n loss_v = compute_loss_v(data)\n loss_v.backward()\n mpi_avg_grads(ac.v) # average grads across MPI processes\n vf_optimizer.step()\n\n # Log changes from update\n kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']\n logger.store(LossPi=pi_l_old, LossV=v_l_old,\n KL=kl, Entropy=ent, ClipFrac=cf,\n DeltaLossPi=(loss_pi.item() - pi_l_old),\n DeltaLossV=(loss_v.item() - v_l_old))\n\n # Prepare for interaction with environment\n start_time = time.time()\n o, ep_ret, ep_len = env.reset(), 0, 0\n\n # Main loop: collect experience in env and update/log each epoch\n for epoch in range(epochs):\n for t in range(local_steps_per_epoch):\n a, v, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))\n\n next_o, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # save and log\n buf.store(o, a, r, v, logp)\n logger.store(VVals=v)\n\n # Update obs (critical!)\n o = next_o\n\n timeout = ep_len == max_ep_len\n terminal = d or timeout\n epoch_ended = t == local_steps_per_epoch - 1\n\n if terminal or epoch_ended:\n if epoch_ended and not (terminal):\n print('Warning: trajectory cut off by epoch at %d steps.' 
% ep_len, flush=True)\n # if trajectory didn't reach terminal state, bootstrap value target\n if timeout or epoch_ended:\n _, v, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))\n else:\n v = 0\n buf.finish_path(v)\n if terminal:\n # only save EpRet / EpLen if trajectory finished\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, ep_ret, ep_len = env.reset(), 0, 0\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs - 1):\n logger.save_state({'env': env}, None)\n\n # Perform PPO update!\n update()\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('VVals', with_min_and_max=True)\n logger.log_tabular('TotalEnvInteracts', (epoch + 1) * steps_per_epoch)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('DeltaLossPi', average_only=True)\n logger.log_tabular('DeltaLossV', average_only=True)\n logger.log_tabular('Entropy', average_only=True)\n logger.log_tabular('KL', average_only=True)\n logger.log_tabular('ClipFrac', average_only=True)\n logger.log_tabular('StopIter', average_only=True)\n logger.log_tabular('Time', time.time() - start_time)\n logger.dump_tabular()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='HalfCheetah-v2')\n parser.add_argument('--hid', type=int, default=64)\n parser.add_argument('--l', type=int, default=2)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--cpu', type=int, default=4)\n parser.add_argument('--steps', type=int, default=4000)\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--exp_name', type=str, default='ppo')\n args = parser.parse_args()\n\n mpi_fork(args.cpu) # run parallel code with mpi\n\n from spinup.utils.run_utils import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n ppo(lambda : make_env(\"Quadrotor\", task=\"hovering_control\"), actor_critic=core.MLPActorCritic,\n ac_kwargs=dict(hidden_sizes=[args.hid]*args.l), gamma=args.gamma,\n seed=args.seed, steps_per_epoch=args.steps, epochs=args.epochs,\n logger_kwargs=logger_kwargs)" ]
[ [ "tensorflow.concat", "tensorflow.global_variables", "tensorflow.layers.dense", "tensorflow.placeholder", "tensorflow.variable_scope" ], [ "numpy.random.seed", "torch.manual_seed", "torch.min", "torch.exp", "numpy.append", "torch.clamp", "numpy.zeros", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kunalq/Cirq
[ "de0c5e855069bba71e55b070fc9b06f58c07a861", "e73c9bef672e83143ab04e7f169988149055d630", "5ad06cc7a487ca94436715a3c51b6a50dfd10513", "e73c9bef672e83143ab04e7f169988149055d630", "e73c9bef672e83143ab04e7f169988149055d630", "e73c9bef672e83143ab04e7f169988149055d630", "5ad06cc7a487ca94436715a3c51b6a50dfd10513", "e73c9bef672e83143ab04e7f169988149055d630", "5ad06cc7a487ca94436715a3c51b6a50dfd10513", "e73c9bef672e83143ab04e7f169988149055d630" ]
[ "cirq/ion/convert_to_ion_gates_test.py", "cirq/linalg/transformations.py", "cirq/testing/lin_alg_utils.py", "cirq/testing/circuit_compare.py", "cirq/study/trial_result.py", "cirq/linalg/operator_spaces_test.py", "cirq/protocols/approximate_equality_test.py", "cirq/protocols/unitary.py", "cirq/sim/mux.py", "cirq/sim/density_matrix_simulator.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nimport numpy as np\n\nimport cirq\n\n\nclass OtherX(cirq.SingleQubitGate):\n def _unitary_(self) -> np.ndarray:\n return np.array([[0, 1], [1, 0]])\n\n\nclass NoUnitary(cirq.SingleQubitGate):\n pass\n\n\nclass OtherCNOT(cirq.TwoQubitGate):\n def _unitary_(self) -> np.ndarray:\n return np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0]])\n\n\ndef test_convert_to_ion_gates():\n q0 = cirq.GridQubit(0, 0)\n q1 = cirq.GridQubit(0, 1)\n op = cirq.CNOT(q0, q1)\n circuit = cirq.Circuit()\n\n with pytest.raises(TypeError):\n cirq.ion.ConvertToIonGates().convert_one(circuit)\n\n with pytest.raises(TypeError):\n cirq.ion.ConvertToIonGates().convert_one(NoUnitary().on(q0))\n\n no_unitary_op = NoUnitary().on(q0)\n assert cirq.ion.ConvertToIonGates(ignore_failures=True).convert_one(\n no_unitary_op) == [no_unitary_op]\n\n rx = cirq.ion.ConvertToIonGates().convert_one(OtherX().on(q0))\n rop = cirq.ion.ConvertToIonGates().convert_one(op)\n rcnot = cirq.ion.ConvertToIonGates().convert_one(OtherCNOT().on(q0, q1))\n assert rx == [\n cirq.PhasedXPowGate(phase_exponent=1).on(cirq.GridQubit(0, 0))\n ]\n assert rop == [cirq.Ry(np.pi/2).on(op.qubits[0]),\n cirq.ion.MS(np.pi/4).on(op.qubits[0], op.qubits[1]),\n cirq.ops.Rx(-1*np.pi/2).on(op.qubits[0]),\n cirq.ops.Rx(-1*np.pi/2).on(op.qubits[1]),\n cirq.ops.Ry(-1*np.pi/2).on(op.qubits[0])]\n assert rcnot == [\n cirq.PhasedXPowGate(phase_exponent=-0.75,\n exponent=0.5).on(cirq.GridQubit(0, 0)),\n cirq.PhasedXPowGate(phase_exponent=1,\n exponent=0.25).on(cirq.GridQubit(0, 1)),\n cirq.T.on(cirq.GridQubit(0, 0)),\n cirq.MS(-0.5 * np.pi / 2).on(cirq.GridQubit(0, 0), cirq.GridQubit(0,\n 1)),\n (cirq.Y**0.5).on(cirq.GridQubit(0, 0)),\n cirq.PhasedXPowGate(phase_exponent=1,\n exponent=0.25).on(cirq.GridQubit(0, 1)),\n (cirq.Z**-0.75).on(cirq.GridQubit(0, 0))\n ]\n\n\ndef test_convert_to_ion_circuit():\n q0 = cirq.LineQubit(0)\n q1 = cirq.LineQubit(1)\n us = cirq.Duration(nanos=1000)\n ion_device = cirq.IonDevice(us, us, us, [q0, q1])\n\n clifford_circuit_1 = cirq.Circuit()\n clifford_circuit_1.append([cirq.X(q0), cirq.H(q1),\n cirq.MS(np.pi/4).on(q0, q1)])\n ion_circuit_1 = cirq.ion.ConvertToIonGates().convert_circuit(\n clifford_circuit_1)\n\n ion_device.validate_circuit(ion_circuit_1)\n cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(\n clifford_circuit_1, ion_circuit_1, atol=1e-6)\n clifford_circuit_2 = cirq.Circuit()\n clifford_circuit_2.append([cirq.X(q0), cirq.CNOT(q1, q0), cirq.MS(\n np.pi/4).on(q0, q1)])\n ion_circuit_2 = cirq.ion.ConvertToIonGates().convert_circuit(\n clifford_circuit_2)\n ion_device.validate_circuit(ion_circuit_2)\n cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(\n clifford_circuit_2, ion_circuit_2, atol=1e-6)\n", "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility methods for transforming matrices.\"\"\"\n\nfrom typing import Tuple, Optional, Sequence, List, Union, TypeVar\n\nimport numpy as np\n\nfrom cirq.protocols.approximate_equality import approx_eq\nfrom cirq.linalg import predicates\n\n# This is a special indicator value used by the subwavefunction method to\n# determine whether or not the caller provided a 'default' argument. It must be\n# of type np.ndarray to ensure the method has the correct type signature in that\n# case. It is checked for using `is`, so it won't have a false positive if the\n# user provides a different np.array([]) value.\nRaiseValueErrorIfNotProvided = np.array([]) # type: np.ndarray\n\nTDefault = TypeVar('TDefault')\n\n\ndef reflection_matrix_pow(reflection_matrix: np.ndarray, exponent: float):\n \"\"\"Raises a matrix with two opposing eigenvalues to a power.\n\n Args:\n reflection_matrix: The matrix to raise to a power.\n exponent: The power to raise the matrix to.\n\n Returns:\n The given matrix raised to the given power.\n \"\"\"\n\n # The eigenvalues are x and -x for some complex unit x. Determine x.\n squared_phase = np.dot(reflection_matrix[:, 0],\n reflection_matrix[0, :])\n phase = complex(np.sqrt(squared_phase))\n\n # Extract +x and -x eigencomponents of the matrix.\n i = np.eye(reflection_matrix.shape[0]) * phase\n pos_part = (i + reflection_matrix) * 0.5\n neg_part = (i - reflection_matrix) * 0.5\n\n # Raise the matrix to a power by raising its eigencomponents to that power.\n pos_factor = phase**(exponent - 1)\n neg_factor = pos_factor * complex(-1)**exponent\n pos_part_raised = pos_factor * pos_part\n neg_part_raised = neg_part * neg_factor\n return pos_part_raised + neg_part_raised\n\n\ndef match_global_phase(a: np.ndarray,\n b: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Phases the given matrices so that they agree on the phase of one entry.\n\n To maximize precision, the position with the largest entry from one of the\n matrices is used when attempting to compute the phase difference between\n the two matrices.\n\n Args:\n a: A numpy array.\n b: Another numpy array.\n\n Returns:\n A tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.\n \"\"\"\n\n # Not much point when they have different shapes.\n if a.shape != b.shape or a.size == 0:\n return np.copy(a), np.copy(b)\n\n # Find the entry with the largest magnitude in one of the matrices.\n k = max(np.ndindex(*a.shape), key=lambda t: abs(b[t]))\n\n def dephase(v):\n r = np.real(v)\n i = np.imag(v)\n\n # Avoid introducing floating point error when axis-aligned.\n if i == 0:\n return -1 if r < 0 else 1\n if r == 0:\n return 1j if i < 0 else -1j\n\n return np.exp(-1j * np.arctan2(i, r))\n\n # Zero the phase at this entry in both matrices.\n return a * dephase(a[k]), b * dephase(b[k])\n\n\ndef targeted_left_multiply(left_matrix: np.ndarray,\n right_target: np.ndarray,\n target_axes: Sequence[int],\n out: Optional[np.ndarray] = None\n ) -> np.ndarray:\n \"\"\"Left-multiplies the given axes of the target tensor by the given matrix.\n\n Note that the matrix must have a compatible tensor 
structure.\n\n For example, if you have an 6-qubit state vector `input_state` with shape\n (2, 2, 2, 2, 2, 2), and a 2-qubit unitary operation `op` with shape\n (2, 2, 2, 2), and you want to apply `op` to the 5'th and 3'rd qubits\n within `input_state`, then the output state vector is computed as follows:\n\n output_state = cirq.targeted_left_multiply(op, input_state, [5, 3])\n\n This method also works when the right hand side is a matrix instead of a\n vector. If a unitary circuit's matrix is `old_effect`, and you append\n a CNOT(q1, q4) operation onto the circuit, where the control q1 is the qubit\n at offset 1 and the target q4 is the qubit at offset 4, then the appended\n circuit's unitary matrix is computed as follows:\n\n new_effect = cirq.targeted_left_multiply(\n left_matrix=cirq.unitary(cirq.CNOT).reshape((2, 2, 2, 2)),\n right_target=old_effect,\n target_axes=[1, 4])\n\n Args:\n left_matrix: What to left-multiply the target tensor by.\n right_target: A tensor to carefully broadcast a left-multiply over.\n target_axes: Which axes of the target are being operated on.\n out: The buffer to store the results in. If not specified or None, a new\n buffer is used. Must have the same shape as right_target.\n\n Returns:\n The output tensor.\n \"\"\"\n k = len(target_axes)\n d = len(right_target.shape)\n work_indices = tuple(range(k))\n data_indices = tuple(range(k, k + d))\n used_data_indices = tuple(data_indices[q] for q in target_axes)\n input_indices = work_indices + used_data_indices\n output_indices = list(data_indices)\n for w, t in zip(work_indices, target_axes):\n output_indices[t] = w\n\n all_indices = set(input_indices + data_indices + tuple(output_indices))\n\n return np.einsum(left_matrix, input_indices,\n right_target, data_indices,\n output_indices,\n # We would prefer to omit 'optimize=' (it's faster),\n # but this is a workaround for a bug in numpy:\n # https://github.com/numpy/numpy/issues/10926\n optimize=len(all_indices) >= 26,\n # And this is workaround for *another* bug!\n # Supposed to be able to just say 'old=old'.\n **({'out': out} if out is not None else {}))\n\n\ndef targeted_conjugate_about(tensor: np.ndarray,\n target: np.ndarray,\n indices: Sequence[int],\n conj_indices: Sequence[int] = None,\n buffer: Optional[np.ndarray] = None,\n out: Optional[np.ndarray] = None) -> np.ndarray:\n r\"\"\"Conjugates the given tensor about the target tensor.\n\n This method computes a target tensor conjugated by another tensor.\n Here conjugate is used in the sense of conjugating by a matrix, i.a.\n A conjugated about B is $A B A^\\dagger$ where $\\dagger$ represents the\n conjugate transpose.\n\n Abstractly this compute $A \\cdot B \\cdot A^\\dagger$ where A and B are\n multi-dimensional arrays, and instead of matrix multiplication $\\cdot$\n is a contraction between the given indices (indices for first $\\cdot$,\n conj_indices for second $\\cdot$).\n\n More specifically this computes\n sum tensor_{i_0,...,i_{r-1},j_0,...,j_{r-1}}\n * target_{k_0,...,k_{r-1},l_0,...,l_{r-1}\n * tensor_{m_0,...,m_{r-1},n_0,...,n_{r-1}}^*\n where the sum is over indices where j_s = k_s and s is in `indices`\n and l_s = m_s and s is in `conj_indices`.\n\n Args:\n tensor: The tensor that will be conjugated about the target tensor.\n target: The tensor that will receive the conjugation.\n indices: The indices which will be contracted between the tensor and\n target.\n conj_indices; The indices which will be contracted between the\n complex conjugate of the tensor and the target. 
If this is None,\n then these will be the values in indices plus half the number\n of dimensions of the target (`ndim`). This is the most common case\n and corresponds to the case where the target is an operator on\n a n-dimensional tensor product space (here `n` would be `ndim`).\n buffer: A buffer to store partial results in. If not specified or None,\n a new buffer is used.\n out: The buffer to store the results in. If not specified or None, a new\n buffer is used. Must have the same shape as target.\n\n Returns:\n The result the conjugation.\n \"\"\"\n conj_indices = conj_indices or [i + target.ndim // 2 for i in indices]\n first_multiply = targeted_left_multiply(tensor, target, indices, out=buffer)\n return targeted_left_multiply(np.conjugate(tensor),\n first_multiply,\n conj_indices,\n out=out)\n\n\n_TSliceAtom = Union[int, slice, 'ellipsis']\n_TSlice = Union[_TSliceAtom, Sequence[_TSliceAtom]]\n\n\ndef apply_matrix_to_slices(\n target: np.ndarray,\n matrix: np.ndarray,\n slices: List[_TSlice],\n *,\n out: Optional[np.ndarray] = None) -> np.ndarray:\n \"\"\"Left-multiplies an NxN matrix onto N slices of a numpy array.\n\n Example:\n The 4x4 matrix of a fractional SWAP gate can be expressed as\n\n [ 1 ]\n [ X**t ]\n [ 1 ]\n\n Where X is the 2x2 Pauli X gate and t is the power of the swap with t=1\n being a full swap. X**t is a power of the Pauli X gate's matrix.\n Applying the fractional swap is equivalent to applying a fractional X\n within the inner 2x2 subspace; the rest of the matrix is identity. This\n can be expressed using `apply_matrix_to_slices` as follows:\n\n def fractional_swap(target):\n assert target.shape == (4,)\n return apply_matrix_to_slices(\n target=target,\n matrix=cirq.unitary(cirq.X**t),\n slices=[1, 2]\n )\n\n Args:\n target: The input array with slices that need to be left-multiplied.\n matrix: The linear operation to apply to the subspace defined by the\n slices.\n slices: The parts of the tensor that correspond to the \"vector entries\"\n that the matrix should operate on. May be integers or complicated\n multi-dimensional slices into a tensor. The slices must refer to\n non-overlapping sections of the input all with the same shape.\n out: Where to write the output. If not specified, a new numpy array is\n created, with the same shape and dtype as the target, to store the\n output.\n\n Returns:\n The transformed array.\n \"\"\"\n # Validate arguments.\n if out is target:\n raise ValueError(\"Can't write output over the input.\")\n if matrix.shape != (len(slices), len(slices)):\n raise ValueError(\"matrix.shape != (len(slices), len(slices))\")\n\n # Fill in default values and prepare space.\n if out is None:\n out = np.copy(target)\n else:\n out[...] = target[...]\n\n # Apply operation.\n for i, s_i in enumerate(slices):\n out[s_i] *= matrix[i, i]\n for j, s_j in enumerate(slices):\n if i != j:\n out[s_i] += target[s_j] * matrix[i, j]\n\n return out\n\n\ndef partial_trace(tensor: np.ndarray,\n keep_indices: List[int]) -> np.ndarray:\n \"\"\"Takes the partial trace of a given tensor.\n\n The input tensor must have shape `(d_0, ..., d_{k-1}, d_0, ..., d_{k-1})`.\n The trace is done over all indices that are not in keep_indices. The\n resulting tensor has shape `(d_{i_0}, ..., d_{i_r}, d_{i_0}, ..., d_{i_r})`\n where `i_j` is the `j`th element of `keep_indices`.\n\n Args:\n tensor: The tensor to sum over. This tensor must have a shape\n `(d_0, ..., d_{k-1}, d_0, ..., d_{k-1})`.\n keep_indices: Which indices to not sum over. 
These are only the indices\n of the first half of the tensors indices (i.e. all elements must\n be between `0` and `tensor.ndims / 2 - 1` inclusive).\n\n Raises:\n ValueError: if the tensor is not of the correct shape or the indices\n are not from the first half of valid indices for the tensor.\n \"\"\"\n ndim = tensor.ndim // 2\n if not all(tensor.shape[i] == tensor.shape[i + ndim] for i in range(ndim)):\n raise ValueError('Tensors must have shape (d_0,...,d_{{k-1}},d_0,...,'\n 'd_{{k-1}}) but had shape ({}).'.format(tensor.shape))\n if not all(i < ndim for i in keep_indices):\n raise ValueError('keep_indices were {} but must be in first half, '\n 'i.e. have index less that {}.'.format(keep_indices,\n ndim))\n keep_set = set(keep_indices)\n keep_map = dict(zip(keep_indices, sorted(keep_indices)))\n left_indices = [keep_map[i] if i in keep_set else i for i in range(ndim)]\n right_indices = [ndim + i if i in keep_set else i for i in left_indices]\n return np.einsum(tensor, left_indices + right_indices)\n\n\ndef wavefunction_partial_trace_as_mixture(\n wavefunction: np.ndarray,\n keep_indices: List[int],\n *,\n atol: Union[int, float] = 1e-8) -> Tuple[Tuple[float, np.ndarray], ...]:\n \"\"\"Returns a mixture representing a wavefunction with only some qubits kept.\n\n The input wavefunction must have shape `(2,) * n` or `(2 ** n)` where\n `wavefunction` is expressed over n qubits. States in the output mixture will\n retain the same type of shape as the input wavefunction, either `(2 ** k)`\n or `(2,) * k` where k is the number of qubits kept.\n\n If the wavefunction cannot be factored into a pure state over `keep_indices`\n then eigendecomposition is used and the output mixture will not be unique.\n\n Args:\n wavefunction: A wavefunction to express over a qubit subset.\n keep_indices: Which indices to express the wavefunction on.\n atol: The tolerance for determining that a factored state is pure.\n\n Returns:\n A single-component mixture in which the factored wavefunction has\n probability '1' if the factored state is pure, or else a mixture of the\n default eigendecomposition of the mixed state's partial trace.\n\n Raises:\n ValueError: if the input wavefunction is not an array of length\n `(2 ** n)` or a tensor with a shape of `(2,) * n`\n \"\"\"\n\n # Attempt to do efficient state factoring.\n state = subwavefunction(wavefunction, keep_indices, default=None, atol=atol)\n if state is not None:\n return ((1.0, state),)\n\n # Fall back to a (non-unique) mixture representation.\n keep_dims = 1 << len(keep_indices)\n ret_shape: Union[Tuple[int], Tuple[int, ...]]\n if wavefunction.shape == (wavefunction.size,):\n ret_shape = (keep_dims,)\n elif all(e == 2 for e in wavefunction.shape):\n ret_shape = tuple(2 for _ in range(len(keep_indices)))\n\n rho = np.kron(\n np.conj(wavefunction.reshape(-1, 1)).T,\n wavefunction.reshape(-1, 1)).reshape(\n (2, 2) * int(np.log2(wavefunction.size)))\n keep_rho = partial_trace(rho, keep_indices).reshape((keep_dims,) * 2)\n eigvals, eigvecs = np.linalg.eigh(keep_rho)\n mixture = tuple(zip(eigvals, [vec.reshape(ret_shape) for vec in eigvecs.T]))\n return tuple([\n (float(p[0]), p[1]) for p in mixture if not approx_eq(p[0], 0.0)\n ])\n\n\ndef subwavefunction(wavefunction: np.ndarray,\n keep_indices: List[int],\n *,\n default: TDefault = RaiseValueErrorIfNotProvided,\n atol: Union[int, float] = 1e-8) -> np.ndarray:\n r\"\"\"Attempts to factor a wavefunction into two parts and return one of them.\n\n The input wavefunction must have shape `(2,) * n` or `(2 ** n)` 
where\n `wavefunction` is expressed over n qubits. The returned array will retain\n the same type of shape as the input wavefunction, either `(2 ** k)` or\n `(2,) * k` where k is the number of qubits kept.\n\n If a wavefunction $|\\psi\\rangle$ defined on n qubits is an outer product\n of kets like $|\\psi\\rangle$ = $|x\\rangle \\otimes |y\\rangle$, and\n $|x\\rangle$ is defined over the subset `keep_indices` of k qubits, then\n this method will factor $|\\psi\\rangle$ into $|x\\rangle$ and $|y\\rangle$ and\n return $|x\\rangle$. Note that $|x\\rangle$ is not unique, because $(e^{i\n \\theta} |y\\rangle) \\otimes (|x\\rangle) = (|y\\rangle) \\otimes (e^{i \\theta}\n |x\\rangle)$ . This method randomizes the global phase of $|x\\rangle$ in\n order to avoid accidental reliance on it.\n\n If the provided wavefunction cannot be factored into a pure state over\n `keep_indices`, the method will fall back to return `default`. If `default`\n is not provided, the method will fail and raise `ValueError`.\n\n Args:\n wavefunction: A wavefunction to express over a qubit subset.\n keep_indices: Which indices to express the wavefunction on.\n default: Determines the fallback behavior when `wavefunction` doesn't\n have a pure state factorization. If the factored state is not pure\n and `default` is not set, a ValueError is raised. If default is set\n to a value, that value is returned.\n atol: The minimum tolerance for comparing the output state's coherence\n measure to 1.\n\n Returns:\n The wavefunction expressed over the desired subset of qubits.\n\n Raises:\n ValueError: if the wavefunction is not of the correct shape or the\n indices are not a valid subset of the input wavefunction's indices, or\n the result of factoring is not a pure state.\n \"\"\"\n\n if not np.log2(wavefunction.size).is_integer():\n raise ValueError(\"Input wavefunction of size {} does not represent a \"\n \"state over qubits.\".format(wavefunction.size))\n\n n_qubits = int(np.log2(wavefunction.size))\n keep_dims = 1 << len(keep_indices)\n ret_shape: Union[Tuple[int], Tuple[int, ...]]\n if wavefunction.shape == (wavefunction.size,):\n ret_shape = (keep_dims,)\n wavefunction = wavefunction.reshape((2,) * n_qubits)\n elif wavefunction.shape == (2,) * n_qubits:\n ret_shape = tuple(2 for _ in range(len(keep_indices)))\n else:\n raise ValueError(\n \"Input wavefunction must be shaped like (2 ** n,) or (2,) * n\")\n\n keep_dims = 1 << len(keep_indices)\n if not np.isclose(np.linalg.norm(wavefunction), 1):\n raise ValueError(\"Input state must be normalized.\")\n if len(set(keep_indices)) != len(keep_indices):\n raise ValueError(\n \"keep_indices were {} but must be unique.\".format(keep_indices))\n if any([ind >= n_qubits for ind in keep_indices]):\n raise ValueError(\n \"keep_indices {} are an invalid subset of the input wavefunction.\")\n\n other_qubits = sorted(set(range(n_qubits)) - set(keep_indices))\n candidates = [\n wavefunction[predicates.slice_for_qubits_equal_to(other_qubits,\n k)].reshape(keep_dims)\n for k in range(1 << len(other_qubits))\n ]\n # The coherence measure is computed using unnormalized candidates.\n best_candidate = max(candidates, key=lambda c: np.linalg.norm(c, 2))\n best_candidate = best_candidate / np.linalg.norm(best_candidate)\n left = np.conj(best_candidate.reshape((keep_dims,))).T\n coherence_measure = sum(\n [abs(np.dot(left, c.reshape((keep_dims,))))**2 for c in candidates])\n\n if approx_eq(coherence_measure, 1, atol=atol):\n return np.exp(\n 2j * np.pi * np.random.random()) * 
best_candidate.reshape(ret_shape)\n\n # Method did not yield a pure state. Fall back to `default` argument.\n if default is not RaiseValueErrorIfNotProvided:\n return default\n\n raise ValueError(\n \"Input wavefunction could not be factored into pure state over \"\n \"indices {}\".format(keep_indices))\n", "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A testing class with utilities for checking linear algebra.\"\"\"\n\nfrom typing import Optional\n\nimport numpy as np\n\nfrom cirq import linalg\n\n\ndef random_superposition(dim: int) -> np.ndarray:\n \"\"\"Returns a random unit-length vector from the uniform distribution.\n\n Args:\n dim: The dimension of the vector.\n\n Returns:\n The sampled unit-length vector.\n \"\"\"\n state_vector = np.random.randn(dim).astype(complex)\n state_vector += 1j * np.random.randn(dim)\n state_vector /= np.linalg.norm(state_vector)\n return state_vector\n\n\ndef random_unitary(dim: int) -> np.ndarray:\n \"\"\"Returns a random unitary matrix distributed with Haar measure.\n\n Args:\n dim: The width and height of the matrix.\n\n Returns:\n The sampled unitary matrix.\n\n References:\n 'How to generate random matrices from the classical compact groups'\n http://arxiv.org/abs/math-ph/0609050\n \"\"\"\n z = (np.random.randn(dim, dim) + 1j * np.random.randn(dim, dim))\n q, r = np.linalg.qr(z)\n d = np.diag(r)\n return q * (d / abs(d))\n\n\ndef random_orthogonal(dim: int) -> np.ndarray:\n \"\"\"Returns a random orthogonal matrix distributed with Haar measure.\n\n Args:\n dim: The width and height of the matrix.\n\n Returns:\n The sampled orthogonal matrix.\n\n References:\n 'How to generate random matrices from the classical compact groups'\n http://arxiv.org/abs/math-ph/0609050\n \"\"\"\n m = np.random.randn(dim, dim)\n q, r = np.linalg.qr(m)\n d = np.diag(r)\n return q * (d / abs(d))\n\n\ndef random_special_unitary(dim: int) -> np.ndarray:\n \"\"\"Returns a random special unitary distributed with Haar measure.\n\n Args:\n dim: The width and height of the matrix.\n\n Returns:\n The sampled special unitary.\n \"\"\"\n r = random_unitary(dim)\n r[0, :] /= np.linalg.det(r)\n return r\n\n\ndef random_special_orthogonal(dim: int) -> np.ndarray:\n \"\"\"Returns a random special orthogonal matrix distributed with Haar measure.\n\n Args:\n dim: The width and height of the matrix.\n\n Returns:\n The sampled special orthogonal matrix.\n \"\"\"\n m = random_orthogonal(dim)\n if np.linalg.det(m) < 0:\n m[0, :] *= -1\n return m\n\n\ndef assert_allclose_up_to_global_phase(\n actual: np.ndarray,\n desired: np.ndarray,\n *, # Forces keyword args.\n rtol: float = 1e-7,\n atol: float, # Require atol to be specified\n equal_nan: bool = True,\n err_msg: Optional[str] = '',\n verbose: bool = True) -> None:\n \"\"\"Checks if a ~= b * exp(i t) for some t.\n\n Args:\n actual: A numpy array.\n desired: Another numpy array.\n rtol: Relative error tolerance.\n atol: Absolute error tolerance.\n equal_nan: Whether or not NaN entries should 
be considered equal to\n other NaN entries.\n err_msg: The error message to be printed in case of failure.\n verbose: If True, the conflicting values are appended to the error\n message.\n\n Raises:\n AssertionError: The matrices aren't nearly equal up to global phase.\n \"\"\"\n actual, desired = linalg.match_global_phase(actual, desired)\n np.testing.assert_allclose(\n actual=actual,\n desired=desired,\n rtol=rtol,\n atol=atol,\n equal_nan=equal_nan,\n err_msg=err_msg,\n verbose=verbose)\n", "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Iterable, Optional, Sequence, TYPE_CHECKING, Type, cast\n\nfrom collections import defaultdict\nimport itertools\nimport numpy as np\nimport sympy\n\nfrom cirq import circuits, ops, linalg, protocols, EigenGate\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n from typing import Dict, List\n\n\ndef highlight_text_differences(actual: str, expected: str) -> str:\n diff = \"\"\n for actual_line, desired_line in itertools.zip_longest(\n actual.splitlines(), expected.splitlines(),\n fillvalue=\"\"):\n diff += \"\".join(a if a == b else \"█\"\n for a, b in itertools.zip_longest(\n actual_line, desired_line, fillvalue=\"\")) + \"\\n\"\n return diff\n\n\ndef _measurement_subspaces(\n measured_qubits: Iterable[ops.Qid],\n n_qubits: int\n) -> Sequence[Sequence[int]]:\n \"\"\"Computes subspaces associated with projective measurement.\n\n The function computes a partitioning of the computational basis such\n that the subspace spanned by each partition corresponds to a distinct\n measurement outcome. In particular, if all qubits are measured then\n 2**n singleton partitions are returned. If no qubits are measured then\n a single partition consisting of all basis states is returned.\n\n Args:\n measured_qubits: Qubits subject to measurement\n n_qubits: Total number of qubits in circuit\n Returns:\n Sequence of subspaces where each subspace is a sequence of\n computational basis states in order corresponding to qubit_order\n \"\"\"\n\n # Consider projective measurement in the computational basis on a subset\n # of qubits. Each projection operator associated with the measurement is\n # uniquely determined by its range, here called a measurement subspace.\n #\n # Suppose that qubit q is not measured. Then computational basis states\n # whose indices have binary representations that differ only at position\n # q belong to the same measurement subspace. Generally, if computational\n # basis states a and b are such that\n #\n # a & measurement_mask == b & measurement_mask\n #\n # then a and b belong to the same measurement subspace. 
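As a concrete instance of the grouping rule just described, a minimal sketch (the mask and bit values below are illustrative, not taken from the test module):

measurement_mask = 0b011   # suppose the two lowest bits correspond to measured qubits
a, b = 0b010, 0b110        # these basis states differ only on a masked-out (unmeasured) bit

# Equal masked values, so a and b land in the same measurement subspace.
assert a & measurement_mask == b & measurement_mask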
In this case the\n # value of the expression on either side in the formula above is the\n # computational basis state in the measurement subspace containing\n # a and b which has the lowest index.\n measurement_mask = 0\n for i, _ in enumerate(sorted(measured_qubits)):\n measurement_mask |= 1 << i\n\n # Keyed by computational basis state with lowest index.\n measurement_subspaces = defaultdict(list) # type: Dict[int, List[int]]\n computational_basis = range(1 << n_qubits)\n\n for basis_state in computational_basis:\n subspace_key = basis_state & measurement_mask\n measurement_subspaces[subspace_key].append(basis_state)\n\n subspaces = list(measurement_subspaces.values())\n\n # Verify this is a partitioning (i.e. full coverage, no overlaps).\n assert sorted(itertools.chain(*subspaces)) == list(computational_basis)\n\n return subspaces\n\n\ndef assert_circuits_with_terminal_measurements_are_equivalent(\n actual: circuits.Circuit,\n reference: circuits.Circuit,\n atol: float) -> None:\n \"\"\"Determines if two circuits have equivalent effects.\n\n The circuits can contain measurements, but the measurements must be at the\n end of the circuit. Circuits are equivalent if, for all possible inputs,\n their outputs (classical bits for lines terminated with measurement and\n qubits for lines without measurement) are observationally indistinguishable\n up to a tolerance. Note that under this definition of equivalence circuits\n that differ solely in the overall phase of the post-measurement state of\n measured qubits are considered equivalent.\n\n For example, applying an extra Z gate to an unmeasured qubit changes the\n effect of a circuit. But inserting a Z gate operation just before a\n measurement does not.\n\n Args:\n actual: The circuit that was actually computed by some process.\n reference: A circuit with the correct function.\n atol: Absolute error tolerance.\n \"\"\"\n measured_qubits_actual = {qubit\n for op in actual.all_operations()\n if protocols.is_measurement(op)\n for qubit in op.qubits}\n measured_qubits_reference = {qubit\n for op in reference.all_operations()\n if protocols.is_measurement(op)\n for qubit in op.qubits}\n assert actual.are_all_measurements_terminal()\n assert reference.are_all_measurements_terminal()\n assert measured_qubits_actual == measured_qubits_reference\n\n all_qubits = actual.all_qubits().union(reference.all_qubits())\n\n matrix_actual = actual.unitary(qubits_that_should_be_present=all_qubits)\n matrix_reference = reference.unitary(\n qubits_that_should_be_present=all_qubits)\n\n n_qubits = len(all_qubits)\n n = matrix_actual.shape[0]\n assert n == 1 << n_qubits\n assert matrix_actual.shape == matrix_reference.shape == (n, n)\n\n # Consider the action of the two circuits Ca and Cr on state |x>:\n #\n # |ya> = Ca|x>\n # |yr> = Cr|x>\n #\n # Ca and Cr are equivalent according to the definition above iff\n # for each |x>:\n # - probability of each measurement outcome is the same for |ya>\n # and |yr> (across measured qubits),\n # - amplitudes of each post-measurement state are the same for |ya>\n # and |yr> except perhaps for an overall phase factor.\n #\n # These conditions are satisfied iff the matrices of the two circuits\n # are identical except perhaps for an overall phase factor for each\n # rectangular block spanning rows corresponding to the measurement\n # subspaces and all columns.\n #\n # Note two special cases of the rule above:\n # - if no qubits are measured then the circuits are equivalent if\n # their matrices are identical except for the 
global phase factor,\n # - if all qubits are measured then the circuits are equivalent if\n # their matrices differ by a diagonal unitary factor.\n subspaces = _measurement_subspaces(measured_qubits_actual, n_qubits)\n for subspace in subspaces:\n block_actual = matrix_actual[subspace, :]\n block_reference = matrix_reference[subspace, :]\n assert linalg.allclose_up_to_global_phase(\n block_actual, block_reference, atol=atol), (\n \"Circuit's effect differs from the reference circuit.\\n\"\n '\\n'\n 'Diagram of actual circuit:\\n'\n '{}\\n'\n '\\n'\n 'Diagram of reference circuit with desired function:\\n'\n '{}\\n'.format(actual, reference))\n\n\ndef assert_same_circuits(actual: circuits.Circuit,\n expected: circuits.Circuit,\n ) -> None:\n \"\"\"Asserts that two circuits are identical, with a descriptive error.\n\n Args:\n actual: A circuit computed by some code under test.\n expected: The circuit that should have been computed.\n \"\"\"\n assert actual == expected, (\n \"Actual circuit differs from expected circuit.\\n\"\n \"\\n\"\n \"Diagram of actual circuit:\\n\"\n \"{}\\n\"\n \"\\n\"\n \"Diagram of expected circuit:\\n\"\n \"{}\\n\"\n \"\\n\"\n \"Index of first differing moment:\\n\"\n \"{}\\n\"\n \"\\n\"\n \"Full repr of actual circuit:\\n\"\n \"{!r}\\n\"\n \"\\n\"\n \"Full repr of expected circuit:\\n\"\n \"{!r}\\n\").format(actual,\n expected,\n _first_differing_moment_index(actual, expected),\n actual,\n expected)\n\n\ndef _first_differing_moment_index(circuit1: circuits.Circuit,\n circuit2: circuits.Circuit) -> Optional[int]:\n for i, (m1, m2) in enumerate(itertools.zip_longest(circuit1, circuit2)):\n if m1 != m2:\n return i\n return None # coverage: ignore\n\n\ndef assert_has_diagram(\n actual: circuits.Circuit,\n desired: str,\n **kwargs) -> None:\n \"\"\"Determines if a given circuit has the desired text diagram.\n\n Args:\n actual: The circuit that was actually computed by some process.\n desired: The desired text diagram as a string. Newlines at the\n beginning and whitespace at the end are ignored.\n **kwargs: Keyword arguments to be passed to actual.to_text_diagram().\n \"\"\"\n actual_diagram = actual.to_text_diagram(**kwargs).lstrip(\"\\n\").rstrip()\n desired_diagram = desired.lstrip(\"\\n\").rstrip()\n assert actual_diagram == desired_diagram, (\n \"Circuit's text diagram differs from the desired diagram.\\n\"\n '\\n'\n 'Diagram of actual circuit:\\n'\n '{}\\n'\n '\\n'\n 'Desired text diagram:\\n'\n '{}\\n'\n '\\n'\n 'Highlighted differences:\\n'\n '{}\\n'.format(actual_diagram, desired_diagram,\n highlight_text_differences(actual_diagram,\n desired_diagram))\n )\n\n\ndef assert_has_consistent_apply_unitary(\n val: Any,\n *,\n qubit_count: Optional[int] = None,\n atol: float=1e-8) -> None:\n \"\"\"Tests whether a value's _apply_unitary_ is correct.\n\n Contrasts the effects of the value's `_apply_unitary_` with the\n matrix returned by the value's `_unitary_` method.\n\n Args:\n val: The value under test. Should have a `__pow__` method.\n qubit_count: Usually inferred. 
The number of qubits the value acts on.\n This argument isn't needed if the gate has a unitary matrix or\n implements `cirq.SingleQubitGate`/`cirq.TwoQubitGate`/\n `cirq.ThreeQubitGate`.\n atol: Absolute error tolerance.\n \"\"\"\n\n expected = protocols.unitary(val, default=None)\n\n qubit_counts = [\n qubit_count,\n expected.shape[0].bit_length() - 1 if expected is not None else None,\n _infer_qubit_count(val)\n ]\n qubit_counts = [e for e in qubit_counts if e is not None]\n if not qubit_counts:\n raise NotImplementedError(\n 'Failed to infer qubit count of <{!r}>. Specify it.'.format(\n val))\n assert len(set(qubit_counts)) == 1, (\n 'Inconsistent qubit counts from different methods: {}'.format(\n qubit_counts))\n n = cast(int, qubit_counts[0])\n\n eye = np.eye(2 << n, dtype=np.complex128).reshape((2,) * (2 * n + 2))\n actual = protocols.apply_unitary(\n unitary_value=val,\n args=protocols.ApplyUnitaryArgs(\n target_tensor=eye,\n available_buffer=np.ones_like(eye) * float('nan'),\n axes=list(range(1, n + 1))),\n default=None)\n\n # If you don't have a unitary, you shouldn't be able to apply a unitary.\n if expected is None:\n assert actual is None\n else:\n expected = np.kron(np.eye(2), expected)\n\n # If you applied a unitary, it should match the one you say you have.\n if actual is not None:\n np.testing.assert_allclose(\n actual.reshape(2 << n, 2 << n),\n expected,\n atol=atol)\n\n\ndef assert_eigen_gate_has_consistent_apply_unitary(\n eigen_gate_type: Type[EigenGate],\n *,\n exponents=(0, 1, -1, 0.5, 0.25, -0.5, 0.1, sympy.Symbol('s')),\n global_shifts=(0, 0.5, -0.5, 0.1),\n qubit_count: Optional[int] = None) -> None:\n \"\"\"Tests whether an EigenGate type's _apply_unitary_ is correct.\n\n Contrasts the effects of the gate's `_apply_unitary_` with the\n matrix returned by the gate's `_unitary_` method, trying various values for\n the gate exponent and global shift.\n\n Args:\n eigen_gate_type: The type of gate to test. The type must have an\n __init__ method that takes an exponent and a global_shift.\n exponents: The exponents to try. Defaults to a variety of special and\n arbitrary angles, as well as a parameterized angle (a symbol).\n global_shifts: The global shifts to try. Defaults to a variety of\n special angles.\n qubit_count: The qubit count to use for the gate. This argument isn't\n needed if the gate has a unitary matrix or implements\n `cirq.SingleQubitGate`/`cirq.TwoQubitGate`/`cirq.ThreeQubitGate`; it\n will be inferred.\n \"\"\"\n for exponent in exponents:\n for shift in global_shifts:\n assert_has_consistent_apply_unitary(\n eigen_gate_type(exponent=exponent, global_shift=shift),\n qubit_count=qubit_count)\n\n\ndef assert_has_consistent_apply_unitary_for_various_exponents(\n val: Any,\n *,\n exponents=(0, 1, -1, 0.5, 0.25, -0.5, 0.1, sympy.Symbol('s')),\n qubit_count: Optional[int] = None) -> None:\n \"\"\"Tests whether a value's _apply_unitary_ is correct.\n\n Contrasts the effects of the value's `_apply_unitary_` with the\n matrix returned by the value's `_unitary_` method. Attempts this after\n attempting to raise the value to several exponents.\n\n Args:\n val: The value under test. Should have a `__pow__` method.\n exponents: The exponents to try. Defaults to a variety of special and\n arbitrary angles, as well as a parameterized angle (a symbol). If\n the value's `__pow__` returns `NotImplemented` for any of these,\n they are skipped.\n qubit_count: A minimum qubit count for the test system. 
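A short sketch of how these consistency checks are typically invoked, assuming they are exported under `cirq.testing` as in released Cirq versions (the chosen gates are arbitrary examples):

import cirq

# Compare X's _apply_unitary_ against the matrix from its _unitary_ method.
cirq.testing.assert_has_consistent_apply_unitary(cirq.X)

# Sweep exponents and global shifts for an EigenGate subclass.
cirq.testing.assert_eigen_gate_has_consistent_apply_unitary(cirq.ZPowGate)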
This argument\n isn't needed if the gate has a unitary matrix or implements\n `cirq.SingleQubitGate`/`cirq.TwoQubitGate`/`cirq.ThreeQubitGate`; it\n will be inferred.\n \"\"\"\n for exponent in exponents:\n gate = protocols.pow(val, exponent, default=None)\n if gate is not None:\n assert_has_consistent_apply_unitary(\n gate,\n qubit_count=qubit_count)\n\n\ndef assert_has_consistent_qid_shape(val: Any,\n qubit_count: Optional[int] = None) -> None:\n \"\"\"Tests whether a value's `_qid_shape_` and `_num_qubits_` are correct and\n consistent.\n\n Verifies that the entries in the shape are all positive integers and the\n length of shape equals `_num_qubits_` (and also equals `len(qubits)` if\n `val` has `qubits`.\n\n Args:\n val: The value under test. Should have `_qid_shape_` and/or\n `num_qubits_` methods. Can optionally have a `qubits` property.\n qubit_count: The expected number of qubits val should use.\n \"\"\"\n default = (-1,)\n qid_shape = protocols.qid_shape(val, default)\n num_qubits = protocols.num_qubits(val, default)\n if qid_shape is default or num_qubits is default:\n return # Nothing to check\n assert all(d >= 1 for d in qid_shape), (\n f'Not all entries in qid_shape are positive: {qid_shape}')\n assert len(qid_shape) == num_qubits, (\n f'Length of qid_shape and num_qubits disagree: {qid_shape}, '\n f'{num_qubits}')\n if qubit_count is not None:\n assert qubit_count == num_qubits, (\n f'Expected qubits and num_qubits disagree: {qubit_count}, '\n f'{num_qubits}')\n infer_qubit_count = _infer_qubit_count(val)\n if infer_qubit_count is not None:\n assert infer_qubit_count == num_qubits, (\n f'Length of qubits and num_qubits disagree: {infer_qubit_count}, '\n f'{num_qubits}')\n\n\ndef _infer_qubit_count(val: Any) -> Optional[int]:\n if isinstance(val, ops.Operation):\n return len(val.qubits)\n if isinstance(val, ops.Gate):\n return protocols.num_qubits(val)\n return None\n", "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines trial results.\"\"\"\n\nfrom typing import (\n Iterable, Callable, Tuple, TypeVar, Dict, Any, TYPE_CHECKING, Union\n)\n\nimport collections\nimport numpy as np\nimport pandas as pd\n\nfrom cirq import value, ops\nfrom cirq._compat import proper_repr\nfrom cirq.study import resolver\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n import cirq\n\nT = TypeVar('T')\nTMeasurementKey = Union[str, 'cirq.Qid', Iterable['cirq.Qid']]\n\n\ndef _tuple_of_big_endian_int(bit_groups: Iterable[Any]) -> Tuple[int, ...]:\n \"\"\"Returns the big-endian integers specified by groups of bits.\n\n Args:\n bit_groups: Groups of descending bits, each specifying a big endian\n integer with the 1s bit at the end.\n\n Returns:\n A tuple containing the integer for each group.\n \"\"\"\n return tuple(_big_endian_int(bits) for bits in bit_groups)\n\n\ndef _big_endian_int(bits: Iterable[Any]) -> int:\n \"\"\"Returns the big-endian integer specified by the given bits.\n\n For example, [True, False, False, True, False] becomes binary 
10010 which\n is 18 in decimal.\n\n Args:\n bits: Descending bits of the integer, with the 1s bit at the end.\n\n Returns:\n The integer.\n \"\"\"\n result = 0\n for e in bits:\n result <<= 1\n if e:\n result |= 1\n return result\n\n\ndef _bitstring(vals: Iterable[Any]) -> str:\n return ''.join('1' if v else '0' for v in vals)\n\n\ndef _keyed_repeated_bitstrings(measurements: pd.DataFrame) -> str:\n keyed_bitstrings = []\n for key in sorted(measurements.columns):\n reps = pd.DataFrame(measurements[key].to_list())\n all_bits = ', '.join(reps.apply(_bitstring, axis=0))\n keyed_bitstrings.append('{}={}'.format(key, all_bits))\n return '\\n'.join(keyed_bitstrings)\n\n\ndef _key_to_str(key: TMeasurementKey) -> str:\n if isinstance(key, str):\n return key\n if isinstance(key, ops.Qid):\n return str(key)\n return ','.join(str(q) for q in key)\n\n\ndef _to_dict(measurements: pd.DataFrame) -> Dict[str, np.ndarray]:\n repr_dict = {}\n for key in sorted(measurements.columns):\n repr_dict[key] = np.array(measurements[key].to_list())\n return repr_dict\n\n\[email protected]_equality(unhashable=True)\nclass TrialResult:\n \"\"\"The results of multiple executions of a circuit with fixed parameters.\n Stored as a Pandas DataFrame that can be accessed through the \"data\"\n attribute. The repitition number is the row index and measurement keys\n are the columns of the DataFrame. Each element is a Pandas Series of\n measurement outcomes per bit for the measurement key in that repitition.\n\n Attributes:\n params: A ParamResolver of settings used when sampling result.\n measurements: A dictionary from measurement gate key to measurement\n results. Measurement results are stored in a 2-dimensional\n numpy array, the first dimension corresponding to the repetition\n and the second to the actual boolean measurement results (ordered\n by the qubits being measured.)\n \"\"\"\n\n def __init__(\n self,\n *, # Forces keyword args.\n params: resolver.ParamResolver,\n measurements: Dict[str, np.ndarray]) -> None:\n \"\"\"\n Args:\n params: A ParamResolver of settings used for this result.\n measurements: A dictionary from measurement gate key to measurement\n results. The value for each key is a 2-D array of booleans,\n with the first index running over the repetitions, and the\n second index running over the qubits for the corresponding\n measurements.\n \"\"\"\n self.params = params\n\n # Convert to a DataFrame with columns as measurement keys, rows as\n # repetitions and a Series of measurements for a particular key and\n # repetition as the value.\n converted_dict = {}\n for key, val in measurements.items():\n converted_dict[key] = [pd.Series(m_vals) for m_vals in val]\n self.data = pd.DataFrame(converted_dict)\n\n @staticmethod\n def from_single_parameter_set(\n *, # Forces keyword args.\n params: resolver.ParamResolver,\n measurements: Dict[str, np.ndarray]) -> 'TrialResult':\n \"\"\"Packages runs of a single parameterized circuit into a TrialResult.\n\n Args:\n params: A ParamResolver of settings used for this result.\n measurements: A dictionary from measurement gate key to measurement\n results. 
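A hedged illustration of the big-endian convention and the keyword-only constructor described above (the measurement key 'm' and the bit values are made up for the example):

import numpy as np
import cirq

# Two repetitions of a three-qubit measurement stored under key 'm'.
result = cirq.TrialResult(
    params=cirq.ParamResolver({}),
    measurements={'m': np.array([[1, 0, 0], [0, 1, 0]])})

result.repetitions            # 2
result.measurements['m'][0]   # array([1, 0, 0]) -> big-endian integer 0b100 == 4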
The value for each key is a 2-D array of booleans,\n with the first index running over the repetitions, and the\n second index running over the qubits for the corresponding\n measurements.\n \"\"\"\n return TrialResult(params=params, measurements=measurements)\n\n # Keep the old instance variables for test compatibility.\n @property\n def measurements(self) -> Dict[str, np.ndarray]:\n return _to_dict(self.data)\n\n @property\n def repetitions(self) -> int:\n return self.data.shape[0]\n\n # Reason for 'type: ignore': https://github.com/python/mypy/issues/5273\n def multi_measurement_histogram( # type: ignore\n self,\n *, # Forces keyword args.\n keys: Iterable[TMeasurementKey],\n fold_func: Callable[[pd.Series], T] = _tuple_of_big_endian_int\n ) -> collections.Counter:\n \"\"\"Counts the number of times combined measurement results occurred.\n\n This is a more general version of the 'histogram' method. Instead of\n only counting how often results occurred for one specific measurement,\n this method tensors multiple measurement results together and counts\n how often the combined results occurred.\n\n For example, suppose that:\n\n - fold_func is not specified\n - keys=['abc', 'd']\n - the measurement with key 'abc' measures qubits a, b, and c.\n - the measurement with key 'd' measures qubit d.\n - the circuit was sampled 3 times.\n - the sampled measurement values were:\n 1. a=1 b=0 c=0 d=0\n 2. a=0 b=1 c=0 d=1\n 3. a=1 b=0 c=0 d=0\n\n Then the counter returned by this method will be:\n\n collections.Counter({\n (0b100, 0): 2,\n (0b010, 1): 1\n })\n\n\n Where '0b100' is binary for '4' and '0b010' is binary for '2'. Notice\n that the bits are combined in a big-endian way by default, with the\n first measured qubit determining the highest-value bit.\n\n Args:\n fold_func: A function used to convert sampled measurement results\n into countable values. The input is a tuple containing the\n list of bits measured by each measurement specified by the\n keys argument. If this argument is not specified, it defaults\n to returning tuples of integers, where each integer is the big\n endian interpretation of the bits a measurement sampled.\n keys: Keys of measurements to include in the histogram.\n\n Returns:\n A counter indicating how often measurements sampled various\n results.\n \"\"\"\n fixed_keys = [_key_to_str(key) for key in keys]\n samples = self.data[fixed_keys]\n c = collections.Counter() # type: collections.Counter\n for i in range(self.repetitions):\n c[fold_func(samples.iloc[i])] += 1\n return c\n\n # Reason for 'type: ignore': https://github.com/python/mypy/issues/5273\n def histogram( # type: ignore\n self,\n *, # Forces keyword args.\n key: TMeasurementKey,\n fold_func: Callable[[pd.Series], T] = _big_endian_int\n ) -> collections.Counter:\n \"\"\"Counts the number of times a measurement result occurred.\n\n For example, suppose that:\n\n - fold_func is not specified\n - key='abc'\n - the measurement with key 'abc' measures qubits a, b, and c.\n - the circuit was sampled 3 times.\n - the sampled measurement values were:\n 1. a=1 b=0 c=0\n 2. a=0 b=1 c=0\n 3. a=1 b=0 c=0\n\n Then the counter returned by this method will be:\n\n collections.Counter({\n 0b100: 2,\n 0b010: 1\n })\n\n Where '0b100' is binary for '4' and '0b010' is binary for '2'. 
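The docstring example above can be reproduced directly; the array below just restates its sampled values for key 'abc' (a sketch, not part of the stored source file):

import collections
import numpy as np
import cirq

result = cirq.TrialResult(
    params=cirq.ParamResolver({}),
    measurements={'abc': np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0]])})

# Same counts as in the docstring: 0b100 observed twice, 0b010 once.
assert result.histogram(key='abc') == collections.Counter({0b100: 2, 0b010: 1})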
Notice\n that the bits are combined in a big-endian way by default, with the\n first measured qubit determining the highest-value bit.\n\n Args:\n key: Keys of measurements to include in the histogram.\n fold_func: A function used to convert a sampled measurement result\n into a countable value. The input is a list of bits sampled\n together by a measurement. If this argument is not specified,\n it defaults to interpreting the bits as a big endian\n integer.\n\n Returns:\n A counter indicating how often a measurement sampled various\n results.\n \"\"\"\n return self.multi_measurement_histogram(\n keys=[key], fold_func=lambda e: fold_func(e.iloc[0]))\n\n def __repr__(self):\n\n def item_repr(entry):\n key, val = entry\n return '{!r}: {}'.format(key, proper_repr(val))\n\n measurement_dict_repr = (\n '{' + ', '.join([item_repr(e) for e in self.measurements.items()]) +\n '}')\n\n return 'cirq.TrialResult(params={!r}, measurements={})'.format(\n self.params, measurement_dict_repr)\n\n def _repr_pretty_(self, p: Any, cycle: bool) -> None:\n \"\"\"Output to show in ipython and Jupyter notebooks.\"\"\"\n if cycle:\n # There should never be a cycle. This is just in case.\n p.text('TrialResult(...)')\n else:\n p.text(str(self))\n\n def __str__(self):\n return _keyed_repeated_bitstrings(self.data)\n\n def _value_equality_values_(self):\n return repr(self.data), self.params\n", "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\n\nimport numpy as np\nimport pytest\nimport scipy.linalg\n\nimport cirq\n\nI = np.eye(2)\nX = np.array([[0, 1], [1, 0]])\nY = np.array([[0, -1j], [1j, 0]])\nZ = np.array([[1, 0], [0, -1]])\nH = np.array([[1, 1], [1, -1]]) * np.sqrt(0.5)\nSQRT_X = np.array([[np.sqrt(1j), np.sqrt(-1j)],\n [np.sqrt(-1j), np.sqrt(1j)]]) * np.sqrt(0.5)\nSQRT_Y = np.array([[np.sqrt(1j), -np.sqrt(1j)],\n [np.sqrt(1j), np.sqrt(1j)]]) * np.sqrt(0.5)\nSQRT_Z = np.diag([1, 1j])\nE00 = np.diag([1, 0])\nE01 = np.array([[0, 1], [0, 0]])\nE10 = np.array([[0, 0], [1, 0]])\nE11 = np.diag([0, 1])\nPAULI_BASIS = cirq.PAULI_BASIS\nSTANDARD_BASIS = {'a': E00, 'b': E01, 'c': E10, 'd': E11}\n\n\ndef _one_hot_matrix(size: int, i: int, j: int) -> np.ndarray:\n result = np.zeros((size, size))\n result[i, j] = 1\n return result\n\n\[email protected]('basis1, basis2, expected_kron_basis', (\n (PAULI_BASIS, PAULI_BASIS, {\n 'II': np.eye(4),\n 'IX': scipy.linalg.block_diag(X, X),\n 'IY': scipy.linalg.block_diag(Y, Y),\n 'IZ': np.diag([1, -1, 1, -1]),\n 'XI': np.array([[0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0]]),\n 'XX': np.rot90(np.eye(4)),\n 'XY': np.rot90(np.diag([1j, -1j, 1j, -1j])),\n 'XZ': np.array([[0, 0, 1, 0],\n [0, 0, 0, -1],\n [1, 0, 0, 0],\n [0, -1, 0, 0]]),\n 'YI': np.array([[0, 0, -1j, 0],\n [0, 0, 0, -1j],\n [1j, 0, 0, 0],\n [0, 1j, 0, 0]]),\n 'YX': np.rot90(np.diag([1j, 1j, -1j, -1j])),\n 'YY': np.rot90(np.diag([-1, 1, 1, -1])),\n 'YZ': np.array([[0, 0, -1j, 0],\n [0, 0, 0, 1j],\n [1j, 0, 0, 0],\n [0, -1j, 0, 0]]),\n 'ZI': np.diag([1, 
1, -1, -1]),\n 'ZX': scipy.linalg.block_diag(X, -X),\n 'ZY': scipy.linalg.block_diag(Y, -Y),\n 'ZZ': np.diag([1, -1, -1, 1]),\n }),\n (STANDARD_BASIS, STANDARD_BASIS, {\n 'abcd'[2 * row_outer + col_outer] + 'abcd'[2 * row_inner + col_inner]:\n _one_hot_matrix(4, 2 * row_outer + row_inner, 2 * col_outer + col_inner)\n for row_outer in range(2)\n for row_inner in range(2)\n for col_outer in range(2)\n for col_inner in range(2)\n }),\n))\ndef test_kron_bases(basis1, basis2, expected_kron_basis):\n kron_basis = cirq.kron_bases(basis1, basis2)\n assert len(kron_basis) == 16\n assert set(kron_basis.keys()) == set(expected_kron_basis.keys())\n for name in kron_basis.keys():\n assert np.all(kron_basis[name] == expected_kron_basis[name])\n\n\[email protected]('basis1,basis2', (\n (PAULI_BASIS, cirq.kron_bases(PAULI_BASIS)),\n (STANDARD_BASIS, cirq.kron_bases(STANDARD_BASIS, repeat=1)),\n (cirq.kron_bases(PAULI_BASIS, PAULI_BASIS),\n cirq.kron_bases(PAULI_BASIS, repeat=2)),\n (cirq.kron_bases(\n cirq.kron_bases(PAULI_BASIS, repeat=2),\n cirq.kron_bases(PAULI_BASIS, repeat=3),\n PAULI_BASIS),\n cirq.kron_bases(PAULI_BASIS, repeat=6)),\n (cirq.kron_bases(\n cirq.kron_bases(PAULI_BASIS, STANDARD_BASIS),\n cirq.kron_bases(PAULI_BASIS, STANDARD_BASIS)),\n cirq.kron_bases(PAULI_BASIS, STANDARD_BASIS, repeat=2)),\n))\ndef test_kron_bases_consistency(basis1, basis2):\n assert set(basis1.keys()) == set(basis2.keys())\n for name in basis1.keys():\n assert np.all(basis1[name] == basis2[name])\n\n\[email protected]('basis,repeat', itertools.product(\n (PAULI_BASIS, STANDARD_BASIS),\n range(1, 5)\n))\ndef test_kron_bases_repeat_sanity_checks(basis, repeat):\n product_basis = cirq.kron_bases(basis, repeat=repeat)\n assert len(product_basis) == 4**repeat\n for name1, matrix1 in product_basis.items():\n for name2, matrix2 in product_basis.items():\n p = cirq.hilbert_schmidt_inner_product(matrix1, matrix2)\n if name1 != name2:\n assert p == 0\n else:\n assert abs(p) >= 1\n\n\[email protected]('m1,m2,expect_real', (\n (X, X, True),\n (X, Y, True),\n (X, H, True),\n (X, SQRT_X, False),\n (I, SQRT_Z, False),\n))\ndef test_hilbert_schmidt_inner_product_is_conjugate_symmetric(\n m1, m2, expect_real):\n v1 = cirq.hilbert_schmidt_inner_product(m1, m2)\n v2 = cirq.hilbert_schmidt_inner_product(m2, m1)\n assert v1 == v2.conjugate()\n\n assert np.isreal(v1) == expect_real\n if not expect_real:\n assert v1 != v2\n\n\[email protected]('a,m1,b,m2', (\n (1, X, 1, Z),\n (2, X, 3, Y),\n (2j, X, 3, I),\n (2, X, 3, X),\n))\ndef test_hilbert_schmidt_inner_product_is_linear(a, m1, b, m2):\n v1 = cirq.hilbert_schmidt_inner_product(H, (a * m1 + b * m2))\n v2 = (a * cirq.hilbert_schmidt_inner_product(H, m1) +\n b * cirq.hilbert_schmidt_inner_product(H, m2))\n assert v1 == v2\n\n\[email protected]('m', (I, X, Y, Z, H, SQRT_X, SQRT_Y, SQRT_Z))\ndef test_hilbert_schmidt_inner_product_is_positive_definite(m):\n v = cirq.hilbert_schmidt_inner_product(m, m)\n assert np.isreal(v)\n assert v.real > 0\n\n\[email protected]('m1,m2,expected_value', (\n (X, I, 0),\n (X, X, 2),\n (X, Y, 0),\n (X, Z, 0),\n (H, X, np.sqrt(2)),\n (H, Y, 0),\n (H, Z, np.sqrt(2)),\n (Z, E00, 1),\n (Z, E01, 0),\n (Z, E10, 0),\n (Z, E11, -1),\n (SQRT_X, E00, np.sqrt(-.5j)),\n (SQRT_X, E01, np.sqrt(.5j)),\n (SQRT_X, E10, np.sqrt(.5j)),\n (SQRT_X, E11, np.sqrt(-.5j)),\n))\ndef test_hilbert_schmidt_inner_product_values(m1, m2, expected_value):\n v = cirq.hilbert_schmidt_inner_product(m1, m2)\n assert np.isclose(v, expected_value)\n\n\[email protected]('m,basis', 
itertools.product(\n (I, X, Y, Z, H, SQRT_X, SQRT_Y, SQRT_Z),\n (PAULI_BASIS, STANDARD_BASIS),\n))\ndef test_expand_matrix_in_orthogonal_basis(m, basis):\n expansion = cirq.expand_matrix_in_orthogonal_basis(m, basis)\n\n reconstructed = np.zeros(m.shape, dtype=complex)\n for name, coefficient in expansion.items():\n reconstructed += coefficient * basis[name]\n assert np.allclose(m, reconstructed)\n\n\[email protected]('expansion', (\n {'I': 1}, {'X': 1}, {'Y': 1}, {'Z': 1}, {'X': 1, 'Z': 1},\n {'I': 0.5, 'X': 0.4, 'Y': 0.3, 'Z': 0.2},\n {'I': 1, 'X': 2, 'Y': 3, 'Z': 4},\n))\ndef test_matrix_from_basis_coefficients(expansion):\n m = cirq.matrix_from_basis_coefficients(expansion, PAULI_BASIS)\n\n for name, coefficient in expansion.items():\n element = PAULI_BASIS[name]\n expected_coefficient = (\n cirq.hilbert_schmidt_inner_product(m, element) /\n cirq.hilbert_schmidt_inner_product(element, element)\n )\n assert np.isclose(coefficient, expected_coefficient)\n\n\[email protected](\n 'm1,basis', (\n itertools.product(\n (I, X, Y, Z, H, SQRT_X, SQRT_Y, SQRT_Z, E00, E01, E10, E11),\n (PAULI_BASIS, STANDARD_BASIS),\n )\n))\ndef test_expand_is_inverse_of_reconstruct(m1, basis):\n c1 = cirq.expand_matrix_in_orthogonal_basis(m1, basis)\n m2 = cirq.matrix_from_basis_coefficients(c1, basis)\n c2 = cirq.expand_matrix_in_orthogonal_basis(m2, basis)\n assert np.allclose(m1, m2)\n assert c1 == c2\n", "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom fractions import Fraction\nfrom decimal import Decimal\nfrom numbers import Number\nimport numpy as np\nimport cirq\n\n\ndef test_approx_eq_primitives():\n assert not cirq.approx_eq(1, 2, atol=1e-01)\n assert cirq.approx_eq(1.0, 1.0 + 1e-10, atol=1e-09)\n assert not cirq.approx_eq(1.0, 1.0 + 1e-10, atol=1e-11)\n assert cirq.approx_eq(0.0, 1e-10, atol=1e-09)\n assert not cirq.approx_eq(0.0, 1e-10, atol=1e-11)\n assert cirq.approx_eq(complex(1, 1), complex(1.1, 1.2), atol=0.3)\n assert not cirq.approx_eq(complex(1, 1), complex(1.1, 1.2), atol=0.1)\n\n\ndef test_approx_eq_mixed_primitives():\n assert cirq.approx_eq(complex(1, 1e-10), 1, atol=1e-09)\n assert not cirq.approx_eq(complex(1, 1e-4), 1, atol=1e-09)\n assert cirq.approx_eq(complex(1, 1e-10), 1.0, atol=1e-09)\n assert not cirq.approx_eq(complex(1, 1e-8), 1.0, atol=1e-09)\n assert cirq.approx_eq(1, 1.0 + 1e-10, atol=1e-9)\n assert not cirq.approx_eq(1, 1.0 + 1e-10, atol=1e-11)\n\n\ndef test_numpy_dtype_compatibility():\n i_a, i_b, i_c = 0, 1, 2\n i_types = [np.intc, np.intp, np.int0, np.int8, np.int16, np.int32, np.int64]\n for i_type in i_types:\n assert cirq.approx_eq(i_type(i_a), i_type(i_b), atol=1)\n assert not cirq.approx_eq(i_type(i_a), i_type(i_c), atol=1)\n u_types = [np.uint, np.uint0, np.uint8, np.uint16, np.uint32, np.uint64]\n for u_type in u_types:\n assert cirq.approx_eq(u_type(i_a), u_type(i_b), atol=1)\n assert not cirq.approx_eq(u_type(i_a), u_type(i_c), atol=1)\n\n f_a, f_b, f_c = 0, 1e-8, 1\n f_types = [np.float16, np.float32, np.float64]\n 
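A hedged sketch tying together the basis utilities exercised in the preceding tests (`cirq.hilbert_schmidt_inner_product`, `cirq.expand_matrix_in_orthogonal_basis` and `cirq.matrix_from_basis_coefficients` are the public names used there):

import numpy as np
import cirq

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)

# Tr(H^dagger X) = sqrt(2), matching the expected value in the tests above.
print(cirq.hilbert_schmidt_inner_product(H, np.array([[0, 1], [1, 0]])))

# H = (X + Z) / sqrt(2), so the Pauli coefficients are 1/sqrt(2) on X and Z.
coeffs = cirq.expand_matrix_in_orthogonal_basis(H, cirq.PAULI_BASIS)
reconstructed = cirq.matrix_from_basis_coefficients(coeffs, cirq.PAULI_BASIS)
assert np.allclose(reconstructed, H)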
if hasattr(np, 'float128'):\n f_types.append(np.float128)\n for f_type in f_types:\n assert cirq.approx_eq(f_type(f_a), f_type(f_b), atol=1e-8)\n assert not cirq.approx_eq(f_type(f_a), f_type(f_c), atol=1e-8)\n\n c_a, c_b, c_c = 0, 1e-8j, 1j\n c_types = [np.complex64, np.complex128]\n if hasattr(np, 'complex256'):\n c_types.append(np.complex256)\n for c_type in c_types:\n assert cirq.approx_eq(c_type(c_a), c_type(c_b), atol=1e-8)\n assert not cirq.approx_eq(c_type(c_a), c_type(c_c), atol=1e-8)\n\n\ndef test_fractions_compatibility():\n assert cirq.approx_eq(Fraction(0), Fraction(1, int(1e10)), atol=1e-9)\n assert not cirq.approx_eq(Fraction(0), Fraction(1, int(1e7)), atol=1e-9)\n\n\ndef test_decimal_compatibility():\n assert cirq.approx_eq(Decimal('0'), Decimal('0.0000000001'), atol=1e-9)\n assert not cirq.approx_eq(Decimal('0'), Decimal('0.00000001'), atol=1e-9)\n assert not cirq.approx_eq(Decimal('NaN'), Decimal('-Infinity'), atol=1e-9)\n\n\ndef test_approx_eq_mixed_types():\n assert cirq.approx_eq(np.float32(1), 1.0 + 1e-10, atol=1e-9)\n assert cirq.approx_eq(np.float64(1), np.complex64(1 + 1e-8j), atol=1e-4)\n assert cirq.approx_eq(np.uint8(1), np.complex64(1 + 1e-8j), atol=1e-4)\n if hasattr(np, 'complex256'):\n assert cirq.approx_eq(np.complex256(1), complex(1, 1e-8), atol=1e-4)\n assert cirq.approx_eq(np.int32(1), 1, atol=1e-9)\n assert cirq.approx_eq(complex(0.5, 0), Fraction(1, 2), atol=0.0)\n assert cirq.approx_eq(0.5 + 1e-4j, Fraction(1, 2), atol=1e-4)\n assert cirq.approx_eq(0, Fraction(1, 100000000), atol=1e-8)\n assert cirq.approx_eq(np.uint16(1), Decimal('1'), atol=0.0)\n assert cirq.approx_eq(np.float64(1.0), Decimal('1.00000001'), atol=1e-8)\n assert not cirq.approx_eq(np.complex64(1e-5j), Decimal('0.001'), atol=1e-4)\n\n\ndef test_approx_eq_special_numerics():\n assert not cirq.approx_eq(float('nan'), 0, atol=0.0)\n assert not cirq.approx_eq(float('nan'), float('nan'), atol=0.0)\n assert not cirq.approx_eq(float('inf'), float('-inf'), atol=0.0)\n assert not cirq.approx_eq(float('inf'), 5, atol=0.0)\n assert not cirq.approx_eq(float('inf'), 0, atol=0.0)\n assert cirq.approx_eq(float('inf'), float('inf'), atol=0.0)\n\n\nclass X(Number):\n \"\"\"Subtype of Number that can fallback to __eq__\"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __eq__(self, other):\n if not isinstance(self, type(other)):\n return NotImplemented\n return self.val == other.val\n\n\nclass Y(Number):\n \"\"\"Subtype of Number that cannot fallback to __eq__\"\"\"\n\n def __init__(self):\n pass\n\n\ndef test_approx_eq_number_uses__eq__():\n assert cirq.approx_eq(C(0), C(0), atol=0.0)\n assert not cirq.approx_eq(X(0), X(1), atol=0.0)\n assert not cirq.approx_eq(X(0), 0, atol=0.0)\n assert not cirq.approx_eq(Y(), 1, atol=0.0)\n\n\ndef test_approx_eq_tuple():\n assert cirq.approx_eq((1, 1), (1, 1), atol=0.0)\n assert not cirq.approx_eq((1, 1), (1, 1, 1), atol=0.0)\n assert not cirq.approx_eq((1, 1), (1,), atol=0.0)\n assert cirq.approx_eq((1.1, 1.2, 1.3), (1, 1, 1), atol=0.4)\n assert not cirq.approx_eq((1.1, 1.2, 1.3), (1, 1, 1), atol=0.2)\n\n\ndef test_approx_eq_list():\n assert cirq.approx_eq([], [], atol=0.0)\n assert not cirq.approx_eq([], [[]], atol=0.0)\n assert cirq.approx_eq([1, 1], [1, 1], atol=0.0)\n assert not cirq.approx_eq([1, 1], [1, 1, 1], atol=0.0)\n assert not cirq.approx_eq([1, 1], [1,], atol=0.0)\n assert cirq.approx_eq([1.1, 1.2, 1.3], [1, 1, 1], atol=0.4)\n assert not cirq.approx_eq([1.1, 1.2, 1.3], [1, 1, 1], atol=0.2)\n\n\ndef test_approx_eq_default():\n assert 
cirq.approx_eq(1.0, 1.0 + 1e-9)\n assert cirq.approx_eq(1.0, 1.0 - 1e-9)\n assert not cirq.approx_eq(1.0, 1.0 + 1e-7)\n assert not cirq.approx_eq(1.0, 1.0 - 1e-7)\n\n\ndef test_approx_eq_iterables():\n def gen_1_1():\n yield 1\n yield 1\n assert cirq.approx_eq((1, 1), [1, 1], atol=0.0)\n assert cirq.approx_eq((1, 1), gen_1_1(), atol=0.0)\n assert cirq.approx_eq(gen_1_1(), [1, 1], atol=0.0)\n\n\nclass A:\n\n def __init__(self, val):\n self.val = val\n\n def _approx_eq_(self, other, atol):\n if not isinstance(self, type(other)):\n return NotImplemented\n return cirq.approx_eq(self.val, other.val, atol=atol)\n\n\nclass B:\n\n def __init__(self, val):\n self.val = val\n\n def _approx_eq_(self, other, atol):\n if not isinstance(self.val, type(other)):\n return NotImplemented\n return cirq.approx_eq(self.val, other, atol=atol)\n\n\ndef test_approx_eq_supported():\n assert cirq.approx_eq(A(0.0), A(0.1), atol=0.1)\n assert not cirq.approx_eq(A(0.0), A(0.1), atol=0.0)\n assert cirq.approx_eq(B(0.0), 0.1, atol=0.1)\n assert cirq.approx_eq(0.1, B(0.0), atol=0.1)\n\n\nclass C:\n\n def __init__(self, val):\n self.val = val\n\n def __eq__(self, other):\n if not isinstance(self, type(other)):\n return NotImplemented\n return self.val == other.val\n\n\ndef test_approx_eq_uses__eq__():\n assert cirq.approx_eq(C(0), C(0), atol=0.0)\n assert not cirq.approx_eq(C(1), C(2), atol=0.0)\n assert cirq.approx_eq([C(0)], [C(0)], atol=0.0)\n assert not cirq.approx_eq([C(1)], [C(2)], atol=0.0)\n assert cirq.approx_eq(complex(0, 0), 0, atol=0.0)\n assert cirq.approx_eq(0, complex(0, 0), atol=0.0)\n\n\ndef test_approx_eq_types_mismatch():\n assert not cirq.approx_eq(0, A(0), atol=0.0)\n assert not cirq.approx_eq(A(0), 0, atol=0.0)\n assert not cirq.approx_eq(B(0), A(0), atol=0.0)\n assert not cirq.approx_eq(A(0), B(0), atol=0.0)\n assert not cirq.approx_eq(C(0), A(0), atol=0.0)\n assert not cirq.approx_eq(A(0), C(0), atol=0.0)\n assert not cirq.approx_eq(0, [0], atol=1.0)\n assert not cirq.approx_eq([0], 0, atol=0.0)\n", "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n TypeVar,\n Union,\n Optional,\n)\n\nimport numpy as np\nfrom typing_extensions import Protocol\n\nfrom cirq.type_workarounds import NotImplementedType\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n import cirq\n\n# This is a special indicator value used by the unitary method to determine\n# whether or not the caller provided a 'default' argument. 
It must be of type\n# np.ndarray to ensure the method has the correct type signature in that case.\n# It is checked for using `is`, so it won't have a false positive if the user\n# provides a different np.array([]) value.\nRaiseTypeErrorIfNotProvided = np.array([]) # type: np.ndarray\n\nTDefault = TypeVar('TDefault')\n\n\nclass SupportsUnitary(Protocol):\n \"\"\"An object that may be describable by a unitary matrix.\"\"\"\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n \"\"\"A unitary matrix describing this value, e.g. the matrix of a gate.\n\n This method is used by the global `cirq.unitary` method. If this method\n is not present, or returns NotImplemented, it is assumed that the\n receiving object doesn't have a unitary matrix (resulting in a TypeError\n or default result when calling `cirq.unitary` on it). (The ability to\n return NotImplemented is useful when a class cannot know if it has a\n matrix until runtime, e.g. cirq.X**c normally has a matrix but\n cirq.X**sympy.Symbol('a') doesn't.)\n\n The order of cells in the matrix is always implicit with respect to the\n object being called. For example, for gates the matrix must be ordered\n with respect to the list of qubits that the gate is applied to. For\n operations, the matrix is ordered to match the list returned by its\n `qubits` attribute. The qubit-to-amplitude order mapping matches the\n ordering of numpy.kron(A, B), where A is a qubit earlier in the list\n than the qubit B.\n\n Returns:\n A unitary matrix describing this value, or NotImplemented if there\n is no such matrix.\n \"\"\"\n\n def _has_unitary_(self) -> bool:\n \"\"\"Whether this value has a unitary matrix representation.\n\n This method is used by the global `cirq.has_unitary` method. If this\n method is not present, or returns NotImplemented, it will fallback\n to using _unitary_ with a default value, or False if neither exist.\n\n Returns:\n True if the value has a unitary matrix representation, False\n otherwise.\n \"\"\"\n\n\ndef unitary(val: Any,\n default: TDefault = RaiseTypeErrorIfNotProvided\n ) -> Union[np.ndarray, TDefault]:\n \"\"\"Returns a unitary matrix describing the given value.\n\n The matrix is determined by any one of the following techniques:\n\n - The value has a `_unitary_` method that returns something besides None or\n NotImplemented. The matrix is whatever the method returned.\n - The value has a `_decompose_` method that returns a list of operations,\n and each operation in the list has a unitary effect. The matrix is\n created by aggregating the sub-operations' unitary effects.\n - The value has an `_apply_unitary_` method, and it returns something\n besides None or NotImplemented. The matrix is created by applying\n `_apply_unitary_` to an identity matrix.\n\n If none of these techniques succeeds, it is assumed that `val` doesn't have\n a unitary effect. The order in which techniques are attempted is\n unspecified.\n\n Args:\n val: The value to describe with a unitary matrix.\n default: Determines the fallback behavior when `val` doesn't have\n a unitary effect. If `default` is not set, a TypeError is raised. 
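A minimal sketch of an object participating in this protocol (the class name HasMatrix is illustrative, not a Cirq type):

import numpy as np
import cirq

class HasMatrix:
    """Illustrative object exposing a matrix through _unitary_."""

    def _unitary_(self):
        return np.array([[0, 1], [1, 0]])

print(cirq.unitary(HasMatrix()))             # the X matrix defined above
print(cirq.unitary(object(), default=None))  # no unitary effect -> falls back to None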
If\n `default` is set to a value, that value is returned.\n\n Returns:\n If `val` has a unitary effect, the corresponding unitary matrix.\n Otherwise, if `default` is specified, it is returned.\n\n Raises:\n TypeError: `val` doesn't have a unitary effect and no default value was\n specified.\n \"\"\"\n strats = [\n _strat_unitary_from_unitary, _strat_unitary_from_apply_unitary,\n _strat_unitary_from_decompose\n ]\n for strat in strats:\n result = strat(val)\n if result is None:\n break\n if result is not NotImplemented:\n return result\n\n if default is not RaiseTypeErrorIfNotProvided:\n return default\n raise TypeError(\n \"cirq.unitary failed. \"\n \"Value doesn't have a (non-parameterized) unitary effect.\\n\"\n \"\\n\"\n \"type: {}\\n\"\n \"value: {!r}\\n\"\n \"\\n\"\n \"The value failed to satisfy any of the following criteria:\\n\"\n \"- A `_unitary_(self)` method that returned a value \"\n \"besides None or NotImplemented.\\n\"\n \"- A `_decompose_(self)` method that returned a \"\n \"list of unitary operations.\\n\"\n \"- An `_apply_unitary_(self, args) method that returned a value \"\n \"besides None or NotImplemented.\".format(type(val), val))\n\n\ndef _strat_unitary_from_unitary(val: Any) -> Optional[np.ndarray]:\n \"\"\"Attempts to compute a value's unitary via its _unitary_ method.\"\"\"\n getter = getattr(val, '_unitary_', None)\n if getter is None:\n return NotImplemented\n return getter()\n\n\ndef _strat_unitary_from_apply_unitary(val: Any) -> Optional[np.ndarray]:\n \"\"\"Attempts to compute a value's unitary via its _apply_unitary_ method.\"\"\"\n from cirq.protocols.apply_unitary import ApplyUnitaryArgs\n from cirq import ops\n\n # Check for the magic method.\n method = getattr(val, '_apply_unitary_', None)\n if method is None:\n return NotImplemented\n\n # Infer number of qubits.\n if isinstance(val, ops.Gate):\n n = val.num_qubits()\n elif isinstance(val, ops.Operation):\n n = len(val.qubits)\n else:\n return NotImplemented\n\n # Apply unitary effect to an identity matrix.\n state = np.eye(1 << n, dtype=np.complex128)\n state.shape = (2,) * (2 * n)\n buffer = np.empty_like(state)\n result = method(ApplyUnitaryArgs(state, buffer, range(n)))\n\n if result is NotImplemented or result is None:\n return result\n return result.reshape((1 << n, 1 << n))\n\n\ndef _strat_unitary_from_decompose(val: Any) -> Optional[np.ndarray]:\n \"\"\"Attempts to compute a value's unitary via its _decompose_ method.\"\"\"\n from cirq.protocols.apply_unitary import ApplyUnitaryArgs, apply_unitaries\n\n # Check if there's a decomposition.\n from cirq.protocols.has_unitary import (\n _try_decompose_into_operations_and_qubits)\n operations, qubits = _try_decompose_into_operations_and_qubits(val)\n if operations is None:\n return NotImplemented\n\n # Apply sub-operations' unitary effects to an identity matrix.\n n = len(qubits)\n state = np.eye(1 << n, dtype=np.complex128)\n state.shape = (2,) * (2 * n)\n buffer = np.empty_like(state)\n result = apply_unitaries(operations, qubits,\n ApplyUnitaryArgs(state, buffer, range(n)), None)\n\n # Package result.\n if result is None:\n return None\n return result.reshape((1 << n, 1 << n))\n", "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sampling/simulation methods that delegate to appropriate simulators.\n\nFilename is a reference to multiplexing.\n\"\"\"\n\nfrom typing import List, Optional, Type, Union, Sequence, cast\n\nimport numpy as np\n\nfrom cirq import circuits, protocols, study, schedules, devices, ops, line\nfrom cirq.sim import sparse_simulator, density_matrix_simulator\n\n\ndef sample(program: Union[circuits.Circuit, schedules.Schedule],\n *,\n noise: devices.NoiseModel = devices.NO_NOISE,\n param_resolver: Optional[study.ParamResolver] = None,\n repetitions: int = 1,\n dtype: Type[np.number] = np.complex64) -> study.TrialResult:\n \"\"\"Simulates sampling from the given circuit or schedule.\n\n Args:\n program: The circuit or schedule to sample from.\n noise: Noise model to use while running the simulation.\n param_resolver: Parameters to run with the program.\n repetitions: The number of samples to take.\n dtype: The `numpy.dtype` used by the simulation. Typically one of\n `numpy.complex64` or `numpy.complex128`.\n Favors speed over precision by default, i.e. uses `numpy.complex64`.\n \"\"\"\n\n # State vector simulation is much faster, but only works if no randomness.\n if noise == devices.NO_NOISE and protocols.has_unitary(program):\n return sparse_simulator.Simulator(dtype=dtype).run(\n program=program,\n param_resolver=param_resolver,\n repetitions=repetitions)\n\n return density_matrix_simulator.DensityMatrixSimulator(\n dtype=dtype, noise=noise).run(program=program,\n param_resolver=param_resolver,\n repetitions=repetitions)\n\n\ndef final_wavefunction(\n program: Union[circuits.Circuit, ops.Gate, ops.OP_TREE, schedules.\n Schedule],\n *,\n initial_state: Union[int, Sequence[Union[int, float, complex]], np.\n ndarray] = 0,\n param_resolver: study.ParamResolverOrSimilarType = None,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n dtype: Type[np.number] = np.complex64) -> 'np.ndarray':\n \"\"\"Returns the state vector resulting from acting operations on a state.\n\n By default the input state is the computational basis zero state, in which\n case the output is just the first column of the implied unitary matrix.\n\n Args:\n program: The circuit, schedule, gate, operation, or tree of operations\n to apply to the initial state in order to produce the result.\n param_resolver: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: If an int, the state is set to the computational\n basis state corresponding to this state. Otherwise if this\n is a np.ndarray it is the full initial state. In this case it\n must be the correct size, be normalized (an L2 norm of 1), and\n be safely castable to an appropriate dtype for the simulator.\n dtype: The `numpy.dtype` used by the simulation. Typically one of\n `numpy.complex64` or `numpy.complex128`.\n\n Returns:\n The wavefunction resulting from applying the given unitary operations to\n the desired initial state. 
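A small usage sketch, assuming the module-level entry points `cirq.sample` and `cirq.final_wavefunction` are exported as in the corresponding Cirq release (the Bell-state circuit is illustrative):

import cirq

a, b = cirq.LineQubit.range(2)
bell = cirq.Circuit.from_ops(cirq.H(a), cirq.CNOT(a, b))

# Amplitudes [1/sqrt(2), 0, 0, 1/sqrt(2)] over |00>, |01>, |10>, |11>.
print(cirq.final_wavefunction(bell))

# Sampling requires measurements; repetitions mimic hardware shots.
bell.append(cirq.measure(a, b, key='m'))
print(cirq.sample(bell, repetitions=10))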
Specifically, a numpy array containing the\n the amplitudes in np.kron order, where the order of arguments to kron\n is determined by the qubit order argument (which defaults to just\n sorting the qubits that are present into an ascending order).\n \"\"\"\n\n if not isinstance(initial_state, int):\n initial_state = np.asarray(initial_state, dtype=dtype)\n\n if isinstance(program, (schedules.Schedule, circuits.Circuit)):\n # No change needed.\n pass\n elif isinstance(program, ops.Gate):\n program = circuits.Circuit.from_ops(\n program.on(*line.LineQubit.range(program.num_qubits())))\n else:\n # It should be an OP_TREE.\n program = circuits.Circuit.from_ops(program)\n\n if not protocols.has_unitary(\n protocols.resolve_parameters(program, param_resolver)):\n raise ValueError(\n \"Program doesn't have a single well defined final wavefunction \"\n \"because it is not unitary. \"\n \"Maybe you wanted `cirq.sample_wavefunction`?\\n\"\n \"\\n\"\n \"Program: {!r}\".format(program))\n\n result = sparse_simulator.Simulator(dtype=dtype).simulate(\n program=program,\n initial_state=initial_state,\n qubit_order=qubit_order,\n param_resolver=param_resolver)\n\n return cast(sparse_simulator.SparseSimulatorStep, result).state_vector()\n\n\ndef sample_sweep(program: Union[circuits.Circuit, schedules.Schedule],\n params: study.Sweepable,\n *,\n noise: devices.NoiseModel = devices.NO_NOISE,\n repetitions: int = 1,\n dtype: Type[np.number] = np.complex64\n ) -> List[study.TrialResult]:\n \"\"\"Runs the supplied Circuit or Schedule, mimicking quantum hardware.\n\n In contrast to run, this allows for sweeping over different parameter\n values.\n\n Args:\n program: The circuit or schedule to simulate.\n params: Parameters to run with the program.\n noise: Noise model to use while running the simulation.\n repetitions: The number of repetitions to simulate, per set of\n parameter values.\n dtype: The `numpy.dtype` used by the simulation. Typically one of\n `numpy.complex64` or `numpy.complex128`.\n Favors speed over precision by default, i.e. 
uses `numpy.complex64`.\n\n Returns:\n TrialResult list for this run; one for each possible parameter\n resolver.\n \"\"\"\n circuit = (program if isinstance(program, circuits.Circuit)\n else program.to_circuit())\n param_resolvers = study.to_resolvers(params)\n\n trial_results = [] # type: List[study.TrialResult]\n for param_resolver in param_resolvers:\n measurements = sample(circuit,\n noise=noise,\n param_resolver=param_resolver,\n repetitions=repetitions,\n dtype=dtype)\n trial_results.append(measurements)\n return trial_results\n", "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simulator for density matrices that simulates noisy quantum circuits.\"\"\"\n\nimport collections\n\nfrom typing import (\n cast, Dict, Iterator, List, Optional, TYPE_CHECKING, Type, Union)\n\nimport numpy as np\n\nfrom cirq import (circuits, linalg, ops, protocols, schedules, study, value,\n devices)\nfrom cirq.sim import density_matrix_utils, simulator\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n from typing import Any, Hashable\n\n\nclass _StateAndBuffers:\n\n def __init__(self, num_qubits: int, matrix: np.ndarray):\n self.num_qubits = num_qubits\n self.matrix = matrix\n self.buffers = [np.empty_like(matrix) for _ in range(3)]\n\n\nclass DensityMatrixSimulator(simulator.SimulatesSamples,\n simulator.SimulatesIntermediateState):\n \"\"\"A simulator for density matrices and noisy quantum circuits.\n\n This simulator can be applied on circuits that are made up of operations\n that have:\n * a `_channel_` method\n * a `_mixture_` method for a probabilistic combination of unitary gates.\n * a `_unitary_` method\n * a `_has_unitary_` and `_apply_unitary_` method.\n * measurements\n * a `_decompose_` that eventually yields one of the above\n That is, the circuit must have elements that follow on of the protocols:\n * `cirq.SupportsChannel`\n * `cirq.SupportsMixture`\n * `cirq.SupportsApplyUnitary`\n * `cirq.SupportsUnitary`\n * `cirq.SupportsDecompose`\n or is a measurement.\n\n This simulator supports three types of simulation.\n\n Run simulations which mimic running on actual quantum hardware. These\n simulations do not give access to the density matrix (like actual hardware).\n There are two variations of run methods, one which takes in a single\n (optional) way to resolve parameterized circuits, and a second which\n takes in a list or sweep of parameter resolver:\n\n run(circuit, param_resolver, repetitions)\n\n run_sweep(circuit, params, repetitions)\n\n These methods return `TrialResult`s which contain both the measurement\n results, but also the parameters used for the parameterized\n circuit operations. 
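A sweep over a parameterized circuit, as a hedged sketch of the `sample_sweep` entry point defined above (the symbol name 't' and the number of resolvers are arbitrary):

import sympy
import cirq

q = cirq.LineQubit(0)
circuit = cirq.Circuit.from_ops(
    cirq.X(q)**sympy.Symbol('t'),
    cirq.measure(q, key='m'))

# Five resolvers for t in [0, 1]; one TrialResult is returned per resolver.
results = cirq.sample_sweep(circuit,
                            params=cirq.Linspace('t', start=0, stop=1, length=5),
                            repetitions=100)
print([r.histogram(key='m') for r in results])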
The initial state of a run is always the all 0s state\n in the computational basis.\n\n By contrast the simulate methods of the simulator give access to the density\n matrix of the simulation at the end of the simulation of the circuit.\n Note that if the circuit contains measurements then the density matrix\n is that result for those particular measurement results. For example\n if there is one measurement, then the simulation may result in the\n measurement result for this measurement, and the density matrix will\n be that conditional on that result. It will not be the density matrix formed\n by summing over the different measurements and their probabilities.\n The simulate methods take in two parameters that the run methods do not: a\n qubit order and an initial state. The qubit order is necessary because an\n ordering must be chosen for the kronecker product (see\n `DensityMatrixTrialResult` for details of this ordering). The initial\n state can be either the full density matrix, the full wave function (for\n pure states), or an integer which represents the initial state of being\n in a computational basis state for the binary representation of that\n integer. Similar to run methods, there are two simulate methods that run\n for single simulations or for sweeps across different parameters:\n\n simulate(circuit, param_resolver, qubit_order, initial_state)\n\n simulate_sweep(circuit, params, qubit_order, initial_state)\n\n The simulate methods in contrast to the run methods do not perform\n repetitions. The result of these simulations is a\n `DensityMatrixTrialResult` which contains, in addition to measurement\n results and information about the parameters that were used in the\n simulation, access to the density matrix via the `density_matrix` method.\n\n If one wishes to perform simulations that have access to the\n density matrix as one steps through running the circuit there is a generator\n which can be iterated over and each step is an object that gives access\n to the density matrix. This stepping through a `Circuit` is done on a\n `Moment` by `Moment` manner.\n\n simulate_moment_steps(circuit, param_resolver, qubit_order,\n initial_state)\n\n One can iterate over the moments via\n\n for step_result in simulate_moments(circuit):\n # do something with the density matrix via\n # step_result.density_matrix()\n \"\"\"\n\n def __init__(self,\n *,\n dtype: Type[np.number] = np.complex64,\n noise: devices.NoiseModel = devices.NO_NOISE):\n \"\"\"Density matrix simulator.\n\n Args:\n dtype: The `numpy.dtype` used by the simulation. 
One of\n `numpy.complex64` or `numpy.complex128`\n noise: A noise model to apply while simulating.\n \"\"\"\n if dtype not in {np.complex64, np.complex128}:\n raise ValueError(\n 'dtype must be complex64 or complex128, was {}'.format(dtype))\n\n self._dtype = dtype\n self.noise = noise\n\n def _run(self, circuit: circuits.Circuit,\n param_resolver: study.ParamResolver,\n repetitions: int) -> Dict[str, np.ndarray]:\n \"\"\"See definition in `cirq.SimulatesSamples`.\"\"\"\n param_resolver = param_resolver or study.ParamResolver({})\n resolved_circuit = protocols.resolve_parameters(circuit,\n param_resolver)\n\n if circuit.are_all_measurements_terminal():\n return self._run_sweep_sample(resolved_circuit, repetitions)\n return self._run_sweep_repeat(resolved_circuit, repetitions)\n\n def _run_sweep_sample(self,\n circuit: circuits.Circuit,\n repetitions: int) -> Dict[str, np.ndarray]:\n for step_result in self._base_iterator(\n circuit=circuit,\n qubit_order=ops.QubitOrder.DEFAULT,\n initial_state=0,\n perform_measurements=False):\n pass\n measurement_ops = [op for _, op, _ in\n circuit.findall_operations_with_gate_type(\n ops.MeasurementGate)]\n return step_result.sample_measurement_ops(measurement_ops, repetitions)\n\n def _run_sweep_repeat(self,\n circuit: circuits.Circuit,\n repetitions: int) -> Dict[str, np.ndarray]:\n measurements = {} # type: Dict[str, List[np.ndarray]]\n for _ in range(repetitions):\n all_step_results = self._base_iterator(\n circuit,\n qubit_order=ops.QubitOrder.DEFAULT,\n initial_state=0,\n perform_measurements=True)\n for step_result in all_step_results:\n for k, v in step_result.measurements.items():\n if not k in measurements:\n measurements[k] = []\n measurements[k].append(np.array(v, dtype=bool))\n return {k: np.array(v) for k, v in measurements.items()}\n\n def _simulator_iterator(self, circuit: circuits.Circuit,\n param_resolver: study.ParamResolver,\n qubit_order: ops.QubitOrderOrList,\n initial_state: Union[int, np.ndarray]) -> Iterator:\n \"\"\"See definition in `cirq.SimulatesIntermediateState`.\n\n If the initial state is an int, the state is set to the computational\n basis state corresponding to this state. Otherwise if the initial\n state is a np.ndarray it is the full initial state, either a pure state\n or the full density matrix. If it is the pure state it must be the\n correct size, be normalized (an L2 norm of 1), and be safely castable\n to an appropriate dtype for the simulator. 
If it is a mixed state\n it must be correctly sized and positive semidefinite with trace one.\n \"\"\"\n param_resolver = param_resolver or study.ParamResolver({})\n resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)\n actual_initial_state = 0 if initial_state is None else initial_state\n return self._base_iterator(resolved_circuit,\n qubit_order,\n actual_initial_state)\n\n def _apply_op_channel(self, op: ops.Operation, state: _StateAndBuffers,\n indices: List[int]) -> None:\n \"\"\"Apply channel to state.\"\"\"\n result = protocols.apply_channel(\n op,\n args=protocols.ApplyChannelArgs(\n target_tensor=state.matrix,\n out_buffer=state.buffers[0],\n auxiliary_buffer0=state.buffers[1],\n auxiliary_buffer1=state.buffers[2],\n left_axes=indices,\n right_axes=[e + state.num_qubits for e in indices]))\n for i in range(3):\n if result is state.buffers[i]:\n state.buffers[i] = state.matrix\n state.matrix = result\n\n def _base_iterator(\n self,\n circuit: circuits.Circuit,\n qubit_order: ops.QubitOrderOrList,\n initial_state: Union[int, np.ndarray],\n perform_measurements: bool = True) -> Iterator:\n qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(\n circuit.all_qubits())\n num_qubits = len(qubits)\n qubit_map = {q: i for i, q in enumerate(qubits)}\n initial_matrix = density_matrix_utils.to_valid_density_matrix(\n initial_state, num_qubits, self._dtype)\n if len(circuit) == 0:\n yield DensityMatrixStepResult(initial_matrix, {}, qubit_map,\n self._dtype)\n return\n\n state = _StateAndBuffers(num_qubits,\n initial_matrix.reshape((2,) * num_qubits * 2))\n\n def on_stuck(bad_op: ops.Operation):\n return TypeError(\n \"Can't simulate operations that don't implement \"\n \"SupportsUnitary, SupportsApplyUnitary, SupportsMixture, \"\n \"SupportsChannel or is a measurement: {!r}\".format(bad_op))\n\n def keep(potential_op: ops.Operation) -> bool:\n return (protocols.has_channel(potential_op)\n or (ops.op_gate_of_type(potential_op,\n ops.MeasurementGate) is not None)\n or isinstance(potential_op,\n (ops.SamplesDisplay,\n ops.WaveFunctionDisplay,\n ops.DensityMatrixDisplay))\n )\n\n noisy_moments = self.noise.noisy_moments(circuit,\n sorted(circuit.all_qubits()))\n\n for moment in noisy_moments:\n measurements = collections.defaultdict(\n list) # type: Dict[str, List[bool]]\n\n channel_ops_and_measurements = protocols.decompose(\n moment, keep=keep, on_stuck_raise=on_stuck)\n\n for op in channel_ops_and_measurements:\n indices = [qubit_map[qubit] for qubit in op.qubits]\n if isinstance(op,\n (ops.SamplesDisplay,\n ops.WaveFunctionDisplay,\n ops.DensityMatrixDisplay)):\n continue\n # TODO: support more general measurements.\n meas = ops.op_gate_of_type(op, ops.MeasurementGate)\n if meas:\n if perform_measurements:\n invert_mask = meas.invert_mask or num_qubits * (False,)\n # Measure updates inline.\n bits, _ = density_matrix_utils.measure_density_matrix(\n state.matrix, indices, out=state.matrix)\n corrected = [bit ^ mask for bit, mask in\n zip(bits, invert_mask)]\n key = protocols.measurement_key(meas)\n measurements[key].extend(corrected)\n else:\n # TODO: Use apply_channel similar to apply_unitary.\n self._apply_op_channel(op, state, indices)\n yield DensityMatrixStepResult(density_matrix=state.matrix,\n measurements=measurements,\n qubit_map=qubit_map,\n dtype=self._dtype)\n\n def _create_simulator_trial_result(self,\n params: study.ParamResolver,\n measurements: Dict[str, np.ndarray],\n final_simulator_state: 'DensityMatrixSimulatorState') \\\n -> 
'DensityMatrixTrialResult':\n return DensityMatrixTrialResult(\n params=params,\n measurements=measurements,\n final_simulator_state=final_simulator_state)\n\n def compute_displays(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n param_resolver: study.ParamResolver = study.ParamResolver({}),\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Union[int, np.ndarray] = 0,\n ) -> study.ComputeDisplaysResult:\n \"\"\"Computes displays in the supplied Circuit or Schedule.\n\n Args:\n program: The circuit or schedule to simulate.\n param_resolver: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits used\n to define the order of amplitudes in the wave function.\n initial_state: If an int, the state is set to the computational\n basis state corresponding to this state. Otherwise if it is a\n np.ndarray it is the full initial state, either a pure state\n or the full density matrix. If it is the pure state it must be\n the correct size, be normalized (an L2 norm of 1), and be\n safely castable to an appropriate dtype for the simulator.\n If it is a mixed state it must be correctly sized and\n positive semidefinite with trace one.\n\n Returns:\n ComputeDisplaysResult for the simulation.\n \"\"\"\n return self.compute_displays_sweep(\n program, [param_resolver], qubit_order, initial_state)[0]\n\n def compute_displays_sweep(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n params: Optional[study.Sweepable] = None,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Union[int, np.ndarray] = 0,\n ) -> List[study.ComputeDisplaysResult]:\n \"\"\"Computes displays in the supplied Circuit or Schedule.\n\n In contrast to `compute_displays`, this allows for sweeping\n over different parameter values.\n\n Args:\n program: The circuit or schedule to simulate.\n params: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits used to\n define the order of amplitudes in the wave function.\n initial_state: If an int, the state is set to the computational\n basis state corresponding to this state. Otherwise if it is a\n np.ndarray it is the full initial state, either a pure state\n or the full density matrix. 
If it is the pure state it must be\n the correct size, be normalized (an L2 norm of 1), and be\n safely castable to an appropriate dtype for the simulator.\n If it is a mixed state it must be correctly sized and\n positive semidefinite with trace one.\n\n Returns:\n List of ComputeDisplaysResults for this run, one for each\n possible parameter resolver.\n \"\"\"\n circuit = (program if isinstance(program, circuits.Circuit)\n else program.to_circuit())\n param_resolvers = study.to_resolvers(params or study.ParamResolver({}))\n qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)\n qubits = qubit_order.order_for(circuit.all_qubits())\n\n compute_displays_results = [] # type: List[study.ComputeDisplaysResult]\n for param_resolver in param_resolvers:\n display_values = {} # type: Dict[Hashable, Any]\n\n # Compute the displays in the first Moment\n moment = circuit[0]\n matrix = density_matrix_utils.to_valid_density_matrix(\n initial_state, num_qubits=len(qubits), dtype=self._dtype)\n qubit_map = {q: i for i, q in enumerate(qubits)}\n _enter_moment_display_values_into_dictionary(\n display_values, moment, matrix, qubit_order, qubit_map)\n\n # Compute the displays in the rest of the Moments\n all_step_results = self.simulate_moment_steps(\n circuit,\n param_resolver,\n qubit_order,\n initial_state)\n for step_result, moment in zip(all_step_results, circuit[1:]):\n _enter_moment_display_values_into_dictionary(\n display_values,\n moment,\n step_result.density_matrix(),\n qubit_order,\n step_result._qubit_map)\n\n compute_displays_results.append(study.ComputeDisplaysResult(\n params=param_resolver,\n display_values=display_values))\n\n return compute_displays_results\n\n\ndef _enter_moment_display_values_into_dictionary(\n display_values: Dict,\n moment: ops.Moment,\n state: np.ndarray,\n qubit_order: ops.QubitOrder,\n qubit_map: Dict[ops.Qid, int]):\n for op in moment:\n if isinstance(op, ops.DensityMatrixDisplay):\n display_values[op.key] = (\n op.value_derived_from_density_matrix(state, qubit_map))\n elif isinstance(op, ops.SamplesDisplay):\n display_values[op.key] = _compute_samples_display_value(\n op, state, qubit_order, qubit_map)\n\n\ndef _compute_samples_display_value(display: ops.SamplesDisplay,\n state: np.ndarray,\n qubit_order: ops.QubitOrder,\n qubit_map: Dict[ops.Qid, int]):\n n = len(qubit_map)\n state = np.reshape(state, (2,) * n * 2)\n basis_change = ops.flatten_op_tree(display.measurement_basis_change())\n for op in basis_change:\n # TODO: Use apply_channel similar to apply_unitary.\n indices = [qubit_map[qubit] for qubit in op.qubits]\n gate = cast(ops.GateOperation, op).gate\n unitary = protocols.unitary(gate)\n krauss_tensor = np.reshape(unitary,\n (2,) * gate.num_qubits() * 2)\n state = linalg.targeted_left_multiply(krauss_tensor,\n state,\n indices)\n # TODO add a test that fails if the below is not performed\n state = linalg.targeted_left_multiply(\n np.conjugate(krauss_tensor),\n state,\n [x + n for x in indices])\n state = state.reshape((2**n, 2**n))\n indices = [qubit_map[qubit] for qubit in display.qubits]\n samples = density_matrix_utils.sample_density_matrix(\n state, indices, display.num_samples)\n return display.value_derived_from_samples(samples)\n\n\nclass DensityMatrixStepResult(simulator.StepResult):\n \"\"\"A single step in the simulation of the DensityMatrixSimulator.\n\n Attributes:\n qubit_map: A map from the Qubits in the Circuit to the the index\n of this qubit for a canonical ordering. 
This canonical ordering\n is used to define the state vector (see the state_vector()\n method).\n measurements: A dictionary from measurement gate key to measurement\n results, ordered by the qubits that the measurement operates on.\n \"\"\"\n\n def __init__(self,\n density_matrix: np.ndarray,\n measurements: Dict[str, np.ndarray],\n qubit_map: Dict[ops.Qid, int],\n dtype: Type[np.number] = np.complex64):\n \"\"\"DensityMatrixStepResult.\n\n Args:\n density_matrix: The density matrix at this step. Can be mutated.\n measurements: The measurements for this step of the simulation.\n qubit_map: A map from qid to index used to define the\n ordering of the basis in density_matrix.\n dtype: The numpy dtype for the density matrix.\n \"\"\"\n super().__init__(measurements)\n self._density_matrix = density_matrix\n self._qubit_map = qubit_map\n self._dtype = dtype\n\n def _simulator_state(self) -> 'DensityMatrixSimulatorState':\n return DensityMatrixSimulatorState(self._density_matrix,\n self._qubit_map)\n\n def set_density_matrix(self, density_matrix_repr: Union[int, np.ndarray]):\n \"\"\"Set the density matrix to a new density matrix.\n\n Args:\n density_matrix_repr: If this is an int, the density matrix is set to\n the computational basis state corresponding to this state. Otherwise\n if this is a np.ndarray it is the full state, either a pure state\n or the full density matrix. If it is the pure state it must be the\n correct size, be normalized (an L2 norm of 1), and be safely\n castable to an appropriate dtype for the simulator. If it is a\n mixed state it must be correctly sized and positive semidefinite\n with trace one.\n \"\"\"\n density_matrix = density_matrix_utils.to_valid_density_matrix(\n density_matrix_repr, len(self._qubit_map), self._dtype)\n density_matrix = np.reshape(\n density_matrix,\n self._simulator_state().density_matrix.shape)\n np.copyto(dst=self._simulator_state().density_matrix,\n src=density_matrix)\n\n def density_matrix(self):\n \"\"\"Returns the density matrix at this step in the simulation.\n\n The density matrix that is stored in this result is returned in the\n computational basis with these basis states defined by the qubit_map.\n In particular the value in the qubit_map is the index of the qubit,\n and these are translated into binary vectors where the last qubit is\n the 1s bit of the index, the second-to-last is the 2s bit of the index,\n and so forth (i.e. big endian ordering). 
The density matrix is a\n `2 ** num_qubits` square matrix, with rows and columns ordered by\n the computational basis as just described.\n\n Example:\n qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}\n Then the returned density matrix will have (row and column) indices\n mapped to qubit basis states like the following table\n\n | QubitA | QubitB | QubitC\n :-: | :----: | :----: | :----:\n 0 | 0 | 0 | 0\n 1 | 0 | 0 | 1\n 2 | 0 | 1 | 0\n 3 | 0 | 1 | 1\n 4 | 1 | 0 | 0\n 5 | 1 | 0 | 1\n 6 | 1 | 1 | 0\n 7 | 1 | 1 | 1\n \"\"\"\n size = 2 ** len(self._qubit_map)\n return np.reshape(self._density_matrix, (size, size))\n\n def sample(self,\n qubits: List[ops.Qid],\n repetitions: int = 1) -> np.ndarray:\n indices = [self._qubit_map[q] for q in qubits]\n return density_matrix_utils.sample_density_matrix(\n self._simulator_state().density_matrix, indices, repetitions)\n\n\[email protected]_equality(unhashable=True)\nclass DensityMatrixSimulatorState():\n \"\"\"The simulator state for DensityMatrixSimulator\n\n Args:\n density_matrix: The density matrix of the simulation.\n qubit_map: A map from qid to index used to define the\n ordering of the basis in density_matrix.\n \"\"\"\n\n def __init__(self,\n density_matrix: np.ndarray,\n qubit_map: Dict[ops.Qid, int]):\n self.density_matrix = density_matrix\n self.qubit_map = qubit_map\n\n def _value_equality_values_(self):\n return (self.density_matrix.tolist(), self.qubit_map)\n\n def __repr__(self):\n return (\"cirq.DensityMatrixSimulatorState(\"\n \"density_matrix=np.array({!r}), \"\n \"qubit_map={!r})\".format(self.density_matrix.tolist(),\n self.qubit_map))\n\n\[email protected]_equality(unhashable=True)\nclass DensityMatrixTrialResult(simulator.SimulationTrialResult):\n \"\"\"A `SimulationTrialResult` for `DensityMatrixSimulator` runs.\n\n The density matrix that is stored in this result is returned in the\n computational basis with these basis states defined by the qubit_map.\n In particular the value in the qubit_map is the index of the qubit,\n and these are translated into binary vectors where the last qubit is\n the 1s bit of the index, the second-to-last is the 2s bit of the index,\n and so forth (i.e. big endian ordering). The density matrix is a\n `2 ** num_qubits` square matrix, with rows and columns ordered by\n the computational basis as just described.\n\n Example:\n qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}\n Then the returned density matrix will have (row and column) indices\n mapped to qubit basis states like the following table\n\n | QubitA | QubitB | QubitC\n :-: | :----: | :----: | :----:\n 0 | 0 | 0 | 0\n 1 | 0 | 0 | 1\n 2 | 0 | 1 | 0\n 3 | 0 | 1 | 1\n 4 | 1 | 0 | 0\n 5 | 1 | 0 | 1\n 6 | 1 | 1 | 0\n 7 | 1 | 1 | 1\n\n Attributes:\n params: A ParamResolver of settings used for this result.\n measurements: A dictionary from measurement gate key to measurement\n results. 
Measurement results are a numpy ndarray of actual boolean\n measurement results (ordered by the qubits acted on by the\n measurement gate.)\n final_simulator_state: The final simulator state of the system after the\n trial finishes.\n final_density_matrix: The final density matrix of the system.\n \"\"\"\n\n def __init__(self,\n params: study.ParamResolver,\n measurements: Dict[str, np.ndarray],\n final_simulator_state: DensityMatrixSimulatorState) -> None:\n super().__init__(params=params,\n measurements=measurements,\n final_simulator_state=final_simulator_state)\n size = 2 ** len(final_simulator_state.qubit_map)\n self.final_density_matrix = np.reshape(\n final_simulator_state.density_matrix, (size, size))\n\n def _value_equality_values_(self):\n measurements = {k: v.tolist() for k, v in\n sorted(self.measurements.items())}\n return (self.params, measurements, self._final_simulator_state)\n\n def __repr__(self):\n return (\"cirq.DensityMatrixTrialResult(params={!r}, measurements={!r}, \"\n \"final_simulator_state={!r})\".format(\n self.params, self.measurements,\n self._final_simulator_state))\n" ]
[ [ "numpy.array" ], [ "numpy.dot", "numpy.imag", "numpy.log2", "numpy.sqrt", "numpy.einsum", "numpy.random.random", "numpy.eye", "numpy.linalg.norm", "numpy.arctan2", "numpy.real", "numpy.linalg.eigh", "numpy.copy", "numpy.ndindex", "numpy.array", "numpy.conjugate" ], [ "numpy.diag", "numpy.linalg.norm", "numpy.linalg.det", "numpy.random.randn", "numpy.testing.assert_allclose", "numpy.linalg.qr" ], [ "numpy.eye", "numpy.ones_like" ], [ "pandas.Series", "pandas.DataFrame" ], [ "numpy.diag", "numpy.isreal", "numpy.sqrt", "numpy.allclose", "numpy.eye", "numpy.all", "numpy.array", "numpy.zeros", "numpy.isclose" ], [ "numpy.uint8", "numpy.int32", "numpy.uint16", "numpy.float64", "numpy.float32", "numpy.complex256", "numpy.complex64" ], [ "numpy.empty_like", "numpy.eye", "numpy.array" ], [ "numpy.asarray" ], [ "numpy.reshape", "numpy.empty_like", "numpy.array", "numpy.conjugate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
garymm/tensorflow-onnx
[ "a8f78ac7903493dee579304b7b1717aa9ec9706f" ]
[ "tf2onnx/rewriter/quantization_ops_rewriter.py" ]
[ "# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"\ntf2onnx.rewriter - rewrite tensorflow QuantizeAndDequantizeV2|QuantizeAndDequantizeV3|QuantizeAndDequantizeV4 op\n\"\"\"\n\nimport numpy as np\nfrom onnx import TensorProto, helper\nfrom tf2onnx.graph_matcher import OpTypePattern, GraphMatcher\nfrom tf2onnx import utils\n\n# pylint: disable=missing-docstring\n\ndef extract_numpy_array(node):\n return np.frombuffer(node.attr[\"value\"].t.raw_data, dtype=\"float32\")\n\ndef create_qdq_nodes(g, match_results):\n\n for match in match_results:\n qdq_node = match.get_op('output')\n qdq_node_output_dtype = g.get_dtype(qdq_node.output[0])\n qdq_node_output_shape = g.get_shape(qdq_node.output[0])\n\n # Get the attributes of qdq node\n narrow_range = qdq_node.attr['narrow_range'].i\n signed_input = qdq_node.attr['signed_input'].i\n range_given = qdq_node.get_attr_value(\"range_given\", qdq_node.type != \"QuantizeAndDequantizeV2\" or \\\n qdq_node.type != \"QuantizeAndDequantizeV4\")\n\n min_quantized, max_quantized = [-127, 127]\n if not narrow_range and signed_input:\n min_quantized = -128\n\n if not signed_input:\n min_quantized, max_quantized = [0, 255]\n\n # Get axis attribute for per channel implementation.\n axis = qdq_node.get_attr_value('axis', -1)\n q_attrs = {}\n\n quantized_np_dtype = np.int8 if signed_input else np.uint8\n quantized_dtype = TensorProto.INT8 if signed_input else TensorProto.UINT8\n\n if axis != -1:\n utils.make_sure(g.opset >= 13, \"Opset >= 13 is required for per channel quantization\")\n q_attrs['axis'] = axis\n\n if not range_given:\n min_np = np.array(min_quantized, np.float32)\n max_np = np.array(max_quantized, np.float32)\n max_quantized_const = g.make_const(utils.make_name(\"max_const\"), max_np).output[0]\n if signed_input:\n min_quantized_const = g.make_const(utils.make_name(\"min_const\"), min_np).output[0]\n reduce_attr = {'keepdims': 0}\n if axis != -1:\n inp_rank = g.get_rank(qdq_node.input[0])\n utils.make_sure(inp_rank is not None, \"Input rank cannot be unknown for qdq op %s\", qdq_node.name)\n reduce_axes = [i for i in range(inp_rank) if i != axis]\n reduce_attr['axes'] = reduce_axes\n\n max_value = g.make_node(\"ReduceMax\", [qdq_node.input[0]], attr=reduce_attr).output[0]\n if signed_input:\n min_value = g.make_node(\"ReduceMin\", [qdq_node.input[0]], attr=reduce_attr).output[0]\n\n scale_from_max_side = g.make_node(\"Div\", [max_value, max_quantized_const]).output[0]\n if signed_input:\n scale_from_min_side = g.make_node(\"Div\", [min_value, min_quantized_const]).output[0]\n scale = g.make_node(\"Max\", [scale_from_min_side, scale_from_max_side]).output[0]\n else:\n scale = scale_from_max_side\n\n if axis == -1:\n zero_point_np = np.zeros([], dtype=quantized_np_dtype)\n zero_point = g.make_const(utils.make_name(\"zero_point\"), zero_point_np).output[0]\n else:\n zero_tensor = helper.make_tensor(\"value\", quantized_dtype, dims=[1], vals=[0])\n scale_shape = g.make_node(\"Shape\", [scale]).output[0]\n zero_point = g.make_node(\"ConstantOfShape\", inputs=[scale_shape], attr={\"value\": zero_tensor}).output[0]\n else:\n # Get the min and max value of the inputs to QDQ op\n min_value = extract_numpy_array(qdq_node.inputs[1])\n max_value = extract_numpy_array(qdq_node.inputs[2])\n\n num_channels = min_value.shape[0]\n scales = np.zeros(num_channels, dtype=np.float32)\n\n for i in range(num_channels):\n # Calculate scales from the min and max values\n scale_from_min_side = min_value[i] / min_quantized if min_quantized < 0 else 0\n scale_from_max_side = 
max_value[i] / max_quantized if max_quantized > 0 else 0\n\n if scale_from_min_side > scale_from_max_side:\n scale = scale_from_min_side\n else:\n scale = scale_from_max_side\n\n utils.make_sure(scale > 0, \"Quantize/Dequantize scale must be greater than zero\")\n scales[i] = np.float32(scale)\n\n # Set scalars for scale and zero point for per layer quantization\n if num_channels == 1:\n scales = scales[0]\n zero_point_np = np.zeros([], dtype=quantized_np_dtype)\n else:\n utils.make_sure(axis != -1, \"Axis must be specified for per channel quantization\")\n zero_point_np = np.zeros([num_channels], dtype=quantized_np_dtype)\n\n # Split it into QuantizeLinear and DequantizeLinear and remove the QDQ node reference\n cast_scale = scales.astype(np.float32)\n scale = g.make_const(name=utils.make_name(\"quant_scale\"), np_val=cast_scale).output[0]\n zero_point = g.make_const(utils.make_name(\"zero_point\"), zero_point_np).output[0]\n\n quant_node = g.make_node(op_type=\"QuantizeLinear\",\n inputs=[qdq_node.input[0], scale, zero_point],\n shapes=[qdq_node_output_shape],\n attr=q_attrs,\n dtypes=[quantized_dtype],\n name=utils.make_name(\"QuantLinearNode\"))\n\n g.set_shape(quant_node.output[0], qdq_node_output_shape)\n\n g.remove_node(qdq_node.name)\n\n dequant_node = g.make_node(op_type=\"DequantizeLinear\",\n inputs=[quant_node.output[0], scale, zero_point],\n outputs=[qdq_node.output[0]],\n shapes=[qdq_node_output_shape],\n attr=q_attrs,\n dtypes=[qdq_node_output_dtype],\n name=utils.make_name(\"DequantLinearNode\"))\n g.set_shape(dequant_node.output[0], qdq_node_output_shape)\n\n return g.get_nodes()\n\ndef rewrite_quantize_and_dequantize(g, ops):\n\n pattern_for_qdq_v2 = \\\n OpTypePattern('QuantizeAndDequantizeV2', name='output', inputs=[\n OpTypePattern(\"*\"),\n OpTypePattern(None),\n OpTypePattern(None),\n ])\n pattern_for_qdq_v3 = \\\n OpTypePattern('QuantizeAndDequantizeV3', name='output', inputs=[\n OpTypePattern(\"*\"),\n OpTypePattern(None),\n OpTypePattern(None),\n OpTypePattern(None),\n ])\n pattern_for_qdq_v4 = \\\n OpTypePattern('QuantizeAndDequantizeV4', name='output', inputs=[\n OpTypePattern(\"*\"),\n OpTypePattern(None),\n OpTypePattern(None),\n ])\n\n # Match all the patterns for QDQ ops\n patterns = [pattern_for_qdq_v2, pattern_for_qdq_v3, pattern_for_qdq_v4]\n match_results = []\n for pattern in patterns:\n matcher = GraphMatcher(pattern)\n results = list(matcher.match_ops(ops))\n match_results.extend(results)\n\n return create_qdq_nodes(g, match_results)\n" ]
[ [ "numpy.frombuffer", "numpy.array", "numpy.zeros", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ChengF-Lab/scIVA
[ "f70a927531dd16236dff30decbe77f0552ad4f2d" ]
[ "sciva/loss.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n#\n#\n\n# File Name: loss_function.py\n# Description:\n\n\"\"\"\nimport torch\nimport torch.nn.functional as F\n\nimport math\n\ndef kl_divergence(mu, logvar):\n \"\"\"\n Computes the KL-divergence of\n some element z.\n\n KL(q||p) = -∫ q(z) log [ p(z) / q(z) ]\n = -E[log p(z) - log q(z)]\n \"\"\"\n return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)\n\n\ndef binary_cross_entropy(recon_x, x):\n return -torch.sum(x * torch.log(recon_x + 1e-8) + (1 - x) * torch.log(1 - recon_x + 1e-8), dim=-1)\n\n\ndef elbo(recon_x, x, z_params, binary=True):\n \"\"\"\n elbo = likelihood - kl_divergence\n L = -elbo\n\n Params:\n recon_x:\n x:\n \"\"\"\n mu, logvar = z_params\n kld = kl_divergence(mu, logvar)\n if binary:\n likelihood = -binary_cross_entropy(recon_x, x)\n else:\n likelihood = -F.mse_loss(recon_x, x)\n return torch.sum(likelihood), torch.sum(kld)\n # return likelihood, kld\n\n\ndef elbo_scIVA(recon_x, x, gamma, c_params, z_params, binary=True):\n \"\"\"\n L elbo(x) = Eq(z,c|x)[ log p(x|z) ] - KL(q(z,c|x)||p(z,c))\n = Eq(z,c|x)[ log p(x|z) + log p(z|c) + log p(c) - log q(z|x) - log q(c|x) ]\n \"\"\"\n mu_c, var_c, pi = c_params; #print(mu_c.size(), var_c.size(), pi.size())\n n_centroids = pi.size(1)\n mu, logvar = z_params\n mu_expand = mu.unsqueeze(2).expand(mu.size(0), mu.size(1), n_centroids)\n logvar_expand = logvar.unsqueeze(2).expand(logvar.size(0), logvar.size(1), n_centroids)\n\n # log p(x|z)\n if binary:\n likelihood = -binary_cross_entropy(recon_x, x) #;print(logvar_expand.size()) #, torch.exp(logvar_expand)/var_c)\n else:\n likelihood = -F.mse_loss(recon_x, x)\n\n # log p(z|c)\n logpzc = -0.5*torch.sum(gamma*torch.sum(math.log(2*math.pi) + \\\n torch.log(var_c) + \\\n torch.exp(logvar_expand)/var_c + \\\n (mu_expand-mu_c)**2/var_c, dim=1), dim=1)\n # log p(c)\n logpc = torch.sum(gamma*torch.log(pi), 1)\n\n # log q(z|x) or q entropy \n qentropy = -0.5*torch.sum(1+logvar+math.log(2*math.pi), 1)\n\n # log q(c|x)\n logqcx = torch.sum(gamma*torch.log(gamma), 1)\n\n kld = -logpzc - logpc + qentropy + logqcx\n\n return torch.sum(likelihood), torch.sum(kld)\n\n\n" ]
[ [ "torch.exp", "torch.nn.functional.mse_loss", "torch.sum", "torch.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sighingnow/tensorflow
[ "12579f9a795da54db405b5918f709665e2a7c07f", "12579f9a795da54db405b5918f709665e2a7c07f", "12579f9a795da54db405b5918f709665e2a7c07f" ]
[ "tensorflow/python/framework/function_def_to_graph.py", "tensorflow/python/summary/summary_v2_test.py", "tensorflow/python/ops/ragged/ragged_shape.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Utility to convert FunctionDef to GraphDef and Graph.\"\"\"\n\nimport itertools\n\n\nfrom tensorflow.core.framework import function_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import tensor_shape_pb2\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.core.framework import versions_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import cpp_shape_inference_pb2\nfrom tensorflow.python.framework import importer\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.framework.func_graph import FuncGraph\nfrom tensorflow.python.ops import resource_variable_ops\n\n\ndef function_def_to_graph(fdef,\n structured_input_signature=None,\n structured_outputs=None,\n input_shapes=None):\n \"\"\"Converts a FunctionDef to a FuncGraph (sub-class Graph).\n\n The returned FuncGraph's `name`, `inputs` and `outputs` fields will be set.\n The input tensors are represented as placeholders.\n\n Note: `FuncGraph.inputs` and `FuncGraph.captures` are not set and may be set\n by the caller.\n\n Args:\n fdef: FunctionDef.\n structured_input_signature: Optional. The structured input signature to\n use for initializing the FuncGraph. See the docstring for FuncGraph for\n more information.\n structured_outputs: Optional. The structured outputs to use for\n initializing the FuncGraph. See the docstring for FuncGraph for more\n information.\n input_shapes: Optional. A list of TensorShape objects of the shapes of\n function inputs. Defaults to the function's \"_input_shapes\" attribute. If\n specified, its length must match length of `fdef.signature.input_arg`. 
If\n a shape is None, the corresponding input placeholder will have unknown\n shape.\n\n Returns:\n A FuncGraph.\n \"\"\"\n func_graph = FuncGraph(fdef.signature.name,\n structured_input_signature=structured_input_signature,\n structured_outputs=structured_outputs)\n if input_shapes is None:\n input_shapes_attr = fdef.attr.get(\"_input_shapes\", None)\n if input_shapes_attr is not None:\n raw_input_shapes = input_shapes_attr.list.shape\n\n # Replace resource handle shapes, since they are always stored as a scalar\n # shape in the _input_shapes attribute.\n input_shapes = []\n for input_shape, arg_def in zip(raw_input_shapes,\n fdef.signature.input_arg):\n if arg_def.type == types_pb2.DT_RESOURCE and arg_def.handle_data:\n input_shapes.append(arg_def.handle_data[0].shape)\n else:\n input_shapes.append(input_shape)\n\n graph_def, nested_to_flat_tensor_name = function_def_to_graph_def(\n fdef, input_shapes)\n\n with func_graph.as_default():\n # Add all function nodes to the graph.\n importer.import_graph_def_for_function(graph_def, name=\"\")\n\n # Initialize fields specific to FuncGraph.\n\n # inputs\n input_tensor_names = [\n nested_to_flat_tensor_name[arg.name] for arg in fdef.signature.input_arg\n ]\n func_graph.inputs = [\n func_graph.get_tensor_by_name(name) for name in input_tensor_names\n ]\n\n # outputs\n output_tensor_names = [\n nested_to_flat_tensor_name[fdef.ret[arg.name]]\n for arg in fdef.signature.output_arg\n ]\n func_graph.outputs = [\n func_graph.get_tensor_by_name(name) for name in output_tensor_names\n ]\n func_graph.control_outputs = [\n func_graph.get_operation_by_name(fdef.control_ret[ret_name])\n for ret_name in fdef.signature.control_output\n ]\n\n _set_handle_data(func_graph, fdef)\n\n for node in graph_def.node:\n output_shapes = node.attr.get(\"_output_shapes\", None)\n if output_shapes is not None:\n op = func_graph.get_operation_by_name(node.name)\n # _output_shapes for functions can sometimes be too long because the\n # output-intermediates-for-gradients version of the function was\n # substituted before saving. We'll accept that here. (See b/133666530).\n for output_index, shape in enumerate(\n output_shapes.list.shape[:len(op.outputs)]):\n op.outputs[output_index].set_shape(shape)\n output_names = {}\n for ret_arg_def, tensor_name in zip(\n fdef.signature.output_arg, output_tensor_names):\n output_names[ops.tensor_id(\n func_graph.get_tensor_by_name(tensor_name))] = (\n ret_arg_def.name)\n func_graph._output_names = output_names # pylint: disable=protected-access\n return func_graph\n\n\ndef is_function(fname):\n \"\"\"Checks for a function definition with `fname` in the current context.\"\"\"\n if context.executing_eagerly():\n return context.context().has_function(fname)\n else:\n graph = ops.get_default_graph()\n while graph is not None:\n if graph._is_function(fname): # pylint: disable=protected-access\n return True\n if hasattr(graph, \"outer_graph\"):\n graph = graph.outer_graph\n else:\n return False\n\n\ndef function_def_to_graph_def(fdef, input_shapes=None):\n \"\"\"Convert a FunctionDef to a GraphDef.\n\n Steps:\n 1. Creates placeholder nodes corresponding to inputs in\n `FunctionDef.signature.input_arg`.\n 2. Adds NodeDefs in `FunctionDef.node_def` to `GraphDef.node`.\n 3. Renames inputs of all nodes to use the convention of GraphDef instead of\n FunctionDef. See comment on `FunctionDef.node_def` on how the tensor naming\n in FunctionDefs is different from GraphDefs.\n\n Args:\n fdef: FunctionDef.\n input_shapes: Optional. 
A list of TensorShape objects of the shapes of\n function inputs. If specified, its length must match length of\n `fdef.signature.input_arg`. If a shape is None, the corresponding input\n placeholder will have unknown shape.\n\n Returns:\n A tuple of (GraphDef, dict<string, string>). The dict contains a mapping\n from nested tensor names (in FunctionDef) to flattened names (in GraphDef).\n\n Raises:\n ValueError: If the length of input_shapes does not match the number of\n input_args or if the FunctionDef is invalid.\n \"\"\"\n graph_def = graph_pb2.GraphDef()\n graph_def.versions.CopyFrom(\n versions_pb2.VersionDef(\n producer=versions.GRAPH_DEF_VERSION,\n min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER))\n\n default_graph = ops.get_default_graph()\n\n copied_functions = set()\n\n if input_shapes and len(input_shapes) != len(fdef.signature.input_arg):\n raise ValueError(\"Length of `input_shapes` must match the number \"\n f\"of `input_arg`s in `fdef`. Got \"\n f\"{len(input_shapes)} `input_shapes` and \"\n f\"{len(fdef.signature.input_arg)} `input_arg`s.\")\n\n # 1. Create placeholders for input nodes.\n for i, arg_def in enumerate(fdef.signature.input_arg):\n node_def = graph_def.node.add()\n node_def.name = arg_def.name\n node_def.op = \"Placeholder\"\n node_def.attr[\"dtype\"].type = arg_def.type\n if input_shapes and input_shapes[i] is not None:\n input_shape = input_shapes[i]\n if not isinstance(input_shape, tensor_shape_pb2.TensorShapeProto):\n input_shape = input_shape.as_proto()\n node_def.attr[\"shape\"].shape.CopyFrom(input_shape)\n arg_attrs = fdef.arg_attr[i].attr\n for k in arg_attrs:\n # Only copy internal attributes. Normal attributes for nodes cannot be\n # applied to these Placeholder nodes.\n if k == \"_output_shapes\":\n if arg_attrs[k].WhichOneof(\"value\") == \"list\":\n node_def.attr[\"shape\"].shape.CopyFrom(arg_attrs[k].list.shape[0])\n elif arg_attrs[k].WhichOneof(\"value\") == \"shape\":\n node_def.attr[\"shape\"].shape.CopyFrom(arg_attrs[k].shape)\n elif k.startswith(\"_\"):\n node_def.attr[k].CopyFrom(arg_attrs[k])\n\n # 2. Copy all body NodeDefs to the GraphDef.\n graph_def.node.extend(fdef.node_def)\n\n # 3. 
Perform the renaming.\n\n # Build the tensor name mapping then flatten the tensor names.\n # See comment on `FunctionDef.node_def` on how the tensor naming in\n # FunctionDefs is different from GraphDefs.\n nested_to_flat_tensor_name = {}\n\n for arg_def in fdef.signature.input_arg:\n nested_to_flat_tensor_name[arg_def.name] = \"{}:0\".format(arg_def.name)\n control_name = \"^\" + arg_def.name\n nested_to_flat_tensor_name[control_name] = control_name\n\n for node_def in fdef.node_def:\n graph = default_graph\n while True:\n f = graph._functions.get(node_def.op, None) # pylint: disable=protected-access\n if f is not None or not hasattr(graph, \"outer_graph\"):\n break\n graph = graph.outer_graph\n\n if f is not None:\n op_def = f.definition.signature\n if node_def.op not in copied_functions:\n # Since this function is referenced as an op type, we have no choice but\n # to copy it into the GraphDef if we want downstream tools to process\n # it.\n graph_def.library.function.add().CopyFrom(f.definition)\n copied_functions.add(node_def.op)\n if f.grad_func_name:\n grad_def = function_pb2.GradientDef()\n grad_def.function_name = f.name\n grad_def.gradient_func = f.grad_func_name\n graph_def.library.gradient.extend([grad_def])\n else:\n op_def = default_graph._get_op_def(node_def.op) # pylint: disable=protected-access\n\n for attr in op_def.attr:\n if attr.type == \"func\":\n fname = node_def.attr[attr.name].func.name\n if not is_function(fname):\n raise ValueError(f\"Function {fname} was not found. Please make sure \"\n \"the FunctionDef `fdef` is correct.\")\n elif attr.type == \"list(func)\":\n for fn in node_def.attr[attr.name].list.func:\n fname = fn.name\n if not is_function(fname):\n raise ValueError(f\"Function {fname} was not found. Please make \"\n \"sure the FunctionDef `fdef` is correct.\")\n\n # Iterate over output_args in op_def to build the map.\n # Index of the output tensor in the flattened list of *all* output\n # tensors of the op.\n flattened_index = 0\n for arg_def in op_def.output_arg:\n num_args = _get_num_args(arg_def, node_def)\n for i in range(num_args):\n # Map tensor names from \"node_name:output_arg_name:index\" to\n # \"node_name:flattened_index\".\n nested_name = \"{}:{}:{}\".format(node_def.name, arg_def.name, i)\n flat_name = \"{}:{}\".format(node_def.name, flattened_index)\n nested_to_flat_tensor_name[nested_name] = flat_name\n flattened_index += 1\n control_name = \"^\" + node_def.name\n nested_to_flat_tensor_name[control_name] = control_name\n\n # Update inputs of all nodes in graph.\n for node_def in graph_def.node:\n for i in range(len(node_def.input)):\n node_def.input[i] = nested_to_flat_tensor_name[node_def.input[i]]\n\n return graph_def, nested_to_flat_tensor_name\n\n\n# Based on implementation in core/framework/node_def_util.cc::ComputeArgRange.\ndef _get_num_args(arg_def, node_def):\n if arg_def.number_attr:\n return node_def.attr[arg_def.number_attr].i\n elif arg_def.type_list_attr:\n return len(node_def.attr[arg_def.type_list_attr].list.type)\n elif arg_def.type_attr or arg_def.type != types_pb2.DT_INVALID:\n return 1\n else:\n raise ValueError(f\"Invalid arg_def:\\n\\n{arg_def}. 
Please make sure the \"\n \"FunctionDef `fdef` is correct.\")\n\n\ndef _set_handle_data(func_graph, fdef):\n \"\"\"Adds handle data for resource type inputs and outputs.\"\"\"\n for tensor, arg_def in itertools.chain(\n zip(func_graph.inputs, fdef.signature.input_arg),\n zip(func_graph.outputs, fdef.signature.output_arg)):\n if arg_def.handle_data:\n shape_and_dtype = arg_def.handle_data[0]\n handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()\n handle_data.is_set = True\n handle_data.shape_and_type.append(\n cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(\n shape=shape_and_dtype.shape, dtype=shape_and_dtype.dtype))\n resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access\n tensor, handle_data, True)\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the API surface of the V1 tf.summary ops when TF2 is enabled.\n\nV1 summary ops will invoke V2 TensorBoard summary ops in eager mode.\n\"\"\"\n\nfrom tensorboard.summary import v2 as summary_v2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import summary_ops_v2\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.summary import summary as summary_lib\n\n\nclass SummaryV2Test(test.TestCase):\n\n @test_util.run_v2_only\n def test_scalar_summary_v2__w_writer(self):\n \"\"\"Tests scalar v2 invocation with a v2 writer.\"\"\"\n with test.mock.patch.object(\n summary_v2, 'scalar', autospec=True) as mock_scalar_v2:\n with summary_ops_v2.create_summary_file_writer('/tmp/test').as_default(\n step=1):\n i = constant_op.constant(2.5)\n tensor = summary_lib.scalar('float', i)\n # Returns empty string.\n self.assertEqual(tensor.numpy(), b'')\n self.assertEqual(tensor.dtype, dtypes.string)\n mock_scalar_v2.assert_called_once_with('float', data=i)\n\n @test_util.run_v2_only\n def test_scalar_summary_v2__wo_writer(self):\n \"\"\"Tests scalar v2 invocation with no writer.\"\"\"\n with self.assertWarnsRegex(\n UserWarning, 'default summary writer not found'):\n with test.mock.patch.object(\n summary_v2, 'scalar', autospec=True) as mock_scalar_v2:\n summary_lib.scalar('float', constant_op.constant(2.5))\n mock_scalar_v2.assert_not_called()\n\n @test_util.run_v2_only\n def test_scalar_summary_v2__global_step_not_set(self):\n \"\"\"Tests scalar v2 invocation when global step is not set.\"\"\"\n with self.assertWarnsRegex(UserWarning, 'global step not set'):\n with test.mock.patch.object(\n summary_v2, 'scalar', autospec=True) as mock_scalar_v2:\n with summary_ops_v2.create_summary_file_writer(\n '/tmp/test').as_default():\n summary_lib.scalar('float', constant_op.constant(2.5))\n mock_scalar_v2.assert_not_called()\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2021 The 
TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Shapes & broadcasting for RaggedTensors.\n\nTODO(martinz): make this suitable for output for tf.shape\nTODO(martinz): replace ragged_tensor_shape with this.\n\"\"\"\n\n\nimport abc\nfrom typing import Iterable, Sequence, Tuple, Union\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.ragged.row_partition import RowPartition\nfrom tensorflow.python.types import core\n\n\n# TODO(martinz): allow inner_shape to be a fully defined TensorShape.\n# A \"fully defined TensorShape\" means one where the rank and all dimensions are\n# known.\n# Allowing inner_shape might mean allowing inner_shape to be initialized by\n# a fully defined TensorShape, or it might mean that you can actually store\n# TensorShape in the inner_shape field. This could conceivably construct\n# a RaggedShape that was dtype agnostic.\n#\n# TODO(martinz): unify the impl of the determination of index type across\n# RowPartition and RaggedShape.\nclass RaggedShape:\n \"\"\"The shape of a ragged or dense tensor.\n\n Ragged shapes are encoded using two fields:\n\n * `inner_shape`: An integer vector giving the shape of a dense tensor.\n * `row_partitions`: A list of `RowPartition` objects, describing how\n that flat shape should be partitioned to add ragged axes.\n\n If a RaggedShape is the shape of a RaggedTensor rt, then:\n 1. row_partitions = rt._nested_row_partitions\n (and thus len(row_partitions) > 0)\n 2. inner_shape is the shape of rt.flat_values\n\n If a RaggedShape is the shape of a dense tensor t, then:\n 1. row_partitions = []\n 2. inner_shape is the shape of t.\n\n Examples:\n\n The following table gives a few examples (where `RP(lengths)` is short\n for `RowPartition.from_lengths(lengths)`):\n\n Row Partitions | Inner Shape | Example Tensor\n --------------------------- | ------------ | ----------------------------\n [] | [2, 3] | `[[1, 2, 3], [4, 5, 6]]`\n [RP([2, 0, 3])] | [5] | `[[1, 2], [], [3, 4, 5]]`\n [RP([2, 1])] | [3, 2] | `[[[1, 2], [3, 4]], [[5, 6]]]`\n [RP([2, 1]), RP([2, 1, 2])] | [5] | `[[[1, 2], [3]], [[4, 5]]]`\n \"\"\"\n\n def __init__(self, row_partitions, inner_shape, dtype=None, validate=False):\n \"\"\"Core constructor for a RaggedShape.\n\n Create a RaggedShape. This can be used to construct a\n RaggedShape representing a ragged or dense shape. 
If row_partitions\n is an empty list, then this is equivalent to a dense shape.\n\n If row_partitions is specified, then the num_row_partitions will be equal\n to len(row_partitions). There are several checks made.\n Specifically:\n 1. Consecutive row_partitions must have consistent nvals and nrows.\n 2. The last row_partitions must have nvals equal to the first element of\n inner_shape.\n\n The inner_shape is converted to a tensor.\n All row_partitions and the inner_shape are converted to the same dtype\n (int64 or int32).\n\n Args:\n row_partitions: the row_partitions of the shape.\n inner_shape: if len(row_partitions) > 0, the shape of the flat_values.\n Otherwise, the shape of the tensor.\n dtype: tf.int64, tf.int32, or None representing the preferred dtype.\n validate: if true, dynamic validation is applied to the shape.\n \"\"\"\n if not isinstance(row_partitions, Iterable):\n raise TypeError(\n \"row_partitions should be a list of row partitions. Instead, got \" +\n str(row_partitions))\n for x in row_partitions:\n if not isinstance(x, RowPartition):\n raise TypeError(\"row_partitions contains \" + str(x) +\n \" which is not a RowPartition\")\n dtype = _find_dtype_iterable(row_partitions, dtype)\n dtype = _find_dtype(inner_shape, dtype)\n if (isinstance(inner_shape, np.ndarray) and\n inner_shape.dtype == np.int32 and dtype is None):\n dtype = dtypes.int32\n dtype = _find_dtype(dtypes.int64, dtype)\n\n row_partitions = tuple(\n [rp.with_row_splits_dtype(dtype) for rp in row_partitions])\n self._row_partitions = row_partitions\n self._inner_shape = ops.convert_to_tensor(\n inner_shape, dtype_hint=dtype, name=\"inner_dim_sizes\")\n if self._inner_shape.dtype != dtype:\n self._inner_shape = math_ops.cast(self._inner_shape, dtype)\n\n checks = []\n # Validate shapes.\n if self._row_partitions:\n for axis, row_partition in enumerate(self._row_partitions):\n if axis > 0:\n previous_row_partition = self._row_partitions[axis - 1]\n msg = (\"RowPartitions in RaggedShape do not align \"\n f\"between {axis - 1} and {axis}\")\n static_nrows = row_partition.static_nrows\n static_nvals = previous_row_partition.static_nvals\n if (static_nrows is not None) and (static_nvals is not None):\n if static_nrows != static_nvals:\n raise ValueError(msg)\n else:\n continue\n if validate:\n checks.append(\n check_ops.assert_equal(\n previous_row_partition.nvals(),\n row_partition.nrows(),\n message=msg))\n\n self._inner_shape.shape.assert_has_rank(1)\n if row_partitions:\n last_row_partition = row_partitions[-1]\n static_nvals = last_row_partition.static_nvals\n static_inner_shape = tensor_util.constant_value(self._inner_shape)\n if (static_nvals is not None) and (static_inner_shape is not None):\n if static_nvals != static_inner_shape[0]:\n raise ValueError(\"Last row partition does not match inner_shape.\")\n elif validate:\n checks.append(\n check_ops.assert_equal(\n row_partitions[-1].nvals(),\n self._inner_shape[0],\n message=\"Last row partition does not match inner_shape.\"))\n if checks:\n self._inner_shape = control_flow_ops.with_dependencies(\n checks, self._inner_shape, name=\"inner_shape_validated\")\n self._row_partitions = [\n rp.with_dependencies(checks) for rp in self._row_partitions\n ]\n\n @classmethod\n def from_lengths(cls,\n lengths: Sequence[Union[Sequence[int], int]],\n num_row_partitions=None,\n dtype=dtypes.int64):\n \"\"\"Creates a shape with the given lengths and num_row_partitions.\n\n The lengths can either be a nonnegative int or a list of nonnegative ints.\n\n If 
num_row_partitions is None, then the minimal num_row_partitions is used.\n\n For example, [2, (3, 2)] is the shape of [[0, 0, 0], [0, 0]], and\n [2, 2] is the shape of [[0, 0], [0, 0]]\n\n This chooses the minimal num_row_partitions required (including zero).\n\n The following table gives a few examples (where `RP(lengths)` is short\n for `RowPartition.from_lengths(lengths)`):\n\n For example:\n from_lengths | row_partitions | inner_shape\n ---------------------- | --------------------------| -------------\n [] | [] | []\n [2, (3, 2)] | [RP([3, 2])] | [5]\n [2, 2] | [] | [2, 2]\n [2, (3, 2), 7] | [RP([3, 2])] | [5, 7]\n [2, (2, 2), 3] | [RP([2, 2])] | [4, 3]\n [2, 2, 3] | [] | [2, 2, 3]\n [2, (2, 1), (2, 0, 3)] | [RP(2, 1), RP([2, 0, 3])] | [5]\n\n If we want the row partitions to end with uniform row partitions, then\n we can set num_row_partitions.\n\n For example,\n below URP(3, 12) is RowPartition.from_uniform_row_length(3, 12)\n\n from_lengths | num_row_partitions | row_partitions | inner_shape\n ---------------| -------------------|--------------------------|------------\n [2, (3, 2), 2] | 2 | [RP([3, 2]), URP(2, 10)] | [10]\n [2, 2] | 1 | [URP(2, 4)] | [4]\n [2, 2, 3] | 0 | [] | [2, 2, 3]\n [2, 2, 3] | 1 | [URP(2, 4)] | [4, 3]\n [2, 2, 3] | 2 | [URP(2, 4), URP(3, 12)] | [12]\n\n\n\n Representing the shapes from init():\n\n from_lengths | Tensor Example\n ------------------------ | ------------------------------\n `[2, 3]` | `[[1, 2, 3], [4, 5, 6]]`\n `[3, (2, 0, 3)]` | `[[1, 2], [], [3, 4, 5]]`\n `[2, (2, 1), 2]` | `[[[1, 2], [3, 4]], [[5, 6]]]`\n `[2, (2, 1), (2, 1, 2)]` | `[[[1, 2], [3]], [[4, 5]]]`\n\n Args:\n lengths: the lengths of sublists along each axis.\n num_row_partitions: the num_row_partitions of the result or None\n indicating the minimum number of row_partitions.\n dtype: the dtype of the shape (tf.int32 or tf.int64).\n\n Returns:\n a new RaggedShape\n \"\"\"\n if not isinstance(lengths, list):\n raise ValueError(\"lengths should be a list\")\n for x in lengths:\n if not _is_int_or_tuple_of_ints(x):\n raise ValueError(\n \"element of lengths should be int or tuple of ints: instead %r\" %\n (x,))\n\n if num_row_partitions is None:\n # Calculate the minimal num_row_partitions.\n is_list = [not isinstance(x, int) for x in lengths]\n if any(is_list):\n # Last index when not a list.\n num_row_partitions = len(is_list) - is_list[-1::-1].index(True) - 1\n else:\n num_row_partitions = 0\n\n if not isinstance(num_row_partitions, int):\n raise ValueError(\"num_row_partitions should be an int or None\")\n\n if not lengths:\n if num_row_partitions > 0:\n raise ValueError(\"num_row_partitions==0 for a scalar shape\")\n return RaggedShape([], [], dtype=dtype)\n\n if not num_row_partitions < len(lengths):\n raise ValueError(\n \"num_row_partitions should be less than `len(lengths)` \"\n \"if shape is not scalar.\"\n )\n\n if num_row_partitions > 0:\n (row_partitions, nvals) = _to_row_partitions_and_nvals_from_lengths(\n lengths[:num_row_partitions + 1])\n inner_shape = [nvals] + lengths[num_row_partitions + 1:]\n return RaggedShape(row_partitions, inner_shape, dtype=dtype)\n else:\n return RaggedShape([], lengths, dtype=dtype)\n\n @classmethod\n def from_row_partitions(cls, row_partitions, dtype=None):\n \"\"\"Create a shape from row_partitions.\n\n Args:\n row_partitions: a nonempty list of RowPartition objects.\n dtype: the dtype to use, or None to use the row_partitions dtype.\n\n Returns:\n a RaggedShape with inner_rank==1.\n \"\"\"\n if not row_partitions:\n raise 
ValueError(\"row_partitions cannot be empty\")\n inner_shape = [row_partitions[-1].nvals()]\n return RaggedShape(row_partitions, inner_shape, dtype=dtype)\n\n @classmethod\n def _from_inner_shape(cls, inner_shape, dtype=None):\n \"\"\"Create a shape from inner_shape, where num_row_partitions == 0.\"\"\"\n return RaggedShape([], inner_shape, dtype=dtype)\n\n # pylint: disable=protected-access\n @classmethod\n def from_tensor(cls, t, dtype=None):\n \"\"\"Constructs a ragged shape for a potentially ragged tensor.\"\"\"\n if ragged_tensor.is_ragged(t):\n return RaggedShape(t._nested_row_partitions, _flat_values_shape(t),\n dtype=dtype)\n else:\n return RaggedShape._from_inner_shape(array_ops.shape(t), dtype=dtype)\n\n @property\n def row_partitions(self):\n \"\"\"The row_partitions of the shape.\"\"\"\n return self._row_partitions\n\n @property\n def num_row_partitions(self):\n \"\"\"The number of row_partitions of the shape.\"\"\"\n return len(self._row_partitions)\n\n @property\n def dtype(self):\n \"\"\"The dtype of the shape -- one of tf.int32 or tf.int64.\"\"\"\n return self._inner_shape.dtype\n\n def _static_inner_shape(self, truncate_first):\n \"\"\"Returns the lengths of the inner shape (if rank known).\"\"\"\n result = tensor_util.constant_value(self.inner_shape, partial=True)\n if result is None:\n return [...]\n result = list(result)\n if truncate_first:\n return result[1:]\n return result\n\n def static_lengths(self, ragged_lengths=True):\n \"\"\"Returns a list of statically known axis lengths.\n\n This represents what values are known. For each row partition, it presents\n either the uniform row length (if statically known),\n the list of row lengths, or none if it is not statically known.\n For the inner shape, if the rank is known, then each dimension is reported\n if known, and None otherwise. If the rank of the inner shape is not known,\n then the returned list ends with an ellipsis.\n\n Args:\n ragged_lengths: If false, returns None for all ragged dimensions.\n\n Returns:\n A Sequence[Union[Sequence[int],int, None]] of lengths, with a possible\n Ellipsis at the end.\n \"\"\"\n if self.num_row_partitions == 0:\n return self._static_inner_shape(False)\n first_dim = self.row_partitions[0].static_nrows\n if isinstance(first_dim, tensor_shape.Dimension):\n first_dim = first_dim.value\n rp_dims = [first_dim]\n for rp in self.row_partitions:\n if rp.is_uniform():\n rp_dims.append(rp.static_uniform_row_length)\n elif ragged_lengths:\n const_vals = tensor_util.constant_value(rp.row_lengths())\n if const_vals is None:\n rp_dims.append(None)\n else:\n rp_dims.append(tuple(const_vals.tolist()))\n else:\n rp_dims.append(None)\n\n return rp_dims + self._static_inner_shape(True)\n\n def __repr__(self):\n lengths = _list_with_ellipsis_to_str(self.static_lengths())\n return (\"<RaggedShape \"\n \"lengths=%s num_row_partitions=%r>\" %\n (lengths, self.num_row_partitions))\n\n def _to_tensor_shape(self) -> tensor_shape.TensorShape:\n \"\"\"Returns a TensorShape representation of the shape.\"\"\"\n lengths = self.static_lengths(ragged_lengths=False)\n if not lengths:\n return tensor_shape.TensorShape(())\n if lengths[-1] == Ellipsis:\n return tensor_shape.TensorShape(None)\n return tensor_shape.TensorShape(lengths)\n\n def _slice_shape(self, start, stop):\n \"\"\"Returns a shape self[start:stop].\n\n If start == 0, then this truncates dimensions after stop.\n If start != 0, then this will return a shape with num_row_partitions == 0.\n\n See __getitem__.\n\n Args:\n start: the first dimension. 
0 <= start <= rank\n stop: the last dimension (exclusive). 0 <= stop <= rank\n \"\"\"\n if stop <= start:\n return RaggedShape._from_inner_shape([])\n elif start == 0:\n if stop <= self.num_row_partitions:\n if stop == 1:\n return RaggedShape._from_inner_shape([self.row_partitions[0].nrows()])\n new_row_partitions = self.row_partitions[:stop - 1]\n new_inner_shape = [new_row_partitions[-1].nvals()]\n return RaggedShape(new_row_partitions, new_inner_shape)\n else:\n if self.rank <= stop:\n return self\n if self.num_row_partitions == 0:\n return RaggedShape._from_inner_shape(self.inner_shape[:stop])\n else:\n new_inner_shape = self.inner_shape[:stop - self.num_row_partitions]\n return RaggedShape(self.row_partitions, new_inner_shape)\n else:\n if stop < self.rank:\n partial = self._slice_shape(0, stop)\n else:\n partial = self\n for x in self.row_partitions:\n if not x.is_uniform():\n raise ValueError(\"All relevant dimensions must be uniform\")\n\n return RaggedShape._from_inner_shape(\n partial._with_num_row_partitions(0).inner_shape[start:])\n\n def _dimension(self, index):\n \"\"\"Return a dimension, if the dimension is not ragged (see __getitem__).\"\"\"\n rank = self.rank\n if not isinstance(index, int):\n raise TypeError(\"index should be an int\")\n if (self.num_row_partitions == 0 or index > self.num_row_partitions + 1):\n # If num_row_partitions > 0 and index <= num_row_partitions + 1, then\n # we are safe.\n if rank is None:\n raise ValueError(\n \"Rank must be known to use __getitem__ on a large index.\")\n if index >= rank:\n raise IndexError(\"Index is too big: \" + str(index) + \">=\" + str(rank))\n if index < 0:\n raise IndexError(\"Index must be non-negative: \" + str(index))\n elif not self.is_uniform(index):\n raise ValueError(\"Index \" + str(index) + \" is not uniform\")\n elif index == 0 and self.num_row_partitions > 0:\n return self.row_partitions[0].nrows()\n elif self.num_row_partitions == 0:\n return self.inner_shape[index]\n elif index > self.num_row_partitions:\n return self.inner_shape[index - self.num_row_partitions]\n else:\n return self.row_partitions[index - 1].uniform_row_length()\n\n def __getitem__(self, index):\n \"\"\"Returns a dimension or a slice of the shape.\n\n Ragged shapes can have ragged dimensions that depend upon other dimensions.\n Therefore, if you ask for a dimension that is ragged, this function returns\n a ValueError. 
For similar reasons, if a slice is selected that includes\n a ragged dimension without including the zero dimension, then this fails.\n\n Any slice that does not start at zero will return a shape\n with num_row_partitions == 0.\n\n Args:\n index: the index: can be an int or a slice.\n\n Raises:\n IndexError: if the index is not in range.\n ValueError: if the rank is unknown, or a ragged rank is requested\n incorrectly.\n \"\"\"\n rank = self.rank\n if isinstance(index, slice):\n\n if (index.step is not None) and (index.step != 1):\n raise IndexError(\"Cannot stride through a shape\")\n start = index.start\n stop = index.stop\n if start is None:\n start = 0\n start = _fix_slice_index(start, rank, self.num_row_partitions)\n if stop is None:\n if rank is None:\n raise ValueError(\n \"Rank must be known to use __getitem__ without a stop.\")\n stop = rank\n stop = _fix_slice_index(stop, rank, self.num_row_partitions)\n return self._slice_shape(start, stop)\n elif isinstance(index, int):\n if index < 0:\n if rank is None:\n raise ValueError(\n \"Rank must be known to use __getitem__ with a negative index.\")\n return self._dimension(rank + index)\n return self._dimension(index)\n else:\n raise TypeError(\"Argument is not an int or a slice\")\n\n def _num_elements(self):\n \"\"\"Number of elements in a shape.\n\n Returns:\n The number of elements in the shape.\n\n \"\"\"\n return math_ops.reduce_prod(self.inner_shape)\n\n def _num_slices_in_dimension(self, axis):\n \"\"\"The total size of a dimension (like nvals).\n\n Effectively, this is self[:axis+1]._num_elements()\n\n Example:\n shape = RaggedShape._from_inner_shape([2, 3, 4])\n shape._num_slices_in_dimension(0) = 2\n shape._num_slices_in_dimension(1) = 6\n shape._num_slices_in_dimension(2) = 24\n shape._num_slices_in_dimension(-1) = 24\n shape._num_slices_in_dimension(-2) = 6\n shape._num_slices_in_dimension(-2) = 2\n\n Args:\n axis: the last axis to include in the number of elements. 
If negative,\n then axis = axis + rank.\n\n Returns:\n The number of elements in the shape.\n \"\"\"\n if not isinstance(axis, int):\n raise TypeError(\"axis must be an integer\")\n if axis < 0:\n rank = self.rank\n if rank is None:\n raise ValueError(\n \"You can't use negative values if the rank is undefined\")\n axis = axis + rank\n if axis == 0:\n if self.num_row_partitions >= 1:\n return self.row_partitions[0].nrows()\n return self.inner_shape[0]\n if axis <= self.num_row_partitions:\n return self.row_partitions[axis - 1].nvals()\n # If self.num_row_partitions = 1, and\n # self.inner_shape=[3,5,6], and axis=2, then you want:\n # 15 = 3 * 5 = math_ops.reduce_prod(self.inner_shape[:2])\n # 2 = axis - (self.num_row_partitions - 1)\n # If num_row_partitions=0, and\n # self.inner_shape=[3,5,6] and axis=2, then you want:\n # 90 = 3 * 5 * 6 = math_ops.reduce_prod(self.inner_shape[:3])\n # 3 = axis - (self.num_row_partitions - 1)\n remainder = axis - (self.num_row_partitions - 1)\n return math_ops.reduce_prod(self.inner_shape[:remainder])\n\n def is_uniform(self, axis):\n \"\"\"Returns true if the indicated dimension is ragged.\"\"\"\n if not isinstance(axis, int):\n raise TypeError(\"axis must be an integer\")\n rank = self.rank\n if axis < 0:\n raise IndexError(\"Negative axis values are not supported\")\n elif rank is not None and axis >= rank:\n raise IndexError(\"Expected axis=%s < rank=%s\" % (axis, rank))\n else:\n return ((axis == 0 or axis > len(self._row_partitions)) or\n self._row_partitions[axis - 1].is_uniform())\n\n @property\n def rank(self):\n \"\"\"The number of dimensions in this shape, or None if unknown.\"\"\"\n inner_rank = self.inner_rank\n if inner_rank is None:\n return None\n else:\n return self.num_row_partitions + inner_rank\n\n @property\n def inner_shape(self):\n \"\"\"The inner dimension sizes for this shape.\n\n Returns:\n A 1-D integer `Tensor`.\n \"\"\"\n return self._inner_shape\n\n @property\n def inner_rank(self):\n \"\"\"The rank of inner_shape.\"\"\"\n return tensor_shape.dimension_value(self._inner_shape.shape[0])\n\n def _alt_inner_shape(self, new_inner_rank):\n \"\"\"Get an alternative inner shape with higher or lower rank.\n\n For the rank of the inner shape to be be higher, the last few ragged\n dimensions must have uniform_row_length.\n\n Args:\n new_inner_rank: the new rank of the inner_shape\n\n Returns:\n A new inner_shape of rank new_inner_rank.\n \"\"\"\n if new_inner_rank == 0:\n raise ValueError(\"new_inner_rank cannot be zero\")\n elif self.inner_rank == 0:\n raise ValueError(\"old inner_rank cannot be zero\")\n elif new_inner_rank == self.inner_rank:\n return self.inner_shape\n elif new_inner_rank < self.inner_rank:\n first_dimension = self._num_slices_in_dimension(-new_inner_rank)\n if new_inner_rank == 1:\n return array_ops.expand_dims(first_dimension, 0)\n remaining_dimensions = self.inner_shape[1 - new_inner_rank:]\n return array_ops.concat(\n [array_ops.expand_dims(first_dimension, 0), remaining_dimensions],\n axis=0)\n else:\n assert new_inner_rank > self.inner_rank\n new_dimensions = new_inner_rank - self.inner_rank\n if any(\n [not x.is_uniform() for x in self.row_partitions[-new_dimensions:]]):\n raise ValueError(\"Cannot get an inner shape over a ragged dimension\")\n first_dimension = self._num_slices_in_dimension(-new_inner_rank)\n new_dimensions = new_inner_rank - self.inner_rank\n new_dims = [first_dimension] + [\n x.uniform_row_length() for x in self.row_partitions[-new_dimensions:]\n ]\n return 
array_ops.concat([array_ops.stack(new_dims), self.inner_shape[1:]],\n axis=0)\n\n def with_inner_rank(self, inner_rank):\n \"\"\"Returns the same shape but a different inner_rank.\n\n All dimensions that are to represented in the inner_shape must be dense.\n See inner_rank.\n\n Args:\n inner_rank: the new inner_rank of the shape.\n\n Returns:\n the same shape but a different inner_rank\n\n Raises:\n ValueError if the new dense rank is invalid, or the old rank is unknown.\n \"\"\"\n rank = self.rank\n if rank is None:\n raise ValueError(\"Rank must be known to adjust inner_rank\")\n elif rank < 2:\n if inner_rank == rank:\n return self\n raise ValueError(\"Cannot change inner_rank if rank < 2\")\n else:\n # When self.rank is not None:\n # self.rank = self.inner_rank + self.num_row_partitions\n new_num_row_partitions = rank - inner_rank\n return self._with_num_row_partitions(new_num_row_partitions)\n\n def _with_num_row_partitions(self, num_row_partitions):\n \"\"\"Creates an identical shape with the given num_row_partitions.\n\n Note that the shape must be statically refactorable to this rank.\n In particular:\n * rank must be known.\n * num_row_partitions must be a nonnegative int.\n * num_row_partitions must be less than the rank of the shape\n * num_row_partitions must be greater or equal to the index of any ragged\n dimension.\n\n Note that if the num_row_partitions is the same, self is returned.\n\n Args:\n num_row_partitions: the target num_row_partitions (must be a nonnegative\n int).\n\n Returns:\n a shape with a (possibly) different num_row_partitions.\n\n Raises:\n ValueError: if the rank is unknown, the argument is not a nonnegative int,\n or there is a dimension that is nonuniform.\n \"\"\"\n rank = self.rank\n if rank is None:\n raise ValueError(\"Rank must be known to adjust num_row_partitions\")\n if not isinstance(num_row_partitions, int):\n raise ValueError(\"num_row_partitions must be an int\")\n if num_row_partitions < 0:\n raise ValueError(\"num_row_partitions must be nonnegative\")\n if num_row_partitions == self.num_row_partitions:\n return self\n if num_row_partitions >= rank:\n raise ValueError(\"num_row_partitions must be less than rank\")\n if num_row_partitions > self.num_row_partitions:\n num_row_partitions_diff = num_row_partitions - self.num_row_partitions\n\n nvals = self.row_partitions[-1].nvals() if (\n self.num_row_partitions > 0) else self._dimension(0)\n more_rp = []\n for i in range(num_row_partitions_diff):\n nrows = nvals\n row_length = self.inner_shape[i + 1]\n nvals = nrows * row_length\n rp = RowPartition.from_uniform_row_length(\n row_length, nrows=nrows, nvals=nvals)\n more_rp.append(rp)\n return RaggedShape(\n list(self.row_partitions) + more_rp,\n self._alt_inner_shape(self.rank - num_row_partitions))\n else:\n assert num_row_partitions < self.num_row_partitions\n return RaggedShape(self.row_partitions[:num_row_partitions],\n self._alt_inner_shape(self.rank - num_row_partitions))\n\n def with_dtype(self, dtype):\n \"\"\"Change the dtype of the shape.\"\"\"\n if dtype == self.dtype:\n return self\n else:\n return RaggedShape(self.row_partitions, self.inner_shape, dtype=dtype)\n\n def _as_row_partitions(self):\n \"\"\"Returns row partitions representing this shape.\n\n In order to represent a shape as row partitions, the rank of the shape\n must be known, and the shape must have rank at least one.\n\n Returns:\n A list of RowPartition objects.\n Raises:\n ValueError, if the shape cannot be represented by RowPartitions.\n \"\"\"\n rank = 
self.rank\n if rank is None:\n raise ValueError(\"rank must be known for _as_row_partitions\")\n elif rank < 1:\n raise ValueError(\"rank must be >= 1 for _as_row_partitions\")\n fully_ragged = self._with_num_row_partitions(rank - 1)\n return fully_ragged.row_partitions\n\n def _validate_flat_values_dynamically(self, flat_values):\n \"\"\"Test if flat_values have the right nvals dynamically.\"\"\"\n if self.row_partitions:\n assert_op = check_ops.assert_equal(\n self.row_partitions[-1].nvals(),\n array_ops.shape(flat_values, out_type=self.dtype)[0],\n message=\"Last row partition does not match flat_values.\")\n return control_flow_ops.with_dependencies([assert_op], flat_values)\n return flat_values\n\n def _validate_flat_values(self, flat_values):\n \"\"\"Test if flat_values have the right nvals.\"\"\"\n if not isinstance(flat_values, ops.Tensor):\n return flat_values\n if self.row_partitions:\n last_row_partition = self.row_partitions[-1]\n flat_values_shape = flat_values.shape\n if flat_values_shape is None:\n return self._validate_flat_values_dynamically(flat_values)\n first_dim_flat_values = flat_values_shape[0]\n if isinstance(first_dim_flat_values, tensor_shape.Dimension):\n first_dim_flat_values = first_dim_flat_values.value\n if first_dim_flat_values is None:\n return self._validate_flat_values_dynamically(flat_values)\n static_nvals = last_row_partition.static_nvals\n if static_nvals is None:\n return self._validate_flat_values_dynamically(flat_values)\n if first_dim_flat_values != static_nvals:\n raise ValueError(\"Last row partition does not match flat_values.\")\n return flat_values\n\n def _add_row_partitions(self, flat_values, validate=False):\n \"\"\"Add row partitions to flat_values, if necessary.\n\n If the shape is truly ragged, then this adds the row_partitions.\n\n The the shape is dense, then this just returns flat_values.\n\n Args:\n flat_values: the flat_values of a ragged tensor with this shape, or a\n dense tensor with this shape.\n validate: validate the flat_values have the right first dimension.\n\n Returns:\n flat_values reshaped to have row_partitions.\n \"\"\"\n if self.row_partitions:\n if validate:\n flat_values = self._validate_flat_values(flat_values)\n return ragged_tensor.RaggedTensor._from_nested_row_partitions(\n flat_values, self.row_partitions, validate=False)\n else:\n return flat_values\n\n\ndef broadcast_dynamic_shape(shape_x: RaggedShape,\n shape_y: RaggedShape) -> RaggedShape:\n \"\"\"Returns the shape formed by broadcasting two shapes to be compatible.\n\n 1. If shape_x and shape_y both have row_partitions, then fail if their dtypes\n don't match.\n 2. If neither has row_partitions and they have different dtypes,\n go with int64.\n 3. 
If one has row_partitions, go with that dtype.\n\n Args:\n shape_x: A `RaggedShape`\n shape_y: A `RaggedShape`\n\n Returns:\n A `RaggedShape`.\n Raises:\n ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.\n \"\"\"\n if not isinstance(shape_x, RaggedShape):\n raise TypeError(\"shape_x must be a RaggedShape\")\n if not isinstance(shape_y, RaggedShape):\n raise TypeError(\"shape_y must be a RaggedShape\")\n\n return broadcast_dynamic_shape_extended(shape_x, shape_y)[0]\n\n\ndef broadcast_to(rt_input, shape: RaggedShape):\n \"\"\"Broadcasts a potentially ragged tensor to a ragged shape.\n\n Tiles `rt_input` as necessary to match the given shape.\n\n Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`.\n\n Args:\n rt_input: The potentially ragged tensor to broadcast.\n shape: A `RaggedShape`\n\n Returns:\n A potentially ragged tensor whose values are taken from\n `rt_input`, and whose shape matches `shape`.\n \"\"\"\n if not isinstance(shape, RaggedShape):\n raise TypeError(\"shape must be a RaggedShape\")\n rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n origin_shape = None\n if ragged_tensor.is_ragged(rt_input):\n if shape.num_row_partitions != 0:\n if rt_input.row_splits.dtype != shape.dtype:\n raise ValueError(\"Cannot coerce row_splits.dtype\")\n else:\n shape = shape.with_dtype(rt_input.row_splits.dtype)\n origin_shape = RaggedShape.from_tensor(rt_input)\n else:\n if shape.num_row_partitions != 0:\n origin_shape = RaggedShape.from_tensor(rt_input, dtype=shape.dtype)\n else:\n origin_shape = RaggedShape.from_tensor(rt_input, dtype=dtypes.int64)\n shape = shape.with_dtype(dtype=dtypes.int64)\n\n broadcaster = _get_broadcaster(origin_shape, shape)\n return broadcaster.broadcast(rt_input)\n\n\ndef broadcast_dynamic_shape_extended(\n a: RaggedShape,\n b: RaggedShape): # -> Tuple[RaggedShape, _Broadcaster, _Broadcaster]\n \"\"\"Gets the smallest shape to which a and b can broadcast.\n\n In order to create the smallest shape, one must also do most of the\n work to figure out how to transform from the shapes given. 
Thus, in addition\n to returning the shape, it also creates transformations from the\n original shapes to the result.\n\n This is the equivalent of:\n\n c = broadcast_dynamic_shape(a, b)\n ac = get_broadcaster(a, c)\n bc = get_broadcaster(b, c)\n return (c, ac, bc)\n\n Args:\n a: a RaggedShape\n b: a RaggedShape\n\n Returns:\n A triple of a shape and two broadcasters.\n \"\"\"\n if a.row_partitions and b.row_partitions:\n if a.dtype != b.dtype:\n raise ValueError(\"Dtypes don't match\")\n elif a.dtype != b.dtype:\n if a.row_partitions:\n b = b.with_dtype(a.dtype)\n elif b.row_partitions:\n a = a.with_dtype(b.dtype)\n else:\n a = a.with_dtype(dtypes.int64)\n b = b.with_dtype(dtypes.int64)\n\n if (a.rank is None or b.rank is None):\n raise ValueError(\"Unable to broadcast: unknown rank\")\n elif a.rank == 0:\n return (b, _Broadcaster(a, b, []), _get_identity_broadcaster(b))\n elif b.rank == 0:\n return (a, _get_identity_broadcaster(a), _Broadcaster(b, a, []))\n elif a.rank == 1 and b.rank == 1:\n [a_layer, b_layer,\n target] = _broadcast_dynamic_shape_one_layer(a.inner_shape, b.inner_shape)\n target_shape = RaggedShape._from_inner_shape(target) # pylint: disable=protected-access\n return (target_shape, _Broadcaster(a, target_shape, [a_layer]),\n _Broadcaster(b, target_shape, [b_layer]))\n\n if a.rank > b.rank:\n (c, bc, ac) = _broadcast_dynamic_shape_extended_helper(b, a) # pylint: disable=arguments-out-of-order\n\n return (c, ac, bc)\n\n return _broadcast_dynamic_shape_extended_helper(a, b)\n\n\ndef _row_partitions_identical(shape_a, shape_b):\n \"\"\"Returns True iff all row_partitions in shapes are identical.\"\"\"\n return ((shape_a.num_row_partitions == shape_b.num_row_partitions) and all(\n a is b for a, b in zip(shape_a.row_partitions, shape_b.row_partitions)))\n\n\n# TODO(martinz): Preserve shapes better (see CL/414806185)\ndef ragged_binary_elementwise_op_impl(op, x, y):\n \"\"\"Binary elementwise api handler for RaggedTensors.\"\"\"\n x_is_ragged = ragged_tensor.is_ragged(x)\n y_is_ragged = ragged_tensor.is_ragged(y)\n\n # Convert args to tensors.\n x = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n x, preferred_dtype=(y.dtype if y_is_ragged else None))\n y = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n y, preferred_dtype=x.dtype)\n\n if x_is_ragged and y_is_ragged:\n x, y = ragged_tensor.match_row_splits_dtypes(x, y)\n\n if ((x_is_ragged and y_is_ragged) or\n (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or\n (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):\n shape_x = RaggedShape.from_tensor(x)\n shape_y = RaggedShape.from_tensor(y)\n if shape_x.dtype != shape_y.dtype:\n if not x_is_ragged:\n shape_x = shape_x.with_dtype(shape_y.dtype)\n elif not y_is_ragged:\n shape_y = shape_y.with_dtype(shape_x.dtype)\n\n if _row_partitions_identical(shape_x, shape_y):\n # At this point, both x and y must be ragged.\n return shape_x._add_row_partitions( # pylint: disable=protected-access\n op(x.flat_values, y.flat_values), validate=False)\n\n (shape_z, bcast_xz,\n bcast_yz) = broadcast_dynamic_shape_extended(shape_x, shape_y)\n x_new_flat = bcast_xz.broadcast_flat_values(x)\n y_new_flat = bcast_yz.broadcast_flat_values(y)\n z_flat = op(x_new_flat, y_new_flat)\n return shape_z._add_row_partitions(z_flat, validate=True) # pylint: disable=protected-access\n\n x_values = x.flat_values if ragged_tensor.is_ragged(x) else x\n y_values = y.flat_values if ragged_tensor.is_ragged(y) else y\n mapped_values = op(x_values, y_values)\n if isinstance(mapped_values, 
bool):\n return mapped_values # Special case for tensor_equals.\n if ragged_tensor.is_ragged(x):\n return x.with_flat_values(mapped_values)\n else:\n return y.with_flat_values(mapped_values)\n\n\ndef _find_dtype_helper(value, preferred):\n \"\"\"Helper for _find_dtype.\"\"\"\n if preferred is not None:\n return preferred\n elif isinstance(value, RowPartition):\n return value.dtype\n elif isinstance(value, dtypes.DType):\n return value\n elif isinstance(value, int):\n return None\n elif isinstance(value, list):\n return None\n elif isinstance(value, tuple):\n return None\n elif isinstance(value, core.Tensor):\n return value.dtype\n return value.dtype\n\n\ndef _find_dtype(value, preferred):\n \"\"\"Returns the preferred dtype of value or preferred if preferred != None.\n\n This is used as an operator to pass over multiple objects in decreasing order\n of priority until there is a preferred dtype for one. For example, if you were\n adding three tensor-ish things (some tensors, some lists), and needed a\n preferred dtype, you could use this as:\n\n def adding(a, b, c, dtype = None):\n dtype = _find_dtype(a, dtype)\n dtype = _find_dtype(b, dtype)\n dtype = _find_dtype(c, dtype)\n if dtype is None:\n dtype = tf.float32\n ...Code continues here...\n\n Args:\n value: a list, value, RowPartition, or tensor.\n preferred: a given dtype. If not None, this will be returned.\n\n Returns:\n an optional dtype.\n \"\"\"\n result = _find_dtype_helper(value, preferred)\n if (result == dtypes.int64 or result == dtypes.int32 or result is None):\n return result\n raise ValueError(\"Illegal dtype: \" + str(result))\n\n\ndef _find_dtype_iterable(iterable, dtype):\n \"\"\"Find the preferred dtype of a list of objects.\n\n This will go over the iterable, and use the first object with a preferred\n dtype. The dtype passed has highest priority if it is not None.\n\n Args:\n iterable: an iterable with things that might have a dtype.\n dtype: an overriding dtype, or None.\n\n Returns:\n an optional dtype.\n \"\"\"\n if dtype is not None:\n return dtype\n for x in iterable:\n dtype = _find_dtype(x, dtype)\n return dtype\n\n\nclass _LayerBroadcaster(abc.ABC):\n \"\"\"A broadcaster of a single layer.\n\n Although this class does not literally contain a gather_index, the reference\n implementation is defined through a gather_index. Thus, any subclasses should\n first define the gather_index property. 
Other functions can be overridden\n for optimization, but it should not change the behavior.\n \"\"\"\n\n @property\n @abc.abstractmethod\n def gather_index(self):\n \"\"\"Returns a 1D tensor.\n\n The size of the 1D tensor is equal to the destination size.\n\n The ith element of the result is the index of the source of the ith element.\n \"\"\"\n pass\n\n @property\n def dtype(self):\n \"\"\"Returns the dtype of the broadcast.\"\"\"\n return self.gather_index.dtype\n\n @abc.abstractmethod\n def with_dtype(self, dtype):\n \"\"\"Returns an identical _LayerBroadcaster with a different dtype.\"\"\"\n pass\n\n def __repr__(self):\n return str(self.gather_index)\n\n @classmethod\n def from_gather_index(cls, gather_index):\n \"\"\"Create a broadcaster from a gather_index.\"\"\"\n return _GatherLayerBroadcaster(gather_index)\n\n @classmethod\n def first_layer(cls, nrows_source, nrows_target):\n \"\"\"Create a broadcaster from a gather_index.\"\"\"\n gather_index = _first_layer_gather_index(nrows_source, nrows_target)\n return _LayerBroadcaster.from_gather_index(gather_index)\n\n @classmethod\n def get_singleton_broadcaster(cls, target_size):\n \"\"\"Broadcast from 1 element to target_size elements.\"\"\"\n return _LayerBroadcaster.from_gather_index(\n array_ops.zeros(target_size, dtype=target_size.dtype))\n\n @abc.abstractmethod\n def with_dependencies(self, checks):\n \"\"\"Add dependencies to a _LayerBroadcaster.\n\n Args:\n checks: a list of ops that need to be run before any tensors from the\n Broadcaster are used.\n\n Returns:\n a copy of this _LayerBroadcaster with dependencies added.\n \"\"\"\n pass\n\n @classmethod\n def get_identity_broadcaster(cls, nvals):\n \"\"\"Create an identity broadcaster.\n\n TODO(martinz): an identity broadcaster can be far more efficient than a\n generic broadcaster. 
Add an optimized implementation.\n Args:\n nvals: the number of values for the broadcaster.\n\n Returns:\n an identity broadcaster from [0....nvals-1] to [0...nvals-1]\n \"\"\"\n return _GatherLayerBroadcaster(math_ops.range(nvals))\n\n def broadcast_tensor(self, tensor):\n \"\"\"Broadcast from a dense tensor.\n\n It is assumed that the first axis of the dense tensor is indexed by the\n source shape, and at the end, the first axis of the dense tensor is\n indexed by the destination shape.\n\n Args:\n tensor: a dense tensor.\n\n Returns:\n A dense tensor.\n \"\"\"\n return array_ops.gather(tensor, self.gather_index)\n\n def dest_nrows(self):\n \"\"\"Return the number of rows in the resulting gather, or None if tiling.\"\"\"\n return math_ops.cast(\n array_ops.shape(self.gather_index)[0], dtype=self.dtype)\n\n def broadcast_row_partition(self, rp):\n \"\"\"Return a new shape where the rows are broadcasted.\n\n *--self--->*\n | |\n rp result\n | |\n V V\n *--------->*\n\n This is equivalent to:\n return RowPartition.from_row_lengths(self.broadcast(rp.row_lengths()))\n\n However, if the shape has uniform row length, then that property is\n maintained.\n\n Args:\n rp: a row partition.\n\n Returns:\n a RowPartition representing a broadcast version of this row partition.\n \"\"\"\n if not rp.is_uniform():\n return RowPartition.from_row_lengths(\n self.broadcast_tensor(rp.row_lengths()))\n else:\n return RowPartition.from_uniform_row_length(\n rp.uniform_row_length(),\n nvals=rp.uniform_row_length() * self.dest_nrows(),\n nrows=self.dest_nrows())\n\n def next_layer(self, original_rp, broadcast_rp):\n r\"\"\"Create the next layer gather_index whether or not a broadcast happens.\n\n *---------self------->*\n | |\n original_rp broadcast_rp\n | |\n \\|/ \\|/\n *--next_broadcaster-->*\n Args:\n original_rp: the original row partition.\n broadcast_rp: the target row partition.\n\n Returns:\n the gather_index for next_broadcaster.\n\n \"\"\"\n gather_index = _next_layer_gather_index(self, original_rp, broadcast_rp)\n return _LayerBroadcaster.from_gather_index(gather_index)\n\n\nclass _GatherLayerBroadcaster(_LayerBroadcaster):\n \"\"\"Implements _LayerBroadcaster with an explicit gather_index.\n\n For example, suppose that the source shape is:\n [*],[*,*]\n And the target shape is:\n [*],[*,*],[*],[*,*]\n Then, this can be represented with a map:\n [0,1,2,0,1,2]\n\n \"\"\"\n\n def __init__(self, gather_index):\n gather_index = ops.convert_to_tensor(gather_index)\n if (gather_index.dtype != dtypes.int64 and\n gather_index.dtype != dtypes.int32):\n raise ValueError(\"gather_index must be int64 or int32\")\n self._gather_index = gather_index\n\n @property\n def gather_index(self):\n return self._gather_index\n\n def with_dtype(self, dtype):\n return _GatherLayerBroadcaster(math_ops.cast(self._gather_index, dtype))\n\n def with_dependencies(self, checks):\n new_gather_index = control_flow_ops.with_dependencies(\n checks, self._gather_index)\n return _GatherLayerBroadcaster(new_gather_index)\n\n\nclass _Broadcaster:\n \"\"\"A _Broadcaster represents a transformation from one shape to another.\n\n It provides a transform for each axis of the source shape to the\n corresponding axis of the destination shape.\n\n \"\"\"\n\n def __init__(self,\n source_shape,\n target_shape,\n layer_broadcasters,\n dtype=None):\n \"\"\"Create a broadcaster.\n\n Do not call directly.\n The source_shape, target_shape, and layer_broadcasters are converted\n to have the same dtype.\n\n Note: source_shape.rank and target_shape.rank 
must be known.\n Args:\n source_shape: the source RaggedShape\n target_shape: the target RaggedShape\n layer_broadcasters: List[_LayerBroadcaster] of length source_shape.rank.\n dtype: the preferred dtype of the broadcaster.\n\n Raises:\n TypeError: if the input types don't match.\n \"\"\"\n if not isinstance(source_shape, RaggedShape):\n raise TypeError(\"source_shape is not a RaggedShape\")\n if not isinstance(target_shape, RaggedShape):\n raise TypeError(\"target_shape is not a RaggedShape\")\n if not isinstance(layer_broadcasters, list):\n raise TypeError(\"layer_broadcasters not a list: \" +\n str(layer_broadcasters))\n for bc in layer_broadcasters:\n if not isinstance(bc, _LayerBroadcaster):\n raise TypeError(\"Not a LayerBroadcaster: \" + str(bc))\n\n dtype = _find_dtype(source_shape, dtype)\n dtype = _find_dtype(target_shape, dtype)\n dtype = _find_dtype_iterable(layer_broadcasters, dtype)\n dtype = _find_dtype(dtypes.int64, dtype)\n self._source_shape = source_shape.with_dtype(dtype)\n self._target_shape = target_shape.with_dtype(dtype)\n self._layer_broadcasters = [x.with_dtype(dtype) for x in layer_broadcasters]\n\n def __repr__(self):\n return (\"{src_shape:\" + str(self._source_shape) + \", target_shape:\" +\n str(self._target_shape) + \" layer_broadcasters: \" +\n str(self._layer_broadcasters) + \"}\")\n\n def with_dtype(self, dtype):\n \"\"\"Return a copy of this Broadcaster with a different dtype.\"\"\"\n return _Broadcaster(self._source_shape, self._target_shape,\n self._layer_broadcasters, dtype)\n\n @property\n def source_shape(self):\n return self._source_shape\n\n @property\n def target_shape(self):\n return self._target_shape\n\n @property\n def dtype(self):\n return self._source_shape.dtype\n\n def _target_inner_shape_int32(self):\n new_inner_shape = self.target_shape.inner_shape\n if new_inner_shape.dtype == dtypes.int64:\n new_inner_shape = math_ops.cast(new_inner_shape, dtype=dtypes.int32)\n return new_inner_shape\n\n # pylint:disable=protected-access\n def broadcast_flat_values(self, rt, inner_dimensions=True):\n \"\"\"flat_values of a ragged tensor broadcast to target_shape.\n\n If inner_dimensions==True, then the result is a dense tensor with shape\n target_shape.inner_shape, the flat values of the broadcasted shape.\n\n If you add target_shape.row_partitions, you will get the full broadcasted\n shape.\n\n If inner_dimensions==False, the result is a dense tensor that satsifies\n certain properties:\n 1. broadcast_to(result, target_shape.inner_shape) will give the result\n if inner_dimensions==True.\n 2. Either (a) (result.rank < target_shape.inner_rank)\n or (b) (result.shape[0] == target_shape.inner_shape[0]).\n 3. result.rank = min(target_shape.inner_rank, rt.rank)\n 4. For i < target_shape.inner_rank - 1, and i < rt.rank,\n and if rt.shape[-i]!=1, then result.shape[-i]=target_shape[-i].\n Args:\n rt: a ragged or dense tensor.\n inner_dimensions: if true, broadcast the inner dimensions as well.\n\n Returns:\n a dense tensor\n \"\"\"\n if ragged_tensor.is_ragged(rt):\n rt = rt.flat_values\n # If rt was a regular tensor, it is its own flat_values.\n if self.target_shape.rank == 0:\n return rt\n inner_rank = self.target_shape.inner_rank\n if inner_rank > self._source_shape.rank:\n # The dense rank is larger than the whole shape. 
So, we make the shape\n # dense.\n if self.source_shape.num_row_partitions > 0:\n rt = array_ops.reshape(\n rt, self.source_shape._alt_inner_shape(self.source_shape.rank))\n # rt.rank == self._source_shape.rank < inner_rank\n # Here, property 2a holds.\n if inner_dimensions:\n return array_ops.broadcast_to(rt, self._target_inner_shape_int32())\n return rt\n else:\n if self._source_shape.inner_rank != inner_rank:\n rt = array_ops.reshape(rt,\n self._source_shape._alt_inner_shape(inner_rank)) # pylint:disable=protected-access\n # After the reshape, rt is flat_values with inner_rank.\n flat_broadcaster = self._layer_broadcasters[-inner_rank]\n rt = flat_broadcaster.broadcast_tensor(rt)\n # Here, property 2b holds.\n if inner_dimensions:\n rt = array_ops.broadcast_to(rt, self._target_inner_shape_int32())\n return rt\n\n def broadcast(self, rt):\n \"\"\"Broadcast a tensor of source_shape to target_shape.\"\"\"\n flat_values = self.broadcast_flat_values(rt)\n return self.target_shape._add_row_partitions(flat_values) # pylint:disable=protected-access\n\n\ndef _get_layer_broadcasters_from_rps(zero_broadcaster, source_rps, target_rps):\n \"\"\"Get LayerBroadcasters from RowPartitions.\n\n *--zero_broadcaster->*\n | |\n source_rps[0] target_rps[0]\n | |\n V V\n *---result[1]------->*\n | |\n source_rps[1] target_rps[1]\n | |\n V V\n *---result[2]------->*\n .\n .\n .\n *---result[k-1]----->*\n | |\n source_rps[k] target_rps[k]\n | |\n V V\n *---result[k]------->*\n\n Note: result[0] = zero_broadcaster\n\n Args:\n zero_broadcaster: a broadcaster between the source and target row\n partitions' rows, and equal to result[0].\n source_rps: source row partitions.\n target_rps: target row partitions (same length as source_rps).\n\n Returns:\n result: a list of LayerBroadcasters.\n \"\"\"\n if not isinstance(zero_broadcaster, _LayerBroadcaster):\n raise TypeError(\"Not a _LayerBroadcaster: \" + str(zero_broadcaster))\n assert len(source_rps) == len(target_rps)\n if not source_rps:\n return [zero_broadcaster]\n next_broadcaster = zero_broadcaster.next_layer(source_rps[0], target_rps[0])\n tail_broadcasters = _get_layer_broadcasters_from_rps(next_broadcaster,\n source_rps[1:],\n target_rps[1:])\n return [zero_broadcaster] + tail_broadcasters\n\n\ndef _get_broadcaster(source_shape, target_shape):\n \"\"\"Get a _Broadcaster from source_shape to target_shape.\"\"\"\n if source_shape.dtype != target_shape.dtype:\n raise ValueError(\"The source and target row_split dtypes should be equal\")\n\n if (source_shape.rank is None or target_shape.rank is None):\n raise ValueError(\"Rank of source and target must be statically known\")\n elif source_shape.rank > target_shape.rank:\n raise ValueError(\"Cannot broadcast to a shape with smaller rank\")\n elif source_shape.rank == 0:\n return _Broadcaster(source_shape, target_shape, [])\n elif target_shape.rank == 1:\n assert source_shape.rank == 1\n layer = _LayerBroadcaster.first_layer(source_shape.inner_shape[0],\n target_shape.inner_shape[0])\n return _Broadcaster(source_shape, target_shape, [layer])\n\n assert source_shape.rank <= target_shape.rank\n assert target_shape.rank >= 2\n assert source_shape.rank >= 1\n\n source_rps = source_shape._as_row_partitions() # pylint: disable=protected-access\n\n target_rps = target_shape._as_row_partitions() # pylint: disable=protected-access\n\n assert len(target_rps) >= 1\n assert len(source_rps) <= len(target_rps)\n source_nrows = source_shape[0]\n if len(source_rps) < len(target_rps):\n # Note: this includes the case where 
len(source_rps)==0.\n # Here we begin at -1, one dimension before source_rps[0].\n # neg_one_source_rp | neg_one_target_rp=target_rps[-(len(source_rps)+1)]\n # source_rps[0] | target_rps[-len(source_rps)]\n # source_rps[1] | target_rps[1-len(source_rps)]\n # ... | ...\n # source_rps[-1] | target_rps[-1]\n neg_one_source_rp = RowPartition.from_uniform_row_length(\n uniform_row_length=source_nrows, nrows=1, nvals=source_nrows)\n neg_one_target_rp = target_rps[-(len(source_rps) + 1)]\n neg_one_broadcaster = _LayerBroadcaster.get_singleton_broadcaster(\n neg_one_target_rp.nrows())\n zeroth_broadcaster = neg_one_broadcaster.next_layer(neg_one_source_rp,\n neg_one_target_rp)\n target_rps_tail = target_rps[-len(source_rps):] if len(\n source_rps) >= 1 else []\n\n layers = _get_layer_broadcasters_from_rps(zeroth_broadcaster, source_rps,\n target_rps_tail)\n return _Broadcaster(source_shape, target_shape, layers)\n else:\n assert len(target_rps) == len(source_rps)\n zeroth_broadcaster = _LayerBroadcaster.first_layer(source_rps[0].nrows(),\n target_rps[0].nrows())\n layers = _get_layer_broadcasters_from_rps(zeroth_broadcaster, source_rps,\n target_rps)\n\n return _Broadcaster(source_shape, target_shape, layers)\n\n\ndef _get_identity_broadcaster(shape):\n \"\"\"Gets a Broadcaster for two identical shapes.\"\"\"\n if shape.rank is None:\n raise ValueError(\"Shape must have a defined rank\")\n layers = [\n _LayerBroadcaster.get_identity_broadcaster(\n shape._num_slices_in_dimension(i)) for i in range(shape.rank) # pylint: disable=protected-access\n ]\n return _Broadcaster(shape, shape, layers)\n\n\ndef _broadcast_dynamic_shape_one_layer(a, b):\n \"\"\"Broadcast two vectors, given their shapes.\n\n Args:\n a: the number of rows in a.\n b: the number of rows in b.\n\n Returns:\n (layer_a, layer_b, target_shape)\n layer_a is a _LayerBroadcaster from a to the target_shape.\n layer_b is a _LayerBroadcaster from b to the target_shape.\n target_shape is the target_shape\n\n Raises:\n InvalidArgumentError if the shapes are not consistent.\n \"\"\"\n a_0 = a[0]\n b_0 = b[0]\n can_broadcast_from_a = math_ops.equal(a_0, 1)\n can_broadcast_from_b = math_ops.equal(b_0, 1)\n\n def broadcast_from_a():\n # Assumes a_0 == 1\n a_layer = array_ops.zeros(b_0, dtype=b_0.dtype)\n b_layer = math_ops.range(b_0)\n target = b\n return [a_layer, b_layer, target]\n\n def broadcast_from_b():\n # Assumes b_0 == 1\n a_layer = math_ops.range(a_0)\n b_layer = array_ops.zeros(a_0, dtype=a_0.dtype)\n target = a\n return [a_layer, b_layer, target]\n\n def broadcast_noop():\n # Assumes a_0 == 1\n a_layer = math_ops.range(a_0)\n b_layer = math_ops.range(b_0)\n target = b\n return [a_layer, b_layer, target]\n\n def broadcast_not_from_a():\n can_broadcast_from_b = math_ops.equal(b_0, 1)\n return control_flow_ops.cond(\n can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop)\n\n nrows_equal = math_ops.equal(a_0, b_0)\n can_broadcast = math_ops.logical_or(\n can_broadcast_from_a,\n math_ops.logical_or(can_broadcast_from_b, nrows_equal))\n\n check_can_broadcast = check_ops.assert_equal(\n can_broadcast, True, message=\"Cannot broadcast\")\n\n results = control_flow_ops.cond(\n can_broadcast_from_a,\n true_fn=broadcast_from_a,\n false_fn=broadcast_not_from_a)\n\n results = [\n control_flow_ops.with_dependencies([check_can_broadcast], x)\n for x in results\n ]\n [a_gi, b_gi, target] = results\n a_layer = _LayerBroadcaster.from_gather_index(a_gi)\n b_layer = _LayerBroadcaster.from_gather_index(b_gi)\n return [a_layer, b_layer, 
target]\n\n\ndef _broadcast_dynamic_shape_first_layer(a_0, b_0):\n \"\"\"Broadcast the first layer of two dynamic shapes given the dimensions.\n\n Args:\n a_0: the number of rows in a.\n b_0: the number of rows in b.\n\n Returns:\n (use_a, layer_a, layer_b)\n where use_a is true if the target provably equals a, false otherwise.\n layer_a is a _LayerBroadcaster from a to the target.\n layer_b is a _LayerBroadcaster from b to the target.\n \"\"\"\n can_broadcast_from_a = math_ops.equal(a_0, constant_op.constant(1, a_0.dtype))\n can_broadcast_from_b = math_ops.equal(b_0, constant_op.constant(1, b_0.dtype))\n\n def broadcast_from_a():\n # Assumes a_0 == 1\n a_layer = array_ops.zeros(b_0, dtype=b_0.dtype)\n b_layer = math_ops.range(b_0)\n return [a_layer, b_layer]\n\n def broadcast_from_b():\n # Assumes b_0 == 1\n a_layer = math_ops.range(a_0)\n b_layer = array_ops.zeros(a_0, dtype=a_0.dtype)\n return [a_layer, b_layer]\n\n def broadcast_noop():\n # Assumes a_0 == b_0\n a_layer = math_ops.range(a_0)\n b_layer = math_ops.range(b_0)\n return [a_layer, b_layer]\n\n def broadcast_not_from_a():\n return control_flow_ops.cond(\n can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop)\n\n # Ideally, this would only block control flow on broadcast_noop, but\n # the control flow doesn't seem to work.\n can_broadcast = math_ops.logical_or(\n math_ops.logical_or(can_broadcast_from_a, can_broadcast_from_b),\n math_ops.equal(a_0, b_0))\n\n result = control_flow_ops.cond(\n can_broadcast_from_a,\n true_fn=broadcast_from_a,\n false_fn=broadcast_not_from_a)\n\n return [\n _LayerBroadcaster.from_gather_index(\n control_flow_ops.with_dependencies(\n [check_ops.assert_equal(can_broadcast, True)], x)) for x in result\n ]\n\n\ndef _broadcast_half(\n ac_0: _LayerBroadcaster,\n a_1: RowPartition) -> Tuple[_LayerBroadcaster, RowPartition]:\n \"\"\"Does a NOOP broadcast of a_1.\n\n *-ac_0-->*\n | |\n a_1 c_1\n | |\n V V\n *-ac_1-->*\n\n Note that by definition this cannot fail: there is always a well-defined\n NOOP broadcast. 
This is usually intended as half of broadcasting two shapes\n together.\n Args:\n ac_0: previous LayerBroadcaster\n a_1: previous RowPartition\n\n Returns:\n [ac_1, c_1] where ac_1 is the next LayerBroadcaster, and c_1 is the\n broadcast RowPartition\n \"\"\"\n c_1 = ac_0.broadcast_row_partition(a_1)\n old_value_rowids = array_ops.gather(ac_0.gather_index, c_1.value_rowids())\n old_row_starts = array_ops.gather(a_1.row_splits(), old_value_rowids)\n gather_index = old_row_starts + c_1.offsets_in_rows()\n return [_LayerBroadcaster.from_gather_index(gather_index), c_1]\n\n\ndef _broadcast_dynamic_shape_next_layer_half_ragged(\n ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition,\n b_1: RowPartition\n) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]:\n r\"\"\"Broadcast target and next layer broadcaster of two dynamic shapes.\n\n a_1 is uniform, and b_1 is ragged.\n *--ac_0-->*<--bc_0--*\n | | |\n a_1 c_1 b_1\n | | |\n V V V\n *--ac_1-->*<--bc_1--*\n\n Args:\n ac_0: _LayerBroadcaster from a to c in the previous layer.\n bc_0: _LayerBroadcaster from b to c in the previous layer.\n a_1: a uniform RowPartition for the next layer of a.\n b_1: a ragged RowPartition for the next layer of b.\n\n Returns:\n (c_1, ac_1, bc_1)\n c_1: a RowPartition for the next layer of the dynamic shape.\n ac_1: _LayerBroadcaster from a to c in the next layer.\n bc_1: _LayerBroadcaster from b to c in the next layer.\n \"\"\"\n if not isinstance(ac_0, _LayerBroadcaster):\n raise TypeError(\"ac_0 should be a _LayerBroadcaster\")\n if not isinstance(bc_0, _LayerBroadcaster):\n raise TypeError(\"bc_0 should be a _LayerBroadcaster\")\n if not isinstance(a_1, RowPartition):\n raise TypeError(\"a_1 should be a RowPartition\")\n if not isinstance(b_1, RowPartition):\n raise TypeError(\"b_1 should be a RowPartition\")\n\n assert a_1.is_uniform()\n assert not b_1.is_uniform()\n\n def broadcast_noop():\n # The sides must be \"equal\".\n [ac_1, c_1a] = _broadcast_half(ac_0, a_1)\n [bc_1, c_1b] = _broadcast_half(bc_0, b_1)\n checks = [check_ops.assert_equal(c_1a.row_splits(), c_1b.row_splits())]\n return [\n control_flow_ops.with_dependencies(checks, x)\n for x in [a_1.row_splits(), ac_1.gather_index, bc_1.gather_index]\n ]\n\n def broadcast_a():\n [bc_1, c_1b] = _broadcast_half(bc_0, b_1)\n ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids())\n return [\n c_1b.row_splits(),\n ac_1_gather_index,\n bc_1.gather_index,\n ]\n\n can_broadcast_a = math_ops.equal(a_1.uniform_row_length(), 1)\n\n [c_1_row_splits, ac_1_gather_index,\n bc_1_gather_index] = control_flow_ops.cond(\n can_broadcast_a, true_fn=broadcast_a, false_fn=broadcast_noop)\n\n c_1 = RowPartition.from_row_splits(c_1_row_splits)\n ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index)\n bc_1 = _LayerBroadcaster.from_gather_index(bc_1_gather_index)\n return [c_1, ac_1, bc_1]\n\n\ndef _broadcast_dynamic_shape_next_layer_both_uniform(\n ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition,\n b_1: RowPartition\n) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]:\n r\"\"\"Broadcast target and next layer broadcaster of two uniform dynamic shapes.\n\n *--ac_0-->*<--bc_0--*\n | | |\n a_1 c_1 b_1\n | | |\n V V V\n *--ac_1-->*<--bc_1--*\n\n Args:\n ac_0: _LayerBroadcaster from a to c in the previous layer.\n bc_0: _LayerBroadcaster from b to c in the previous layer.\n a_1: a RowPartition for the next layer of a.\n b_1: a RowPartition for the next layer of b.\n\n Returns:\n (c_1, ac_1, bc_1)\n c_1: 
a RowPartition for the next layer of the dynamic shape.\n ac_1: _LayerBroadcaster from a to c in the next layer.\n bc_1: _LayerBroadcaster from b to c in the next layer.\n \"\"\"\n if not isinstance(ac_0, _LayerBroadcaster):\n raise TypeError(\"ac_0 should be a _LayerBroadcaster\")\n if not isinstance(bc_0, _LayerBroadcaster):\n raise TypeError(\"bc_0 should be a _LayerBroadcaster\")\n if not isinstance(a_1, RowPartition):\n raise TypeError(\"a_1 should be a RowPartition\")\n if not isinstance(b_1, RowPartition):\n raise TypeError(\"b_1 should be a RowPartition\")\n assert a_1.is_uniform()\n assert b_1.is_uniform()\n\n def broadcast_noop():\n # Assumes a_1.uniform_row_length() == b_1.uniform_row_length()\n # Both sides broadcast to a single shape.\n [ac_1, _] = _broadcast_half(ac_0, a_1)\n [bc_1, _] = _broadcast_half(bc_0, b_1)\n return [a_1.uniform_row_length(), ac_1.gather_index, bc_1.gather_index]\n\n def broadcast_a():\n [bc_1, c_1b] = _broadcast_half(bc_0, b_1)\n ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids())\n return [\n b_1.uniform_row_length(),\n ac_1_gather_index,\n bc_1.gather_index,\n ]\n\n def broadcast_b():\n [ac_1, c_1a] = _broadcast_half(ac_0, a_1)\n bc_1_gather_index = array_ops.gather(bc_0.gather_index, c_1a.value_rowids())\n return [a_1.uniform_row_length(), ac_1.gather_index, bc_1_gather_index]\n\n can_broadcast_b = math_ops.equal(b_1.uniform_row_length(), 1)\n\n def no_broadcast_a():\n return control_flow_ops.cond(\n can_broadcast_b, true_fn=broadcast_b, false_fn=broadcast_noop)\n\n can_broadcast_a = math_ops.equal(a_1.uniform_row_length(), 1)\n\n broadcast_asserts = [\n check_ops.assert_equal(\n math_ops.logical_or(\n math_ops.logical_or(can_broadcast_a, can_broadcast_b),\n math_ops.equal(a_1.uniform_row_length(),\n b_1.uniform_row_length())), True)\n ]\n\n result = control_flow_ops.cond(\n can_broadcast_a, true_fn=broadcast_a, false_fn=no_broadcast_a)\n\n [c_1_uniform_row_length, ac_1_gather_index, bc_1_gather_index] = [\n control_flow_ops.with_dependencies(broadcast_asserts, x) for x in result\n ]\n\n c_1 = RowPartition.from_uniform_row_length(\n c_1_uniform_row_length,\n nvals=c_1_uniform_row_length * ac_0.dest_nrows(),\n nrows=ac_0.dest_nrows())\n ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index)\n bc_1 = _LayerBroadcaster.from_gather_index(bc_1_gather_index)\n return [c_1, ac_1, bc_1]\n\n\ndef _broadcast_dynamic_shape_next_layer(\n ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition,\n b_1: RowPartition\n) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]:\n r\"\"\"Broadcast target and next layer broadcaster of two dynamic shapes.\n\n *--ac_0-->*<--bc_0--*\n | | |\n a_1 c_1 b_1\n | | |\n V V V\n *--ac_1-->*<--bc_1--*\n\n Args:\n ac_0: _LayerBroadcaster from a to c in the previous layer.\n bc_0: _LayerBroadcaster from b to c in the previous layer.\n a_1: a RowPartition for the next layer of a.\n b_1: a RowPartition for the next layer of b.\n\n Returns:\n (c_1, ac_1, bc_1)\n c_1: a RowPartition for the next layer of the dynamic shape.\n ac_1: _LayerBroadcaster from a to c in the next layer.\n bc_1: _LayerBroadcaster from b to c in the next layer.\n \"\"\"\n if not isinstance(ac_0, _LayerBroadcaster):\n raise TypeError(\"ac_0 should be a _LayerBroadcaster\")\n if not isinstance(bc_0, _LayerBroadcaster):\n raise TypeError(\"bc_0 should be a _LayerBroadcaster\")\n if not isinstance(a_1, RowPartition):\n raise TypeError(\"a_1 should be a RowPartition\")\n if not isinstance(b_1, RowPartition):\n raise 
TypeError(\"b_1 should be a RowPartition\")\n\n if a_1.is_uniform():\n if b_1.is_uniform():\n return _broadcast_dynamic_shape_next_layer_both_uniform(\n ac_0, bc_0, a_1, b_1)\n else:\n return _broadcast_dynamic_shape_next_layer_half_ragged(\n ac_0, bc_0, a_1, b_1)\n else:\n if b_1.is_uniform():\n [c_1, bc_1, ac_1] = _broadcast_dynamic_shape_next_layer_half_ragged( # pylint: disable=arguments-out-of-order\n bc_0, ac_0, b_1, a_1)\n return (c_1, ac_1, bc_1)\n else:\n # If neither shape is uniform, we cannot broadcast the dimension.\n [ac_1, c_1a] = _broadcast_half(ac_0, a_1)\n [bc_1, c_1b] = _broadcast_half(bc_0, b_1)\n check_valid = [\n check_ops.assert_equal(c_1a.row_splits(), c_1b.row_splits())\n ]\n return (c_1a.with_dependencies(check_valid),\n ac_1.with_dependencies(check_valid),\n bc_1.with_dependencies(check_valid))\n\n\ndef _broadcast_dynamic_shape_from_rps(\n a_zero: _LayerBroadcaster, b_zero: _LayerBroadcaster,\n a_rps: Sequence[RowPartition], b_rps: Sequence[RowPartition]\n) -> Tuple[Sequence[RowPartition], Sequence[_LayerBroadcaster],\n Sequence[_LayerBroadcaster]]:\n \"\"\"Create BroadcastLayers from two shapes to a target shape.\n\n\n *--a_zero->*<-b_zero-*\n | | |\n a_rps[0] c_rps[0] b_rps[0]\n | | |\n V V V\n *--ac[1]-->*<-bc[1]--*\n | | |\n a_rps[1] c_rps[0] b_rps[1]\n | | |\n V V V\n *--ac[2]-->*<-bc[2]--*\n\n Note: ac[0]=a_zero, and bc[0]=b_zero.\n Args:\n a_zero: broadcaster from rows of a_rps[0] to target shape.\n b_zero: broadcaster from rows of b_rps[0] to target shape.\n a_rps: RowPartitions of first shape.\n b_rps: RowPartitions of second shape, equal in length to a_rps.\n\n Returns:\n (c_rps, ac, bc) where:\n c_rps: RowPartitions of target shape.\n ac: layers broadcasting from the first shape.\n bc: layers broadcasting from the second shape.\n \"\"\"\n assert len(a_rps) == len(b_rps)\n if a_rps:\n (c_1, ac_1,\n bc_1) = _broadcast_dynamic_shape_next_layer(a_zero, b_zero, a_rps[0],\n b_rps[0])\n (c_suffix, a_layers,\n b_layers) = _broadcast_dynamic_shape_from_rps(ac_1, bc_1, a_rps[1:],\n b_rps[1:])\n\n return ([c_1] + c_suffix, [ac_1] + a_layers, [bc_1] + b_layers)\n else:\n return ([], [], [])\n\n\ndef _get_broadcast_num_row_partitions(a: RaggedShape, b: RaggedShape):\n \"\"\"Returns broadcast_dynamic_shape(a, b).num_row_partitions.\"\"\"\n # Assumes rank and num_row_partitions are not None.\n if (a.num_row_partitions == 0 and b.num_row_partitions == 0):\n return 0\n expanded_num_row_partitions_a = a.num_row_partitions + max(0, b.rank - a.rank)\n expanded_num_row_partitions_b = b.num_row_partitions + max(0, a.rank - b.rank)\n\n if a.num_row_partitions == 0:\n return expanded_num_row_partitions_b\n\n if b.num_row_partitions == 0:\n return expanded_num_row_partitions_a\n\n return max(expanded_num_row_partitions_a, expanded_num_row_partitions_b)\n\n\n# pylint: disable=protected-access\ndef _broadcast_dynamic_shape_extended_complete(\n a: RaggedShape, b: RaggedShape, b_rps: Sequence[RowPartition],\n c_suffix: Sequence[RowPartition], ac: Sequence[_LayerBroadcaster],\n bc_suffix: Sequence[_LayerBroadcaster]\n) -> Tuple[RaggedShape, _Broadcaster, _Broadcaster]:\n \"\"\"Helper for broadcast_dynamic_shape_extended.\"\"\"\n c_prefix = b_rps[:-len(c_suffix)]\n bc_prefix_length = b.rank - len(bc_suffix)\n bc_prefix = [\n _LayerBroadcaster.get_identity_broadcaster(b._num_slices_in_dimension(i))\n for i in range(bc_prefix_length)\n ]\n c_num_row_partitions = _get_broadcast_num_row_partitions(a, b)\n\n c = RaggedShape.from_row_partitions(\n c_prefix + 
tuple(c_suffix))._with_num_row_partitions(c_num_row_partitions)\n return (c, _Broadcaster(a, c, ac), _Broadcaster(b, c, bc_prefix + bc_suffix))\n\n\ndef _broadcast_dynamic_shape_extended_helper(\n a: RaggedShape,\n b: RaggedShape) -> Tuple[RaggedShape, _Broadcaster, _Broadcaster]:\n \"\"\"Helper for broadcast_dynamic_shape_extended.\n\n Here, we force:\n a.rank <= b.rank\n 2 <= b.rank\n 1 <= a.rank\n Args:\n a: a RaggedShape\n b: a RaggedShape\n\n Returns:\n A triple of a shape and two broadcasters.\n \"\"\"\n assert a.rank <= b.rank\n assert 2 <= b.rank\n assert 1 <= a.rank\n a_rps = a._as_row_partitions() # pylint: disable=protected-access\n b_rps = b._as_row_partitions() # pylint: disable=protected-access\n a_nrows = a[0]\n\n if len(a_rps) < len(b_rps):\n # Note: this includes the case where len(a_rps)==0.\n # Here we begin at -1, one dimension before a_rps[0].\n # neg_one_a_rp | b_rps[-(len(a_rps)+1)]\n # a_rps[0] | b_rps[-len(a_rps)]\n # a_rps[1] | b_rps[1-len(a_rps)]\n # ... | ...\n # a_rps[-1] | b_rps[-1]\n\n neg_one_a_rp = RowPartition.from_uniform_row_length(\n uniform_row_length=a_nrows, nrows=1, nvals=a_nrows)\n neg_one_b_rp = b_rps[-(len(a_rps) + 1)]\n (neg_one_ac, neg_one_bc) = _broadcast_dynamic_shape_first_layer(\n constant_op.constant(1, dtype=b_rps[0].dtype), neg_one_b_rp.nrows())\n\n # The first part of the solution.\n (c_zero, ac_zero,\n bc_zero) = _broadcast_dynamic_shape_next_layer(neg_one_ac, neg_one_bc,\n neg_one_a_rp, neg_one_b_rp)\n b_rps_tail = b_rps[-len(a_rps):] if len(a_rps) >= 1 else []\n\n (c_suffix, ac_layers,\n bc_layers) = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps,\n b_rps_tail)\n\n return _broadcast_dynamic_shape_extended_complete(\n a=a,\n b=b,\n b_rps=b_rps,\n c_suffix=[c_zero] + c_suffix,\n ac=[ac_zero] + ac_layers,\n bc_suffix=[neg_one_bc, bc_zero] + bc_layers)\n\n else:\n assert len(a_rps) == len(b_rps)\n (ac_zero,\n bc_zero) = _broadcast_dynamic_shape_first_layer(a_rps[0].nrows(),\n b_rps[0].nrows())\n\n (c_rps, a_layers,\n b_layers) = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps,\n b_rps)\n return _broadcast_dynamic_shape_extended_complete(\n a=a,\n b=b,\n b_rps=b_rps,\n c_suffix=c_rps,\n ac=[ac_zero] + a_layers,\n bc_suffix=[bc_zero] + b_layers)\n\n\ndef _fix_slice_index(index, rank, num_row_partitions):\n \"\"\"Slice indexes are always silently truncated.\"\"\"\n if index < 0:\n if rank is None:\n raise ValueError(\n \"Rank must be known to use __getitem__ on a negative index.\")\n index = rank + index\n if index < 0:\n index = 0\n if (num_row_partitions > 0 and index <= num_row_partitions + 1):\n # The rank is always >= num_row_partitions + 1 if num_row_partitions > 0.\n return index\n if rank is None:\n raise ValueError(\"Rank must be known to use __getitem__ on a large index.\")\n if index >= rank:\n index = rank\n return index\n\n\ndef _first_layer_gather_index(nrows_source, nrows_target):\n \"\"\"Return the first layer gather_index.\n\n Args:\n nrows_source: the number of rows in the source.\n nrows_target: the number of rows in the target.\n\n Returns:\n A tensor, usable as a gather_index for a _LayerBroadcaster.\n \"\"\"\n\n def gi_broadcast_first():\n return array_ops.zeros(nrows_target, dtype=nrows_target.dtype)\n\n def gi_no_broadcast_first():\n gather_index = math_ops.range(nrows_target, dtype=nrows_target.dtype)\n return gather_index\n\n do_broadcast = math_ops.equal(nrows_source,\n constant_op.constant(1, nrows_source.dtype))\n nrows_equal = math_ops.equal(nrows_source, nrows_target)\n can_broadcast = 
check_ops.assert_equal(\n math_ops.logical_or(do_broadcast, nrows_equal),\n True,\n message=\"Cannot broadcast\")\n\n gather_index = control_flow_ops.cond(\n do_broadcast, true_fn=gi_broadcast_first, false_fn=gi_no_broadcast_first)\n\n return control_flow_ops.with_dependencies([can_broadcast], gather_index)\n\n\ndef _next_layer_gather_index(bc, original_rp, broadcast_rp):\n r\"\"\"Create the next layer gather_index whether or not a broadcast happens.\n\n *----------bc-------->*\n | |\n original_rp broadcast_rp\n | |\n \\|/ \\|/\n *--next_broadcaster-->*\n\n Args:\n bc: the old broadcaster.\n original_rp: the original row partition.\n broadcast_rp: the target row partition.\n\n Returns:\n the gather_index for next_broadcaster.\n Raises:\n InvalidArgumentError if the shapes are incompatible.\n \"\"\"\n old_value_rowids = array_ops.gather(bc.gather_index,\n broadcast_rp.value_rowids())\n\n def gi_no_broadcast():\n # TODO(martinz): decide if row_splits or row_starts should be used here.\n old_row_starts = array_ops.gather(original_rp.row_splits(),\n old_value_rowids)\n expected_row_lengths = array_ops.gather(\n params=original_rp.row_lengths(), indices=bc.gather_index)\n actual_row_lengths = broadcast_rp.row_lengths()\n check_valid = check_ops.assert_equal(\n expected_row_lengths, actual_row_lengths, message=\"Cannot broadcast\")\n gather_index = old_row_starts + broadcast_rp.offsets_in_rows()\n return control_flow_ops.with_dependencies([check_valid], gather_index)\n\n def gi_broadcast():\n # Several optimizations can occur here.\n # old_row_starts == old_value_rowids, because:\n # if you are broadcasting, then the source has uniform row length of 1,\n # implying original_rp.row_splits == tf.range(orgininal_rp.nvals + 1)\n # When broadcasting, there is no need to add offsets to the\n # source, because the source has size 1.\n # Also, this is always valid, because we enforce source and destination\n # have uniform_row_lengths.\n return old_value_rowids\n\n if not original_rp.is_uniform():\n return gi_no_broadcast()\n\n do_broadcast = math_ops.equal(original_rp.uniform_row_length(),\n constant_op.constant(1, original_rp.dtype))\n gather_index = control_flow_ops.cond(\n do_broadcast, true_fn=gi_broadcast, false_fn=gi_no_broadcast)\n\n return gather_index\n\n\ndef _flat_values_shape(rt):\n if isinstance(rt, ragged_tensor.RaggedTensor):\n return array_ops.shape(rt.flat_values)\n return rt.flat_values.shape\n\n\ndef _to_row_partitions_and_nvals_from_lengths(\n lengths: Sequence[Union[int, Sequence[int]]],\n dtype=None) -> Tuple[Sequence[RowPartition], int]:\n \"\"\"Allow ragged and uniform shapes to be specified.\n\n For example, [2, [2,1], 2] represents a shape like:\n [[[0, 0], [0, 0]], [[0, 0]]]\n\n Args:\n lengths: a list of integers and lists of integers.\n dtype: dtype of the shape (tf.int32 or tf.int64)\n\n Returns:\n a sequence of RowPartitions, and the number of values of the last partition.\n \"\"\"\n size_so_far = lengths[0]\n result = []\n for current_lengths in lengths[1:]:\n if isinstance(current_lengths, int):\n nrows = size_so_far\n nvals = current_lengths * nrows\n size_so_far = nvals\n result.append(\n RowPartition.from_uniform_row_length(\n current_lengths, nvals, nrows=nrows, dtype_hint=dtype))\n else:\n if size_so_far != len(current_lengths):\n raise ValueError(\"Shape not consistent.\")\n result.append(\n RowPartition.from_row_lengths(current_lengths, dtype_hint=dtype))\n size_so_far = sum(current_lengths)\n return (result, size_so_far)\n\n\ndef _element_to_string(x):\n 
\"\"\"element to a string within a list.\"\"\"\n if x is Ellipsis:\n return \"...\"\n if isinstance(x, str):\n return \"'\" + x + \"'\"\n return str(x)\n\n\ndef _list_tail_with_ellipsis(arr):\n \"\"\"Print the tail of a list where the list might have an ellipsis.\"\"\"\n if not arr:\n return \"]\"\n else:\n return \", \" + _element_to_string(arr[0]) + _list_tail_with_ellipsis(arr[1:])\n\n\ndef _list_with_ellipsis_to_str(arr):\n \"\"\"Print a list that might have ellipsis.\"\"\"\n if not arr:\n return \"[]\"\n return \"[\" + _element_to_string(arr[0]) + _list_tail_with_ellipsis(arr[1:])\n\n\ndef _is_int_or_tuple_of_ints(x):\n if isinstance(x, int):\n return True\n if not isinstance(x, tuple):\n return False\n for y in x:\n if not isinstance(y, int):\n return False\n return True\n" ]
[ [ "tensorflow.python.eager.context.context", "tensorflow.core.framework.function_pb2.GradientDef", "tensorflow.python.framework.func_graph.FuncGraph", "tensorflow.python.framework.cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData", "tensorflow.python.framework.cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.core.framework.versions_pb2.VersionDef", "tensorflow.python.ops.resource_variable_ops._set_handle_shapes_and_types", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.importer.import_graph_def_for_function", "tensorflow.core.framework.graph_pb2.GraphDef" ], [ "tensorflow.python.summary.summary.scalar", "tensorflow.python.platform.test.mock.patch.object", "tensorflow.python.platform.test.main", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.summary_ops_v2.create_summary_file_writer" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.ragged.row_partition.RowPartition.from_row_splits", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.ragged.row_partition.RowPartition.from_row_lengths", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.ops.math_ops.reduce_prod", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.ops.math_ops.logical_or", "tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.ragged.row_partition.RowPartition.from_uniform_row_length", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.check_ops.assert_equal", "tensorflow.python.ops.ragged.ragged_tensor.is_ragged", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor._from_nested_row_partitions", "tensorflow.python.ops.ragged.ragged_tensor.match_row_splits_dtypes", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
KosingZhu/tensorflow
[ "60028072a1c3b4376e145b6fea8e4ccd3324377f", "60028072a1c3b4376e145b6fea8e4ccd3324377f", "60028072a1c3b4376e145b6fea8e4ccd3324377f", "60028072a1c3b4376e145b6fea8e4ccd3324377f", "7ac2521a4e609ddef0f0ea3ffc2e76102da934d7", "60028072a1c3b4376e145b6fea8e4ccd3324377f", "7ac2521a4e609ddef0f0ea3ffc2e76102da934d7", "7ac2521a4e609ddef0f0ea3ffc2e76102da934d7", "7ac2521a4e609ddef0f0ea3ffc2e76102da934d7" ]
[ "tensorflow/python/kernel_tests/array_ops/denormal_test.py", "tensorflow/python/kernel_tests/random/stateless_random_ops_test.py", "tensorflow/python/kernel_tests/pooling_ops_test.py", "tensorflow/python/kernel_tests/cudnn_deterministic_ops_test.py", "tensorflow/lite/testing/generate_examples_lib.py", "tensorflow/python/data/util/traverse_test.py", "tensorflow/python/ops/control_flow_grad.py", "tensorflow/python/kernel_tests/sparse_xent_op_deterministic_test.py", "tensorflow/compiler/mlir/tfrt/python_tests/tf_math_ops_test.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for denormal handling.\"\"\"\n\nimport numpy as np\nimport platform\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\n\n\nclass DenormalTest(test.TestCase):\n\n def testPythonHasDenormals(self):\n \"\"\"Non-tf numpy code should treat denormals correctly.\"\"\"\n for dtype in np.float32, np.float64:\n tiny = np.finfo(dtype).tiny\n self.assertEqual(tiny, tiny / 16 * 16)\n\n def _flushDenormalsTest(self, dtypes):\n if (platform.machine() == \"ppc64le\" or platform.machine() == \"s390x\" or\n platform.machine() == \"aarch64\"):\n # Disabled denormal_test on power/s390x/aarch64 platform\n # Check relevant discussion - https://github.com/tensorflow/tensorflow/issues/11902\n return\n for dtype in dtypes:\n tiny = np.finfo(dtype).tiny\n # Small shape to test main thread, large shape to test thread pool\n for shape in (), (1 << 20,):\n flush = 0.1 * constant_op.constant(tiny, shape=shape)\n self.assertAllEqual(self.evaluate(flush), np.zeros(shape))\n # Make sure the flags don't leak out\n self.testPythonHasDenormals()\n\n @test_util.run_in_graph_and_eager_modes(use_gpu=False)\n def testFlushDenormalsCPU(self):\n # On CPUs, the processor flags flush for both single and double precision.\n self._flushDenormalsTest(dtypes=(np.float32, np.float64))\n\n @test_util.run_in_graph_and_eager_modes(use_gpu=True)\n def testFlushDenormalsGPU(self):\n # On GPUs, only single precision can flush to zero.\n self._flushDenormalsTest(dtypes=(np.float32,))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for stateless random ops.\"\"\"\n\nimport functools\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_stateless_random_ops_v2\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import stateless_random_ops as stateless\nfrom tensorflow.python.platform import test\n\n\n# Note that in theory each test will reset the eager context and may choose to\n# hide some devices, so we shouldn't cache this transient info. Tests in this\n# file don't make those config changes, so caching is fine. It provides a good\n# speed-up.\n_cached_device = None\n\n\ndef get_device():\n global _cached_device\n if _cached_device is not None:\n return _cached_device\n # Precedence from high to low\n for device_type in ('XLA_GPU', 'GPU', 'XLA_CPU', 'CPU'):\n devices = config.list_logical_devices(device_type)\n if devices:\n _cached_device = devices[0]\n return _cached_device\n raise ValueError('Cannot find any suitable device. 
Available devices: %s' %\n config.list_logical_devices())\n\n\nBEFORE_EXPIRE = (2020, 10, 24)\nAFTER_EXPIRE = (2020, 10, 26)\n\n\ndef invert_philox(key, value):\n \"\"\"Invert the Philox bijection.\"\"\"\n key = np.array(key, dtype=np.uint32)\n value = np.array(value, dtype=np.uint32)\n step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)\n for n in range(10)[::-1]:\n key0, key1 = key + n * step\n v0 = value[3] * 0x991a7cdb & 0xffffffff\n v2 = value[1] * 0x6d7cae67 & 0xffffffff\n hi0 = v0 * 0xD2511F53 >> 32\n hi1 = v2 * 0xCD9E8D57 >> 32\n v1 = hi1 ^ value[0] ^ key0\n v3 = hi0 ^ value[2] ^ key1\n value = v0, v1, v2, v3\n return np.array(value)\n\n\nSEEDS = ((7, 17), (11, 5), (2, 3))\nSEED_TYPES = [dtypes.int32, dtypes.int64]\n\n\ndef float_cases(shape_dtypes=(None,)):\n cases = (\n # Uniform distribution, with and without range\n ('uniform', stateless.stateless_random_uniform, random_ops.random_uniform,\n {}),\n ('uniform2', stateless.stateless_random_uniform,\n random_ops.random_uniform, dict(minval=2.2, maxval=7.1)),\n # Normal distribution, with and without mean+stddev\n ('normal', stateless.stateless_random_normal, random_ops.random_normal,\n {}),\n ('normal2', stateless.stateless_random_normal, random_ops.random_normal,\n dict(mean=2, stddev=3)),\n # Truncated normal distribution, with and without mean+stddev\n ('trnorm', stateless.stateless_truncated_normal,\n random_ops.truncated_normal, {}),\n ('trnorm2', stateless.stateless_truncated_normal,\n random_ops.truncated_normal, dict(mean=3, stddev=4)),\n )\n # Explicitly passing in params because capturing cell variable from loop is\n # problematic in Python\n def wrap(op, dtype, shape, shape_dtype, seed, **kwargs):\n device_type = get_device().device_type\n # Some dtypes are not supported on some devices\n if (dtype == dtypes.float16 and device_type in ('XLA_GPU', 'XLA_CPU') or\n dtype == dtypes.bfloat16 and device_type == 'GPU'):\n dtype = dtypes.float32\n shape_ = (constant_op.constant(shape, dtype=shape_dtype)\n if shape_dtype is not None else shape)\n return op(seed=seed, shape=shape_, dtype=dtype, **kwargs)\n\n def _name(a):\n if hasattr(a, 'name'):\n return a.name\n else:\n return a\n\n for dtype in dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64:\n for shape_dtype in shape_dtypes:\n for shape in (), (3,), (2, 5):\n for name, stateless_op, stateful_op, kwargs in cases:\n yield (('%s_%s_%s_%s' %\n (name, _name(dtype), shape, _name(shape_dtype))).replace(\n ' ', ''),\n functools.partial(wrap, stateless_op, dtype, shape,\n shape_dtype, **kwargs),\n functools.partial(wrap, stateful_op, dtype, shape, shape_dtype,\n **kwargs))\n\n\ndef int_cases(shape_dtypes=(None,), minval_maxval=None):\n\n def wrap(op, minval, maxval, shape, shape_dtype, dtype, seed, **kwargs):\n shape_ = (constant_op.constant(shape, dtype=shape_dtype)\n if shape_dtype is not None else shape)\n return op(\n seed=seed, shape=shape_, minval=minval, maxval=maxval, dtype=dtype,\n **kwargs)\n\n if minval_maxval is None:\n minval_maxval = ((2, 11111),)\n for minval, maxval in minval_maxval:\n for shape_dtype in shape_dtypes:\n for shape in (), (3,), (2, 5):\n for dtype in dtypes.int32, dtypes.int64:\n yield ('uniform_%s_%s' % (minval, maxval),\n functools.partial(wrap, stateless.stateless_random_uniform,\n minval, maxval, shape, shape_dtype, dtype),\n functools.partial(wrap, random_ops.random_uniform, minval,\n maxval, shape, shape_dtype, dtype))\n\n\ndef multinomial_cases():\n num_samples = 10\n def wrap(op, logits, logits_dtype, output_dtype, 
seed):\n return op(seed=seed,\n logits=constant_op.constant(logits, dtype=logits_dtype),\n num_samples=num_samples, output_dtype=output_dtype)\n for logits_dtype in np.float16, np.float32, np.float64:\n for output_dtype in dtypes.int32, dtypes.int64:\n for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],\n [0.25, 0.75]]):\n yield ('multinomial',\n functools.partial(wrap, stateless.stateless_multinomial, logits,\n logits_dtype, output_dtype),\n functools.partial(wrap, random_ops.multinomial, logits,\n logits_dtype, output_dtype))\n\n\ndef gamma_cases():\n def wrap(op, alpha, dtype, shape, seed):\n return op(seed=seed, shape=shape,\n alpha=constant_op.constant(alpha, dtype=dtype), dtype=dtype)\n for dtype in np.float16, np.float32, np.float64:\n for alpha in ([[.5, 1., 2.]], [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]):\n yield ('gamma',\n functools.partial(wrap, stateless.stateless_random_gamma, alpha,\n dtype, (10,) + tuple(np.shape(alpha))),\n functools.partial(wrap, random_ops.random_gamma, alpha, dtype,\n (10,)))\n\n\ndef poisson_cases():\n def wrap(op, lam, lam_dtype, out_dtype, shape, seed):\n return op(seed=seed, shape=shape,\n lam=constant_op.constant(lam_dtype(lam), dtype=lam_dtype),\n dtype=out_dtype)\n for lam_dtype in np.float16, np.float32, np.float64, np.int32, np.int64:\n for out_dtype in np.float16, np.float32, np.float64, np.int32, np.int64:\n for lam in ([[5.5, 1., 2.]], [[7.5, 10.5], [3.8, 8.2], [1.25, 9.75]]):\n yield ('poisson',\n functools.partial(wrap, stateless.stateless_random_poisson, lam,\n lam_dtype, out_dtype,\n (10,) + tuple(np.shape(lam))),\n functools.partial(wrap, random_ops.random_poisson, lam,\n lam_dtype, out_dtype, (10,)))\n\n\nclass StatelessOpsTest(test.TestCase, parameterized.TestCase):\n\n def _test_match(self, case, seed):\n # Stateless ops should be the same as stateful ops on the first call\n # after seed scrambling.\n key = 0x3ec8f720, 0x02461e29\n preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)\n preseed = preseed[::2] | preseed[1::2] << 32\n with ops.device(get_device().name):\n _, stateless_op, stateful_op = case\n random_seed.set_random_seed(seed[0])\n stateful = stateful_op(seed=seed[1])\n pure = stateless_op(seed=preseed)\n self.assertAllEqual(stateful, pure)\n\n def _test_match_stateless_cpu_gpu(self, case, seed):\n # Stateless ops should produce the same result on CPUs and GPUs.\n _, stateless_op, _ = case\n\n with ops.device('CPU'):\n result_cpu = stateless_op(seed=seed)\n\n with ops.device(get_device().name):\n result_gpu = stateless_op(seed=seed)\n self.assertAllClose(result_cpu, result_gpu)\n\n def _test_old_and_new_stateless_match(self, case, seed):\n \"\"\"Tests that the new stateless ops match the old stateless ones.\"\"\"\n with ops.device(get_device().name):\n _, stateless_op, _ = case\n with compat.forward_compatibility_horizon(*BEFORE_EXPIRE):\n old = stateless_op(seed=seed)\n with compat.forward_compatibility_horizon(*AFTER_EXPIRE):\n new = stateless_op(seed=seed)\n self.assertAllClose(old, new)\n\n def _test_explicit_alg(self, case, seed):\n \"\"\"Tests that alg=philox and alg=None are the same (on CPU/GPU).\"\"\"\n with ops.device(get_device().name):\n _, stateless_op, _ = case\n implicit_alg = stateless_op(seed=seed)\n # All device types allowed in this test will result in Philox\n explicit_alg = stateless_op(seed=seed, alg='philox')\n self.assertAllClose(implicit_alg, explicit_alg)\n\n def _test_determinism(self, case, seed_type):\n # Stateless values should be equal iff the seeds are equal 
(roughly)\n seeds = [(x, y) for x in range(5) for y in range(5)] * 3 # pylint: disable=g-complex-comprehension\n with self.test_session(), ops.device(get_device().name):\n _, stateless_op, _ = case\n if context.executing_eagerly():\n values = [\n (seed, stateless_op(seed=constant_op.constant(seed, seed_type)))\n for seed in seeds]\n else:\n # Have this branch because the above branch is too slow in graph\n # mode\n seed_t = array_ops.placeholder(seed_type, shape=[2])\n pure = stateless_op(seed=seed_t)\n values = [\n (seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds\n ]\n for s0, v0 in values:\n for s1, v1 in values:\n if dtypes.as_dtype(v0.dtype) != dtypes.bfloat16:\n self.assertEqual(s0 == s1, np.all(v0 == v1))\n elif s0 == s1:\n # Skip the s0 != s1 case because v0 and v1 can be either equal or\n # unequal in that case due to bfloat16's low precision\n self.assertAllEqual(v0, v1)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension\n for seed_id, seed in enumerate(SEEDS)\n for case_id, case in enumerate(float_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testMatchFloat(self, case, seed):\n if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Skip on XLA because XLA kernels do not support int64 '\n 'seeds needed by this test.')\n self._test_match(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension\n for seed_id, seed in enumerate(SEEDS)\n for case_id, case in enumerate(int_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testMatchInt(self, case, seed):\n if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Skip on XLA because XLA kernels do not support int64 '\n 'seeds needed by this test.')\n self._test_match(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension\n for seed_id, seed in enumerate(SEEDS)\n for case_id, case in enumerate(multinomial_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testMatchMultinomial(self, case, seed):\n if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking XLA kernel')\n self._test_match(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension\n for seed_id, seed in enumerate(SEEDS)\n for case_id, case in enumerate(gamma_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. 
b/156187396')\n def testMatchGamma(self, case, seed):\n if get_device().device_type == 'GPU':\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking GPU kernel')\n if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking XLA kernel')\n self._test_match(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension\n for seed_id, seed in enumerate(SEEDS)\n for case_id, case in enumerate(gamma_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testStatelessGammaCpuGpuMatch(self, case, seed):\n if get_device().device_type != 'GPU':\n # This test compares the numbers produced by the CPU and GPU kernel for\n # stateless_random_gamma.\n self.skipTest('This test requires GPU')\n self._test_match_stateless_cpu_gpu(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension\n for seed_id, seed in enumerate(SEEDS)\n for case_id, case in enumerate(poisson_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testMatchPoisson(self, case, seed):\n if get_device().device_type == 'GPU':\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking GPU kernel')\n if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking XLA kernel')\n self._test_match(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension\n for seed_id, seed in enumerate(SEEDS)\n for case_id, case in enumerate(float_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testOldAndNewStatelessMatchFloat(self, case, seed):\n self._test_old_and_new_stateless_match(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension\n for seed_id, seed in enumerate(SEEDS)\n for case_id, case in enumerate(\n int_cases(minval_maxval=((2, 11111), (None, None)))))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testOldAndNewStatelessMatchInt(self, case, seed):\n self._test_old_and_new_stateless_match(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s' % (case[0], case_id), case)\n for case_id, case in enumerate(float_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testExplicitAlgFloat(self, case):\n seed = (7, 17)\n self._test_explicit_alg(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s' % (case[0], case_id), case)\n for case_id, case in enumerate(\n int_cases(minval_maxval=((2, 11111), (None, None)))))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. 
b/156187396')\n def testExplicitAlgInt(self, case):\n seed = (7, 17)\n self._test_explicit_alg(case, seed)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension\n for seed_type in SEED_TYPES\n for case_id, case in enumerate(\n float_cases(shape_dtypes=(dtypes.int32, dtypes.int64))))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testDeterminismFloat(self, case, seed_type):\n if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',\n 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest(\n 'Skip on XLA because XLA kernels do not support int64 seeds.')\n self._test_determinism(case, seed_type)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension\n for seed_type in SEED_TYPES\n for case_id, case in enumerate(\n int_cases(shape_dtypes=(dtypes.int32, dtypes.int64))))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testDeterminismInt(self, case, seed_type):\n if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',\n 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest(\n 'Skip on XLA because XLA kernels do not support int64 seeds.')\n self._test_determinism(case, seed_type)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension\n for seed_type in SEED_TYPES\n for case_id, case in enumerate(multinomial_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testDeterminismMultinomial(self, case, seed_type):\n if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking XLA kernel')\n self._test_determinism(case, seed_type)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension\n for seed_type in SEED_TYPES\n for case_id, case in enumerate(gamma_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')\n def testDeterminismGamma(self, case, seed_type):\n if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking XLA kernel')\n self._test_determinism(case, seed_type)\n\n @parameterized.named_parameters(\n ('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension\n for seed_type in SEED_TYPES\n for case_id, case in enumerate(poisson_cases()))\n @test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. 
b/156187396')\n def testDeterminismPoisson(self, case, seed_type):\n if get_device().device_type == 'GPU':\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking GPU kernel')\n if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):\n # This test was passing before because soft placement silently picked the\n # CPU kernels.\n self.skipTest('Lacking XLA kernel')\n self._test_determinism(case, seed_type)\n\n @test_util.run_v2_only\n def testGetKeyCounterAlg(self):\n seed = [1, 2]\n key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(\n seed)\n self.assertAllEqual(key.shape, [1])\n self.assertAllEqual(counter.shape, [2])\n alg = gen_stateless_random_ops_v2.stateless_random_get_alg()\n self.assertAllEqual(alg.shape, [])\n\n def assertDTypeEqual(self, a, b):\n self.assertEqual(dtypes.as_dtype(a), dtypes.as_dtype(b))\n\n def assertNoEqualPair(self, ls):\n for i in range(len(ls)):\n for j in range(i + 1, len(ls)):\n self.assertFalse(math_ops.reduce_all(ls[i] == ls[j]))\n\n @parameterized.parameters(['int32', 'int64'])\n @test_util.run_v2_only\n def testSplit(self, dtype):\n \"\"\"Test for `split`.\"\"\"\n seed = constant_op.constant([1, 2], dtype=dtype)\n new_seed = stateless.split(seed, 3)\n self.assertEqual(new_seed.shape, [3, 2])\n self.assertDTypeEqual(new_seed.dtype, dtype)\n self.assertNoEqualPair([seed] + array_ops.unstack(new_seed))\n\n @parameterized.parameters(['int32', 'int64'])\n @test_util.run_v2_only\n def testFoldIn(self, dtype):\n \"\"\"Test for `fold_in`.\"\"\"\n orig_seed = constant_op.constant([1, 2], dtype='int32')\n seed = stateless.fold_in(orig_seed, constant_op.constant(3, dtype=dtype))\n new_seeds = []\n new_seeds.append(seed)\n seed = stateless.fold_in(seed, constant_op.constant(4, dtype=dtype))\n new_seeds.append(seed)\n for s in new_seeds:\n self.assertEqual(s.shape, [2])\n self.assertDTypeEqual(s.dtype, dtype)\n self.assertNoEqualPair([math_ops.cast(orig_seed, dtype)] + new_seeds)\n\n @test_util.run_v2_only\n def testErrors(self):\n \"\"\"Tests that proper errors are raised.\n \"\"\"\n shape = [2, 3]\n with self.assertRaisesWithPredicateMatch(\n ValueError,\n 'minval must be a scalar; got a tensor of shape '):\n @def_function.function\n def f():\n stateless.stateless_random_uniform(\n shape=shape, seed=[1, 2], minval=array_ops.zeros(shape, 'int32'),\n maxval=100, dtype='int32')\n f()\n with self.assertRaisesWithPredicateMatch(\n ValueError,\n 'maxval must be a scalar; got a tensor of shape '):\n @def_function.function\n def f2():\n stateless.stateless_random_uniform(\n shape=shape, seed=[1, 2], minval=0,\n maxval=array_ops.ones(shape, 'int32') * 100,\n dtype='int32')\n f2()\n\n\nif __name__ == '__main__':\n config.set_soft_device_placement(False)\n context.context().enable_xla_devices()\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for pooling operations.\"\"\"\n\nimport collections\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nimport tensorflow.python.framework.config as config_exec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variables\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\n\n\ndef GetDeviceScope(self, use_gpu=False):\n if context.executing_eagerly():\n if use_gpu and test.is_gpu_available():\n return ops.device(\"GPU:0\")\n return ops.device(\"CPU:0\")\n else:\n return self.session(use_gpu=use_gpu)\n\n\n# TODO(jlebar): Convert the rest of this file to parameters.parameterized().\n# Then remove GetTestConfigs() and rename GetTestConfigsDicts().\ndef GetTestConfigsDicts(v1_fn,\n v2_fn=None,\n one_dimensional=False,\n allow_gpu=True):\n # (data_format, use_gpu) tuple\n if one_dimensional:\n configs0 = [\n (\"NWC\", False),\n (\"NWC\", True),\n (\"NCW\", True),\n ]\n else:\n configs0 = [\n (\"NHWC\", False),\n (\"NHWC\", True),\n (\"NCHW\", True),\n ]\n # NCHW_VECT_C only supported for max_pool.\n if (v1_fn == nn_ops.max_pool or v1_fn == nn_ops.max_pool1d or\n v2_fn == nn_ops.max_pool_v2 or v2_fn == gen_nn_ops.max_pool_v2):\n configs0.append((\"NCHW_VECT_C\", True))\n\n # (data_format, use_gpu, data_type) tuple\n configs1 = []\n for data_format, use_gpu in configs0:\n configs1.append((data_format, use_gpu, dtypes.float32))\n\n # In our test, VECT_C always uses float32. 
(It gets converted to int8 in\n # the test runner.)\n if data_format == \"NCHW_VECT_C\":\n continue\n\n configs1 += [(data_format, use_gpu, dtypes.float16),\n (data_format, use_gpu, dtypes.float64)]\n\n # Convert from tuple to dict and add v1/v2 versions.\n ret = []\n for data_format, use_gpu, data_type in configs1:\n ret.append({\n \"pool_func\": v1_fn,\n \"data_format\": data_format,\n \"data_type\": data_type,\n \"use_gpu\": use_gpu,\n \"v2\": False\n })\n if v2_fn:\n ret.append({\n \"pool_func\": v2_fn,\n \"data_format\": data_format,\n \"data_type\": data_type,\n \"use_gpu\": use_gpu,\n \"v2\": False\n })\n ret.append({\n \"pool_func\": v2_fn,\n \"data_format\": data_format,\n \"data_type\": data_type,\n \"use_gpu\": use_gpu,\n \"v2\": True\n })\n\n # Filter out GPU configs if necessary.\n if not allow_gpu:\n ret = [c for c in ret if not c[\"use_gpu\"]]\n\n return ret\n\n\ndef GetTestConfigs(include_nchw_vect_c=False, one_dimensional=False):\n \"\"\"Get all the valid tests configs to run.\n\n Args:\n include_nchw_vect_c: Whether to include NCHW_VECT_C in the test configs.\n one_dimensional: If it's a 1D test\n\n Returns:\n all the valid test configs as tuples of data_format and use_gpu.\n \"\"\"\n if one_dimensional:\n test_configs = [(\"NWC\", False), (\"NWC\", True)]\n if test.is_gpu_available(cuda_only=True):\n test_configs += [(\"NCW\", True)]\n return test_configs\n test_configs = [(\"NHWC\", False), (\"NHWC\", True)]\n if not test.is_gpu_available(cuda_only=True):\n tf_logging.info(\"NCHW and NCHW_VECT_C tests skipped because not run with \"\n \"--config=cuda or no GPUs available.\")\n return test_configs\n # \"NCHW\" format is currently supported exclusively on CUDA GPUs.\n test_configs += [(\"NCHW\", True)]\n if include_nchw_vect_c:\n if test.is_gpu_available(\n cuda_only=True, min_cuda_compute_capability=(6, 1)):\n test_configs += [(\"NCHW_VECT_C\", True)]\n else:\n tf_logging.info(\"NCHW_VECT_C test skipped because no GPUs with \"\n \"compute capability >= 6.1 are available.\")\n\n return test_configs\n\n\ndef GetShrunkInceptionMaxPoolShapes(shrink=30):\n \"\"\"Iterator for some of the max pool ops in the Inception 2015 model.\n\n Args:\n shrink: Factor to shrink depth relative to Inception.\n\n Yields:\n Tuple (name, input_size, filter_size, out_size, strides, padding)\n \"\"\"\n names = [\"maxpool2\", \"maxpool3\", \"maxpool4\", \"maxpool5\"]\n input_sizes = [[32, 71, 71, 192], [32, 35, 35, 288], [32, 17, 17, 1248],\n [32, 8, 8, 2048]]\n filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1]]\n output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288], [32, 8, 8, 1248],\n [32, 8, 8, 2048]]\n strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1], [1, 1, 1, 1]]\n # Shrink each depth value\n for i in input_sizes:\n i[3] //= shrink\n for o in output_sizes:\n o[3] //= shrink\n paddings = [\"VALID\", \"VALID\", \"VALID\", \"SAME\"]\n for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,\n strides, paddings):\n yield n, i, f, o, s, p\n\n\nclass PoolingTest(test.TestCase, parameterized.TestCase):\n\n def _isMaxPool(self, func):\n return func in (nn_ops.max_pool, nn_ops.max_pool_v2)\n\n def _VerifyOneType(self, pool_func, input_sizes, ksize, strides, padding,\n data_format, data_type, expected, use_gpu, v2,\n use_negative_input=False):\n \"\"\"Verifies the output values of the pooling function.\n\n Args:\n pool_func: Function to be called, co.MaxPool, co.AvgPool,\n or the Lua version.\n input_sizes: Input tensor dimensions.\n ksize: The kernel size 
dimensions\n strides: The stride dimensions\n padding: Padding type.\n data_format: The data format we use to run the pooling operation.\n data_type: The data type to use to run the pooling operation.\n expected: An array containing the expected operation outputs.\n use_gpu: Whether we are running on GPU.\n v2: Whether to use v2 version.\n use_negative_input: If the input values should be negative.\n \"\"\"\n # Check that this test is compatible with the hardware we have. (Really\n # this should be done in GetTestConfigsDicts(), but when that runs, we\n # haven't initialized enough of TF to know what our hardware is!)\n if use_gpu and not test.is_gpu_available():\n self.skipTest(\"No GPU is available.\")\n if use_gpu and data_type == dtypes.float64 and test.is_built_with_rocm():\n self.skipTest(\"ROCm pooling ops don't support float64.\")\n if use_gpu and data_format == \"NCHW_VECT_C\" and not test.is_gpu_available(\n cuda_only=True, min_cuda_compute_capability=(6, 1)):\n self.skipTest(\"NCHW_VECT_C requires sm61+.\")\n\n if v2 and data_format != \"NHWC\":\n self.skipTest(\"v2 not supported for %s\" % data_format)\n if v2 and not isinstance(padding, str):\n self.skipTest(\"non-constant ksize/strides requires nonexplicit padding\")\n if data_format == \"NCHW_VECT_C\":\n if data_type != dtypes.float32:\n self.skipTest(\"quantization to qint8 not implemented for %r\" %\n data_type)\n if input_sizes[-1] % 4 != 0:\n self.skipTest(\"Skipping test for depth %d\" % input_sizes[-1])\n\n total_size = 1\n for s in input_sizes:\n total_size *= s\n tf_logging.info(\"Running %s test. %r %r %d %r %r %r %s\", data_format, v2,\n input_sizes, total_size, pool_func, ksize, strides,\n data_type)\n # Initializes the input tensor with array containing incrementing\n # numbers from 1, wrapping round to -127 after 127 to support int8.\n y = -1 if use_negative_input else 1\n x = [(((f + 128) % 255) - 127)*y for f in range(total_size)]\n with self.cached_session(use_gpu=use_gpu):\n t = constant_op.constant(x, shape=input_sizes, dtype=data_type)\n if data_format in (\"NCHW\", \"NCHW_VECT_C\", \"NCW\"):\n if data_format == \"NCHW_VECT_C\":\n t = test_util.NHWCToNCHW_VECT_C(t)\n t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)\n else:\n t = test_util.NHWCToNCHW(t)\n ksize = test_util.NHWCToNCHW(ksize)\n strides = test_util.NHWCToNCHW(strides)\n if isinstance(padding, list):\n padding = test_util.NHWCToNCHW(padding)\n ksize_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])\n strides_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])\n if v2:\n t = pool_func(\n t,\n ksize=ksize_placeholder,\n strides=strides_placeholder,\n padding=padding,\n data_format=data_format)\n else:\n t = pool_func(\n t,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCHW_VECT_C\":\n t = gen_array_ops.dequantize(t, -128, 127)\n t = test_util.NCHW_VECT_CToNHWC(t)\n elif data_format == \"NCHW\":\n t = test_util.NCHWToNHWC(t)\n if v2:\n actual = t.eval(feed_dict={\n ksize_placeholder: ksize,\n strides_placeholder: strides\n })\n else:\n actual = self.evaluate(t)\n self.assertShapeEqual(actual, t)\n self.assertAllCloseAccordingToType(expected, actual.flatten())\n\n def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,\n data_format, expected, use_gpu, v2,\n use_negative_input=False):\n \"\"\"Verifies the output values of the pooling function.\n\n Args:\n pool_func: Function to be called, co.MaxPool, co.AvgPool,\n or the Lua version.\n 
input_sizes: Input tensor dimensions.\n ksize: The kernel size dimensions\n strides: The stride dimensions\n padding: Padding type.\n data_format: The data format we use to run the pooling operation.\n expected: An array containing the expected operation outputs.\n use_gpu: Whether we are running on GPU.\n v2: Whether to use v2 version.\n use_negative_input: If the input values should be negative.\"\n \"\"\"\n if data_format == \"NCHW_VECT_C\":\n avg_pool_func = nn_ops.avg_pool\n tf_logging.info(\"pool_func=%s\", pool_func)\n if pool_func == avg_pool_func:\n tf_logging.info(\"NCHW_VECT_C not yet implemented for avg_pool\")\n return\n if (self._isMaxPool(pool_func) and isinstance(padding, list)):\n tf_logging.info(\"NCHW_VECT_C not yet implemented for max pool\" +\n \" with explicit padding\")\n return\n\n self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,\n data_format, dtypes.float32, expected, use_gpu, v2,\n use_negative_input)\n if not test.is_built_with_rocm():\n # double datatype is not supported for pooling ops on the ROCm platform\n self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,\n data_format, dtypes.float64, expected, use_gpu, v2,\n use_negative_input)\n\n if not use_gpu or test_util.GpuSupportsHalfMatMulAndConv():\n self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,\n data_format, dtypes.float16, expected, use_gpu, v2,\n use_negative_input)\n\n def _VerifyValues(self,\n pool_func,\n input_sizes,\n ksize,\n strides,\n padding,\n expected,\n use_gpu,\n v2=False,\n one_dim=False,\n use_negative_input=False):\n \"\"\"Verifies the output values of the pooling function.\n\n Args:\n pool_func: Function to be called, co.MaxPool, co.AvgPool,\n or the Lua version.\n input_sizes: Input tensor dimensions.\n ksize: The kernel size dimensions\n strides: The stride dimensions\n padding: Padding type.\n expected: An array containing the expected operation outputs.\n use_gpu: Whether we are running on GPU.\n v2: Whether to use v2 version.\n one_dim: If one dimensional pools should be done instead of two\n dimensional pools.\n use_negative_input: If the input values should be negative.\n \"\"\"\n for (data_format, use_gpu_2) in GetTestConfigs(\n include_nchw_vect_c=True, one_dimensional=one_dim):\n if use_gpu_2 == use_gpu:\n self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,\n data_format, expected, use_gpu, v2,\n use_negative_input)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolValidPadding(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"VALID\",\n expected=[7.0, 8.0, 9.0],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolEmpty(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 0],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"VALID\",\n expected=[],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolSamePadding(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 2, 4, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=[8.5, 9.5, 10.5, 14.5, 15.5, 16.5],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolSamePaddingNonSquareWindow(self, **kwargs):\n # input is:\n # [1.0, 2.0\n # 3.0 4.0]\n 
#\n # Window of [x, x] should do:\n # [avg(1.0, 2.0), avg(2.0, padded0),\n # avg(3.0, 4.0), avg(4.0, padded0)]\n self._VerifyOneType(\n input_sizes=[1, 2, 2, 1],\n ksize=[1, 1, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n expected=[1.5, 2.0, 3.5, 4.0],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolSamePaddingNonSquareWindow_2(self, **kwargs):\n # Window of [x,\n # x] should do:\n # [avg(1.0, 3.0), avg(2.0, 4.0)\n # avg(3.0, padded0), avg(4.0, padded0)]\n self._VerifyOneType(\n input_sizes=[1, 2, 2, 1],\n ksize=[1, 2, 1, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n expected=[2.0, 3.0, 3.0, 4.0],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[2, 2, 2, 2],\n ksize=[1, 1, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n expected=[\n 2.0, 3.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 10.0, 11.0, 11.0, 12.0,\n 14.0, 15.0, 15.0, 16.0\n ],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolSamePaddingNonSquareWindowMultiBatch_2(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[2, 2, 2, 2],\n ksize=[1, 2, 1, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n expected=[\n 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0,\n 13.0, 14.0, 15.0, 16.0\n ],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolValidPaddingUnevenStride(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 2, 1],\n padding=\"VALID\",\n expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolValidPaddingUnevenStride_2(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 1, 1],\n padding=\"VALID\",\n expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolSamePadding_2(self, **kwargs):\n expected_output = [\n 11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0, 44.0, 45.0, 46.0,\n 51.0, 52.0, 53.0, 54.0\n ]\n self._VerifyOneType(\n input_sizes=[1, 4, 4, 4],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output,\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolSamePaddingPacket_4(self, **kwargs):\n expected_output = [\n 21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0, 45.0, 46.0, 47.0, 48.0,\n 51.0, 52.0, 53.0, 54.0\n ]\n self._VerifyOneType(\n input_sizes=[1, 4, 4, 4],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output,\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolSamePaddingPacket_8(self, **kwargs):\n expected_output = [\n -12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, 4.0, 5.0, 6.0, 7.0,\n 8.0, 9.0, 10.0, 11.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,\n 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, -3.5, -54.0, -53.0, -52.0,\n -51.0, -50.0, -49.0, -48.0, -47.0, -38.0, -37.0, -36.0, -35.0, -34.0,\n -33.0, -32.0, -31.0, -22.0, 
-21.0, -20.0, -19.0, -18.0, -17.0, -16.0,\n -15.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -11.0, -10.0,\n -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,\n 12.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 33.0, 34.0, 35.0,\n 36.0, 37.0, 38.0, -3.5, -2.5, -85.0, -84.0, -83.0, -82.0, -81.0, -80.0,\n -79.0, -78.0, -69.0, -68.0, -67.0, -66.0, -65.0, -64.0, -63.0, -62.0,\n -53.0, -52.0, -51.0, -50.0, -49.0, -48.0, -47.0, -46.0, -41.0, -40.0,\n -39.0, -38.0, -37.0, -36.0, -35.0, -34.0\n ]\n self._VerifyOneType(\n input_sizes=[1, 8, 8, 8],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output,\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolEmptyInput(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[0, 8, 8, 8],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=[],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))\n @test_util.run_deprecated_v1\n def testMaxPoolValidPadding(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"VALID\",\n expected=[13.0, 14.0, 15.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))\n @test_util.run_deprecated_v1\n def testMaxPoolSamePadding(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 2, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=[13.0, 14.0, 15.0, 16.0, 17.0, 18.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))\n @test_util.xla_allow_fallback(\"XLA doesn't support explicit padding\")\n @test_util.run_deprecated_v1\n def testMaxPoolZeroExplicitPadding(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 1],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=[[0, 0], [0, 0], [0, 0], [0, 0]],\n expected=[9.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))\n @test_util.xla_allow_fallback(\"XLA doesn't support explicit padding\")\n @test_util.run_deprecated_v1\n def testMaxPoolNegativeInputExpPadding(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 1],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=[[0, 0], [2, 1], [2, 1], [0, 0]],\n expected=[-1, -1, -1, -1],\n use_negative_input=True,\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))\n @test_util.xla_allow_fallback(\"XLA doesn't support explicit padding\")\n @test_util.run_deprecated_v1\n def testMaxPoolExplicitPadding(self, **kwargs):\n expected_output = [9.0, 9.0]\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 1],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=[[0, 0], [0, 2], [0, 1], [0, 0]],\n expected=expected_output,\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))\n @test_util.xla_allow_fallback(\"XLA doesn't support explicit padding\")\n @test_util.run_deprecated_v1\n def testMaxPoolExplicitPaddingAdvanced(self, **kwargs):\n expected_output = [7, 9, 11, 12, 19, 21, 23, 24, 31, 33, 35, 36, 31, 33,\n 35, 36]\n self._VerifyOneType(\n input_sizes=[1, 6, 6, 1],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=[[0, 0], [1, 2], [2, 1], [0, 0]],\n expected=expected_output,\n **kwargs)\n\n @parameterized.parameters(\n 
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))\n @test_util.xla_allow_fallback(\"XLA doesn't support explicit padding\")\n @test_util.run_deprecated_v1\n def testMaxPoolNegativeInputExpPaddingAdv(self, **kwargs):\n expected_output = [-1, -1, -3, -5, -7, -7, -9, -11, -19, -19, -21, -23, -31,\n -31, -33, -35]\n\n self._VerifyOneType(\n input_sizes=[1, 6, 6, 1],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=[[0, 0], [1, 2], [2, 1], [0, 0]],\n expected=expected_output,\n use_negative_input=True,\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))\n @test_util.xla_allow_fallback(\"XLA doesn't support explicit padding\")\n @test_util.run_deprecated_v1\n def testMaxPoolExplicitPadding2_(self, **kwargs):\n expected_output = [9.0, 9.0]\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 1],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=[[0, 0], [0, 2], [0, 1], [0, 0]],\n expected=expected_output,\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(\n nn_ops.max_pool1d, nn_ops.max_pool_v2, one_dimensional=True))\n @test_util.xla_allow_fallback(\"XLA doesn't support explicit padding\")\n @test_util.run_deprecated_v1\n def testMaxPoolExplicitPadding_1D(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 1],\n ksize=[1, 2, 1],\n strides=[1, 2, 1],\n padding=[[0, 0], [0, 1], [0, 0]],\n expected=[2.0, 3.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))\n @test_util.run_deprecated_v1\n def testMaxPoolSamePaddingNonSquareWindow(self, **kwargs):\n # input is:\n # [1.0, 2.0\n # 3.0 4.0]\n #\n # Window of [x, x] should do:\n #\n # [max(1.0, 2.0), max(2.0, padded0),\n # max(3.0, 4.0), max(4.0, padded0)]\n self._VerifyOneType(\n input_sizes=[1, 2, 2, 1],\n ksize=[1, 1, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n expected=[2.0, 2.0, 4.0, 4.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))\n @test_util.run_deprecated_v1\n def testMaxPoolValidPaddingUnevenStride(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 4, 4, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 2, 1],\n padding=\"VALID\",\n expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))\n @test_util.run_deprecated_v1\n def testMaxPoolValidPaddingUnevenStride2_(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 4, 4, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 1, 1],\n padding=\"VALID\",\n expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))\n @test_util.run_deprecated_v1\n def testMaxPoolSamePaddingPacket4_(self, **kwargs):\n expected_output = [\n 21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,\n 61.0, 62.0, 63.0, 64.0\n ]\n self._VerifyOneType(\n input_sizes=[1, 4, 4, 4],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output,\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))\n @test_util.run_deprecated_v1\n def testMaxPoolSamePaddingPacket8_(self, **kwargs):\n expected_output = [\n 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 97.0, 98.0, 99.0, 100.0,\n 101.0, 102.0, 103.0, 104.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0,\n 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, 120.0,\n 18.0, 19.0, 
20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 34.0, 35.0, 36.0, 37.0,\n 38.0, 39.0, 40.0, 41.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0,\n 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 82.0, 83.0, 84.0, 85.0,\n 86.0, 87.0, 88.0, 89.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0,\n 105.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0,\n 123.0, 124.0, 125.0, 126.0, 127.0, 120.0, 121.0, -45.0, -44.0, -43.0,\n -42.0, -41.0, -40.0, -39.0, -38.0, -29.0, -28.0, -27.0, -26.0, -25.0,\n -24.0, -23.0, -22.0, -13.0, -12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0,\n -5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0\n ]\n self._VerifyOneType(\n input_sizes=[1, 8, 8, 8],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output,\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))\n @test_util.run_deprecated_v1\n def testMaxPoolEmptyInput(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[0, 8, 8, 8],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=[],\n **kwargs)\n\n # Tests for DepthwiseMaxPooling on CPU only.\n @parameterized.parameters(\n GetTestConfigsDicts(\n nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))\n @test_util.run_deprecated_v1\n def testDepthwiseMaxPool1x1DepthWindow(self, **kwargs):\n # input is:\n # [1.0, ..., 10.0] along depth,\n #\n # We maxpool by depth in patches of 2.\n self._VerifyOneType(\n input_sizes=[1, 1, 1, 10],\n ksize=[1, 1, 1, 2],\n strides=[1, 1, 1, 2],\n padding=\"SAME\",\n expected=[2.0, 4.0, 6.0, 8.0, 10.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(\n nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))\n @test_util.run_deprecated_v1\n def testDepthwiseMaxPool2x2DepthWindow(self, **kwargs):\n # input is:\n #\n # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2\n # output. 
Each node has contiguous values, so the depthwise max\n # should be multiples of 3.0.\n self._VerifyOneType(\n input_sizes=[1, 2, 2, 6],\n ksize=[1, 1, 1, 3],\n strides=[1, 1, 1, 3],\n padding=\"SAME\",\n expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(\n nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))\n @test_util.run_deprecated_v1\n def testMaxPoolKernelSmallerThanStrideValid(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 7, 7, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 3, 3, 1],\n padding=\"VALID\",\n expected=[9, 12, 30, 33],\n **kwargs)\n\n @parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testAvgPoolKernelSmallerThanStride(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 7, 7, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 3, 3, 1],\n padding=\"VALID\",\n expected=[5, 8, 26, 29],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2) +\n GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testKernelSmallerThanStrideSame1_(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 3, 3, 1],\n ksize=[1, 1, 1, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=[1, 3, 7, 9],\n **kwargs)\n\n @parameterized.parameters(\n GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2) +\n GetTestConfigsDicts(nn_ops.avg_pool))\n @test_util.run_deprecated_v1\n def testKernelSmallerThanStrideSame2_(self, **kwargs):\n self._VerifyOneType(\n input_sizes=[1, 4, 4, 1],\n ksize=[1, 1, 1, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=[1, 3, 9, 11],\n **kwargs)\n\n def _testDepthwiseMaxPoolInvalidConfig(self,\n in_size,\n ksize,\n strides,\n error_msg,\n use_gpu=False):\n with self.cached_session(use_gpu=use_gpu):\n t = constant_op.constant(1.0, shape=in_size)\n with self.assertRaisesRegex(errors_impl.UnimplementedError, error_msg):\n t = nn_ops.max_pool(\n t, ksize=ksize, strides=strides, padding=\"SAME\").eval()\n\n @test_util.disable_xla(\"b/123338077\") # Passes with XLA\n def testDepthwiseMaxPoolInvalidConfigs(self):\n self._testDepthwiseMaxPoolInvalidConfig(\n [1, 2, 2, 4], [1, 2, 2, 2], [1, 1, 1, 2],\n \"exactly one of pooling across depth\")\n self._testDepthwiseMaxPoolInvalidConfig(\n [1, 2, 2, 4], [1, 1, 1, 2], [1, 1, 1, 1],\n \"depth window to equal the depth stride\")\n self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],\n [1, 1, 1, 3], \"evenly divide\")\n if test.is_gpu_available():\n with self.session():\n t = variables.Variable(np.ones([1, 2, 2, 4]))\n self.evaluate(variables.global_variables_initializer())\n with self.assertRaisesOpError(\"for CPU devices\"):\n nn_ops.max_pool(\n t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],\n padding=\"SAME\").eval()\n\n # The following are tests that verify that the CPU and GPU implementations\n # produce the same results.\n def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):\n # double datatype is currently not supported for pooling ops\n # on the ROCm platform\n for dtype in [np.float32, np.float16] \\\n + [np.float64] if not test.is_built_with_rocm() else []:\n tensor_input = np.random.rand(*input_shape).astype(dtype)\n with self.cached_session():\n t = constant_op.constant(tensor_input, shape=input_shape)\n out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)\n gpu_val = self.evaluate(out_op)\n with self.cached_session(use_gpu=False):\n t = 
constant_op.constant(tensor_input, shape=input_shape)\n out_op = nn_ops.max_pool(t, ksize, strides, padding)\n cpu_val = self.evaluate(out_op)\n self.assertAllCloseAccordingToType(cpu_val, gpu_val)\n\n def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,\n padding):\n # double datatype is currently not supported for pooling ops\n # on the ROCm platform\n for dtype in [np.float32, np.float16] \\\n + [np.float64] if not test.is_built_with_rocm() else []:\n # Generate numbers in a narrow range, so that there are many duplicates\n # in the input.\n tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)\n tensor_output = np.random.rand(*output_shape).astype(dtype)\n with self.cached_session():\n t = constant_op.constant(tensor_input, shape=input_shape)\n _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)\n argmax = self.evaluate(argmax_op)\n grad_in = constant_op.constant(tensor_output, shape=output_shape)\n out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize,\n strides, padding)\n gpu_val = self.evaluate(out_op)\n self.assertShapeEqual(gpu_val, out_op)\n with self.cached_session(use_gpu=False):\n t = constant_op.constant(tensor_input, shape=input_shape)\n out_op = nn_ops.max_pool(t, ksize, strides, padding)\n orig_out = self.evaluate(out_op)\n grad_in = constant_op.constant(tensor_output, shape=output_shape)\n out_op = gen_nn_ops.max_pool_grad(t, orig_out, grad_in, ksize, strides,\n padding)\n cpu_val = self.evaluate(out_op)\n self.assertShapeEqual(cpu_val, out_op)\n # The CPU version accumulates its gradient on fp16, so it's less\n # accurate than the GPU version that does the accumulation on fp32\n self.assertAllCloseAccordingToType(\n cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)\n\n def _CompareMaxPoolingGradBk(self, input_shape, output_shape, ksize, strides,\n padding):\n # double datatype is currently not supported for pooling ops\n # on the ROCm platform\n for dtype in [np.float32, np.float16] \\\n + [np.float64] if not test.is_built_with_rocm() else []:\n # Generate numbers in a narrow range, so that there are many duplicates\n # in the input.\n tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)\n with self.cached_session(use_gpu=False):\n t = constant_op.constant(tensor_input, shape=input_shape)\n _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)\n argmax = self.evaluate(argmax_op)\n grad_in = constant_op.constant(tensor_input, shape=input_shape)\n out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(\n t, grad_in, argmax, ksize, strides, padding)\n gpu_val = self.evaluate(out_op)\n self.assertShapeEqual(gpu_val, out_op)\n with self.cached_session(use_gpu=False):\n t = constant_op.constant(tensor_input, shape=input_shape)\n out_op = nn_ops.max_pool(t, ksize, strides, padding)\n orig_out = self.evaluate(out_op)\n grad_in = constant_op.constant(tensor_input, shape=input_shape)\n out_op = gen_nn_ops.max_pool_grad_grad(t, orig_out, grad_in, ksize,\n strides, padding)\n cpu_val = self.evaluate(out_op)\n self.assertShapeEqual(cpu_val, out_op)\n # The CPU version accumulates its gradient on fp16, so it's less\n # accurate than the GPU version that does the accumulation on fp32\n self.assertAllCloseAccordingToType(\n cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)\n\n def testMaxPoolingWithArgmax(self):\n tensor_input = [\n 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 1.0\n ]\n\n Config = collections.namedtuple(\n \"Config\", 
[\"use_gpu\", \"include_batch_in_index\", \"argmax\", \"Targmax\"])\n configs = [\n Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int64),\n Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int64),\n Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int32),\n Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int32),\n Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int64),\n Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int64),\n ]\n\n for config in configs:\n with GetDeviceScope(self, use_gpu=config.use_gpu):\n t = constant_op.constant(tensor_input, shape=[2, 3, 3, 1])\n out_op, argmax_op = nn_ops.max_pool_with_argmax(\n t,\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1],\n Targmax=config.Targmax,\n padding=\"VALID\",\n include_batch_in_index=config.include_batch_in_index)\n out, argmax = self.evaluate([out_op, argmax_op])\n self.assertShapeEqual(out, out_op)\n self.assertShapeEqual(argmax, argmax_op)\n self.assertAllClose(out.ravel(),\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n self.assertAllEqual(argmax.ravel(), config.argmax)\n\n def testMaxPoolingGradWithArgmax(self):\n orig_input = [\n 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 1.0\n ]\n tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]\n\n Config = collections.namedtuple(\n \"Config\", [\"use_gpu\", \"include_batch_in_index\", \"argmax\"])\n configs = [\n Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8]),\n Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17]),\n Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),\n Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])\n ]\n\n for config in configs:\n with GetDeviceScope(self, config.use_gpu):\n orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])\n t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])\n argmax_t = constant_op.constant(\n config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)\n out_op = gen_nn_ops.max_pool_grad_with_argmax(\n orig_in,\n t,\n argmax_t,\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n include_batch_in_index=config.include_batch_in_index)\n out = self.evaluate(out_op).flatten()\n self.assertAllClose(out, [\n 11.0, 12.0, 0.0, 13.0, 0.0, 14.0, 0.0, 0.0, 0.0, 21.0, 0.0, 22.0,\n 0.0, 0.0, 0.0, 23.0, 0.0, 24.0\n ])\n\n def testMaxPoolingGradThrowDeterminismError(self):\n if test.is_gpu_available(cuda_only=True):\n try:\n config_exec.enable_op_determinism()\n orig_input = [\n 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 1.0\n ]\n tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]\n\n with GetDeviceScope(self, True):\n orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])\n t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])\n argmax_t = constant_op.constant(\n [0, 1, 3, 5, 0, 2, 6, 8], shape=[2, 2, 2, 1], dtype=dtypes.int64)\n with self.assertRaisesRegexp(\n errors_impl.UnimplementedError, \"Determinism is not yet supported \"\n \"for MaxPoolGradWithArgmax.\"):\n out_op = gen_nn_ops.max_pool_grad_with_argmax(\n orig_in,\n t,\n argmax_t,\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n include_batch_in_index=False)\n self.evaluate(out_op)\n finally:\n config_exec.disable_op_determinism()\n else:\n try:\n config_exec.enable_op_determinism()\n orig_input = [\n 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 1.0\n ]\n tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]\n\n with GetDeviceScope(self, False):\n 
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])\n t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])\n argmax_t = constant_op.constant(\n [0, 1, 3, 5, 0, 2, 6, 8], shape=[2, 2, 2, 1], dtype=dtypes.int64)\n out_op = gen_nn_ops.max_pool_grad_with_argmax(\n orig_in,\n t,\n argmax_t,\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n include_batch_in_index=False)\n self.evaluate(out_op)\n finally:\n config_exec.disable_op_determinism()\n\n def testMaxPoolingGradGradWithArgmax(self):\n # MaxPoolWithArgMax is implemented only on CUDA.\n if not test.is_gpu_available(cuda_only=True):\n return\n orig_input = [\n 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 1.0\n ]\n tensor_input = [\n 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 21.0, 22.0, 23.0,\n 24.0, 25.0, 26.0, 27.0, 28.0, 29.0\n ]\n\n Config = collections.namedtuple(\n \"Config\", [\"use_gpu\", \"include_batch_in_index\", \"argmax\"])\n configs = [\n Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),\n Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])\n ]\n\n for config in configs:\n with GetDeviceScope(self, config.use_gpu):\n orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])\n t = constant_op.constant(tensor_input, shape=[2, 3, 3, 1])\n argmax_t = constant_op.constant(\n config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)\n out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(\n orig_in,\n t,\n argmax_t,\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n include_batch_in_index=config.include_batch_in_index)\n out = self.evaluate(out_op).flatten()\n self.assertAllClose(out,\n [11.0, 12.0, 14.0, 16.0, 21.0, 23.0, 27.0, 29.0])\n\n def _ConstructAndTestGradient(self,\n pool_func,\n input_sizes,\n output_sizes,\n window_rows,\n window_cols,\n row_stride,\n col_stride,\n padding,\n data_format,\n use_gpu,\n x_init_value=None):\n \"\"\"Verifies the gradients of the max or avg pooling function.\n\n Args:\n pool_func: Function to be called, co.MaxPool, co.AvgPool,\n or the Lua version.\n input_sizes: Input tensor dimensions.\n output_sizes: Output tensor dimensions.\n window_rows: kernel size in row dim\n window_cols: kernel size in col dim\n row_stride: Row Stride.\n col_stride: Col Stride.\n padding: Padding type.\n data_format: Data format.\n use_gpu: whether we are running on GPU\n x_init_value: Values to be passed to the gradient checker.\n \"\"\"\n assert input_sizes[0] == output_sizes[0]\n assert input_sizes[3] == output_sizes[3]\n total_size = 1\n for s in input_sizes:\n total_size *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x = [f * 1.0 for f in range(1, total_size + 1)]\n with self.cached_session(use_gpu=use_gpu):\n input_tensor = constant_op.constant(x, shape=input_sizes, name=\"input\")\n if pool_func == nn_ops.avg_pool:\n func_name = \"avg_pool\"\n err_tolerance = 1e-4\n else:\n if x_init_value is None:\n x_init_value = np.asfarray(\n np.arange(1, total_size + 1),\n dtype=np.float32).reshape(input_sizes)\n func_name = \"max_pool\"\n err_tolerance = 1e-3\n if data_format == \"NCHW\":\n ksize = [1, 1, window_rows, window_cols]\n strides = [1, 1, row_stride, col_stride]\n if isinstance(padding, list):\n padding = test_util.NHWCToNCHW(padding)\n t = test_util.NHWCToNCHW(input_tensor)\n else:\n ksize = [1, window_rows, window_cols, 1]\n strides = [1, row_stride, col_stride, 1]\n t = input_tensor\n t = pool_func(\n t,\n ksize=ksize,\n strides=strides,\n padding=padding,\n 
data_format=data_format,\n name=func_name)\n if data_format == \"NCHW\":\n t = test_util.NCHWToNHWC(t)\n\n err = gradient_checker.compute_gradient_error(\n input_tensor,\n input_sizes,\n t,\n output_sizes,\n x_init_value=x_init_value,\n delta=1e-2)\n tf_logging.info(\"%s gradient error = %.4f\" % (func_name, err))\n self.assertLess(err, err_tolerance)\n\n def _ConstructAndTestSecondGradient(self,\n pool_func,\n input_sizes,\n output_sizes,\n window_rows,\n window_cols,\n row_stride,\n col_stride,\n padding,\n data_format,\n use_gpu,\n x_init_value=None):\n \"\"\"Verifies the second-order gradients of the pooling function.\n\n Args:\n pool_func: Function to be called, co.MaxPool, co.AvgPool,\n or the Lua version.\n input_sizes: Input tensor dimensions.\n output_sizes: Output tensor dimensions.\n window_rows: kernel size in row dim\n window_cols: kernel size in col dim\n row_stride: Row Stride.\n col_stride: Col Stride.\n padding: Padding type.\n data_format: Data format.\n use_gpu: whether we are running on GPU\n x_init_value: Values to be passed to the gradient checker.\n \"\"\"\n assert input_sizes[0] == output_sizes[0]\n assert input_sizes[3] == output_sizes[3]\n total_size = 1\n for s in input_sizes:\n total_size *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x = [f * 1.0 for f in range(1, total_size + 1)]\n with self.cached_session(use_gpu=use_gpu):\n input_tensor = constant_op.constant(x, shape=input_sizes, name=\"input\")\n if pool_func == nn_ops.avg_pool:\n func_name = \"avg_pool\"\n err_tolerance = 1e-3\n else:\n if x_init_value is None:\n x_init_value = np.asfarray(\n np.arange(1, total_size + 1),\n dtype=np.float32).reshape(input_sizes)\n func_name = \"max_pool\"\n err_tolerance = 1e-2\n if data_format == \"NCHW\":\n ksize = [1, 1, window_rows, window_rows]\n strides = [1, 1, row_stride, col_stride]\n t = test_util.NHWCToNCHW(input_tensor)\n else:\n ksize = [1, window_rows, window_rows, 1]\n strides = [1, row_stride, col_stride, 1]\n t = input_tensor\n t = pool_func(\n t,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=func_name)\n if data_format == \"NCHW\":\n t = test_util.NHWCToNCHW(t)\n\n t_g = gradients_impl.gradients(t**2, input_tensor)[0]\n err = gradient_checker.compute_gradient_error(\n input_tensor,\n input_sizes,\n t_g,\n input_sizes,\n x_init_value=x_init_value,\n delta=1e-2)\n tf_logging.info(\"%s second-order gradient error = %.4f\" % (func_name, err))\n self.assertLess(err, err_tolerance)\n\n def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[1, 3, 3, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=1,\n window_cols=1,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradValidPadding2_1_6(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[2, 6, 6, 3],\n output_sizes=[2, 5, 5, 3],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradValidPadding2_1_7(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[2, 7, 7, 3],\n output_sizes=[2, 6, 6, 3],\n window_rows=2,\n window_cols=2,\n 
row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradValidPadding1_2(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[1, 3, 3, 1],\n output_sizes=[1, 2, 2, 1],\n window_rows=1,\n window_cols=1,\n row_stride=2,\n col_stride=2,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradValidPadding2_2(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[2, 2, 2, 3],\n output_sizes=[2, 1, 1, 3],\n window_rows=2,\n window_cols=2,\n row_stride=2,\n col_stride=2,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradSamePadding1_1(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 2, 4, 3],\n window_rows=1,\n window_cols=1,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradSamePadding1_2(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 1, 2, 3],\n window_rows=1,\n window_cols=1,\n row_stride=2,\n col_stride=2,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradSamePadding2_1(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 2, 4, 3],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradSamePadding2_2(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 1, 2, 3],\n window_rows=2,\n window_cols=2,\n row_stride=2,\n col_stride=2,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradSamePadding3_1(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[1, 7, 7, 1],\n output_sizes=[1, 7, 7, 1],\n window_rows=3,\n window_cols=3,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolExplicitPadding_1(self, data_format, use_gpu):\n for pool_func in [nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[1, 7, 7, 1],\n output_sizes=[1, 7, 7, 1],\n window_rows=3,\n window_cols=3,\n row_stride=1,\n col_stride=1,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolExplicitPadding_2(self, data_format, use_gpu):\n for pool_func in [nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[1, 7, 7, 1],\n output_sizes=[1, 6, 8, 1],\n window_rows=3,\n window_cols=5,\n row_stride=1,\n col_stride=1,\n padding=[[0, 0], [0, 1], [2, 3], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolExplicitPaddingLeftGreater(self, data_format, use_gpu):\n for pool_func in [nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n 
pool_func,\n input_sizes=[1, 7, 7, 1],\n output_sizes=[1, 6, 8, 1],\n window_rows=3,\n window_cols=5,\n row_stride=1,\n col_stride=1,\n padding=[[0, 0], [0, 1], [3, 2], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolExplicitPaddingBatchChannel(self, data_format, use_gpu):\n for pool_func in [nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[4, 7, 7, 3],\n output_sizes=[4, 6, 8, 3],\n window_rows=3,\n window_cols=5,\n row_stride=1,\n col_stride=1,\n padding=[[0, 0], [0, 1], [3, 2], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolExplicitPaddingStrides(self, data_format, use_gpu):\n for pool_func in [nn_ops.max_pool]:\n self._ConstructAndTestGradient(\n pool_func,\n input_sizes=[1, 7, 7, 1],\n output_sizes=[1, 4, 3, 1],\n window_rows=3,\n window_cols=3,\n row_stride=2,\n col_stride=3,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_deprecated_v1\n def testMaxPoolGrad(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._testMaxPoolGradValidPadding1_1(data_format, use_gpu)\n self._testMaxPoolGradValidPadding1_2(data_format, use_gpu)\n self._testMaxPoolGradValidPadding2_1_6(data_format, use_gpu)\n self._testMaxPoolGradValidPadding2_1_7(data_format, use_gpu)\n self._testMaxPoolGradValidPadding2_2(data_format, use_gpu)\n self._testMaxPoolGradSamePadding1_1(data_format, use_gpu)\n self._testMaxPoolGradSamePadding1_2(data_format, use_gpu)\n self._testMaxPoolGradSamePadding2_1(data_format, use_gpu)\n self._testMaxPoolGradSamePadding2_2(data_format, use_gpu)\n self._testMaxPoolGradSamePadding3_1(data_format, use_gpu)\n self._testMaxPoolExplicitPadding_1(data_format, use_gpu)\n self._testMaxPoolExplicitPadding_2(data_format, use_gpu)\n self._testMaxPoolExplicitPaddingStrides(data_format, use_gpu)\n self._testMaxPoolExplicitPaddingLeftGreater(data_format, use_gpu)\n self._testMaxPoolExplicitPaddingBatchChannel(data_format, use_gpu)\n\n def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,\n window_cols, row_stride, col_stride, padding, v2):\n \"\"\"Max Pooling Gradient.\n\n Args:\n orig_input: A float Tensor. The original input tensor.\n orig_output: A float Tensor. The original output tensor.\n grad: A float Tensor.\n The 4D (batch x rows x cols x depth) output backprop.\n window_rows: integer. Kernel size along rows dimension.\n window_cols: integer. Kernel size along cols dimension.\n row_stride: integer. Stride along rows dimension\n col_stride: integer. Stride along cols dimension\n padding: PoolingOpDef.Padding. 
Padding type.\n\n Returns:\n A Tensor.\n \"\"\"\n pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad\n if v2:\n return pool_func(orig_input, orig_output, grad,\n [1, window_rows, window_cols, 1],\n [1, row_stride, col_stride, 1], padding)\n else:\n padding, explicit_paddings = nn_ops.convert_padding(padding)\n return pool_func(orig_input, orig_output, grad,\n [1, window_rows, window_cols, 1],\n [1, row_stride, col_stride, 1], padding,\n explicit_paddings)\n\n def _testMaxPoolGradDirect(self, input_data, output_backprop,\n expected_input_backprop, input_sizes, output_sizes,\n window_rows, window_cols, row_stride, col_stride,\n padding, use_gpu, v2):\n pool_func = gen_nn_ops.max_pool_v2 if v2 else nn_ops.max_pool\n with self.cached_session(use_gpu=use_gpu):\n input_tensor = variables.Variable(\n np.array(input_data, dtype=np.float32).reshape(input_sizes))\n self.evaluate(variables.global_variables_initializer())\n output_tensor = pool_func(input_tensor, [1, window_rows, window_cols, 1],\n [1, row_stride, col_stride, 1], padding)\n output_backprop_tensor = constant_op.constant(\n output_backprop, shape=output_sizes)\n\n input_backprop_tensor = self._MaxPoolGrad(\n input_tensor, output_tensor, output_backprop_tensor, window_rows,\n window_cols, row_stride, col_stride, padding, v2)\n\n actual_input_backprop = self.evaluate(input_backprop_tensor)\n self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)\n actual_input_backprop = actual_input_backprop.flatten()\n actual_input_backprop = self._GetNdArray(actual_input_backprop)\n\n actual_output = self.evaluate(output_tensor).flatten()\n actual_output = self._GetNdArray(actual_output)\n\n self.assertAllClose(\n expected_input_backprop, actual_input_backprop, rtol=1e-6, atol=1e-6)\n\n def _testMaxPoolGradDirect1_1(self):\n input_data = [\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0\n ]\n output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]\n expected_input_backprop = [\n 11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,\n 0.0, 0.0, 0.0, 0.0\n ]\n\n for use_gpu in True, False:\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n use_gpu=use_gpu,\n v2=v2)\n\n def _testMaxPoolGradDirect1_2(self):\n input_data = [\n 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,\n 0.0, 1.0\n ]\n output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]\n expected_input_backprop = [\n 11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,\n 0.0, 0.0, 0.0\n ]\n\n for use_gpu in True, False:\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n use_gpu=use_gpu,\n v2=v2)\n\n def _testMaxPoolGradDirect1_3(self):\n input_data = [\n 1.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 1.0,\n 1.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 1.0,\n ]\n output_backprop = [\n 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0,\n 23.0, 24.0, 25.0, 26.0\n ]\n expected_input_backprop = [\n 54,\n 0.0,\n 62,\n 0.0,\n 0.0,\n 60,\n 0.0,\n 22.0,\n 47,\n 0.0,\n 51,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 
0.0,\n ]\n\n for use_gpu in True, False:\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 4, 4, 1],\n window_rows=3,\n window_cols=3,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n use_gpu=use_gpu,\n v2=v2)\n\n def _testMaxPoolGradZeroExplicitPadding(self):\n input_data = [\n 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,\n 0.0, 1.0\n ]\n output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]\n expected_input_backprop = [\n 11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,\n 0.0, 0.0, 0.0\n ]\n\n for use_gpu in True, False:\n for v2 in [False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=[[0, 0], [0, 0], [0, 0], [0, 0]],\n use_gpu=use_gpu,\n v2=v2)\n\n def _testMaxPoolGradExplicitPadding_1(self):\n input_data = [\n 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,\n 0.0, 1.0\n ]\n output_backprop = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,\n 20.0, 21.0, 22.0]\n expected_input_backprop = [\n 11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 49.0, 19.0, 0.0, 41.0, 0.0, 0.0,\n 0.0, 0.0, 22.0\n ]\n\n for use_gpu in True, False:\n for v2 in [False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 4, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=[[0, 0], [0, 0], [0, 1], [0, 0]],\n use_gpu=use_gpu,\n v2=v2)\n\n def _testMaxPoolGradExplicitPadding_2(self):\n input_data = [\n 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,\n 0.0, 1.0\n ]\n output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]\n expected_input_backprop = [\n 54.0, 0.0, 30.0, 0.0, 0.0, 0.0, 0.0, 0.0, 39.0, 0.0, 21.0, 0.0, 0.0,\n 0.0, 0.0, 0.0\n ]\n\n for use_gpu in True, False:\n for v2 in [False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=3,\n window_cols=3,\n row_stride=2,\n col_stride=2,\n padding=[[0, 0], [2, 1], [2, 1], [0, 0]],\n use_gpu=use_gpu,\n v2=v2)\n\n def _testMaxPoolGradExplicitPadding_3(self):\n input_data = [\n -1.0, -5.0, -1.0, -5.0, -5.0, -1.0, -5.0, -1.0, -1.0, -5.0, -1.0, -5.0,\n -5.0, -1.0, -5.0, -1.0\n ]\n output_backprop = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,\n 20.0, 21.0, 22.0]\n expected_input_backprop = [\n 11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 49.0, 19.0, 0.0, 41.0, 0.0, 0.0,\n 0.0, 0.0, 22.0\n ]\n\n for use_gpu in True, False:\n for v2 in [False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 4, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=[[0, 0], [0, 0], [0, 1], [0, 0]],\n use_gpu=use_gpu,\n v2=v2)\n\n @test_util.no_xla_auto_jit(\"b/123923733\") # NaNs handled differently\n def _testMaxPoolGradDirectWithNans2_1(self):\n input_data = [float(\"nan\")] * 16\n output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]\n # Test the CPU implementation, which propagates diffs in case of NaN\n expected_input_backprop_tf_cpu = [\n 11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 
21.0, 0.0,\n 0.0, 0.0, 0.0, 0.0\n ]\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop_tf_cpu,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n use_gpu=False,\n v2=v2)\n\n if not test.is_gpu_available():\n return\n\n # The functionality associated with TF_ENABLE_NANPROP is currently\n # not supported on the ROCm platform, so skip this part of the test\n # NANs in input lead to non-deterministic results, and hence skipping\n # the remaining tests altogether on the ROCm platform\n if test.is_built_with_rocm():\n return\n\n # Test the GPU implementation that uses cudnn for now.\n saved_nanprop = os.environ.get(\"TF_ENABLE_MAXPOOL_NANPROP\")\n # Do not propagate the diff in cases of NaNs\n os.environ[\"TF_ENABLE_MAXPOOL_NANPROP\"] = \"0\"\n expected_input_backprop_cudnn = [\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0\n ]\n\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop_cudnn,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n use_gpu=True,\n v2=v2)\n\n # Propagate the diff in cases of NaNs\n os.environ[\"TF_ENABLE_MAXPOOL_NANPROP\"] = \"1\"\n expected_input_backprop_cudnn = expected_input_backprop_tf_cpu\n\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop_cudnn,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n use_gpu=True,\n v2=v2)\n\n if saved_nanprop:\n os.environ[\"TF_ENABLE_MAXPOOL_NANPROP\"] = saved_nanprop\n else:\n del os.environ[\"TF_ENABLE_MAXPOOL_NANPROP\"]\n\n @test_util.no_xla_auto_jit(\"b/123923733\") # NaNs handled differently\n def _testMaxPoolGradDirectWithNans2_2(self):\n input_data = [float(\"nan\")] * 16\n output_backprop = [\n float(\"nan\"), 12.0, 13.0, 15.0,\n float(\"nan\"), 17.0, 19.0, 20.0,\n float(\"nan\")\n ]\n # Test the CPU implementation, which propagates diffs in case of NaN\n expected_input_backprop_tf_cpu = [\n float(\"nan\"), 12.0, 13.0, 0.0, 15.0,\n float(\"nan\"), 17.0, 0.0, 19.0, 20.0,\n float(\"nan\"), 0.0, 0.0, 0.0, 0.0, 0.0\n ]\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop_tf_cpu,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n use_gpu=False,\n v2=v2)\n\n if not test.is_gpu_available():\n return\n\n # The functionality associated with TF_ENABLE_NANPROP is currently\n # not supported on the ROCm platform, so skip this part of the test\n # NANs in input lead to non-deterministic results, and hence skipping\n # the remaining tests altogether on the ROCm platform\n if test.is_built_with_rocm():\n return\n\n # Test the GPU implementation that uses cudnn for now.\n saved_nanprop = os.environ.get(\"TF_ENABLE_MAXPOOL_NANPROP\")\n # Do not propagate the diff in cases of NaNs\n os.environ[\"TF_ENABLE_MAXPOOL_NANPROP\"] = \"0\"\n expected_input_backprop_cudnn = [\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0\n ]\n\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop_cudnn,\n 
input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n use_gpu=True,\n v2=v2)\n\n # Propagate the diff in cases of NaNs\n os.environ[\"TF_ENABLE_MAXPOOL_NANPROP\"] = \"1\"\n expected_input_backprop_cudnn = expected_input_backprop_tf_cpu\n\n for v2 in [True, False]:\n self._testMaxPoolGradDirect(\n input_data,\n output_backprop,\n expected_input_backprop_cudnn,\n input_sizes=[1, 4, 4, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n use_gpu=True,\n v2=v2)\n\n if saved_nanprop:\n os.environ[\"TF_ENABLE_MAXPOOL_NANPROP\"] = saved_nanprop\n else:\n del os.environ[\"TF_ENABLE_MAXPOOL_NANPROP\"]\n\n @test_util.run_deprecated_v1\n def testMaxPoolGradDirect(self):\n self._testMaxPoolGradDirect1_1()\n self._testMaxPoolGradDirect1_2()\n self._testMaxPoolGradDirect1_3()\n self._testMaxPoolGradDirectWithNans2_1()\n self._testMaxPoolGradDirectWithNans2_2()\n self._testMaxPoolGradZeroExplicitPadding()\n self._testMaxPoolGradExplicitPadding_1()\n self._testMaxPoolGradExplicitPadding_2()\n self._testMaxPoolGradExplicitPadding_3()\n\n def _testMaxPoolGradGradValidPadding1_1(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestSecondGradient(\n pool_func,\n input_sizes=[1, 3, 3, 1],\n output_sizes=[1, 3, 3, 1],\n window_rows=1,\n window_cols=1,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradGradValidPadding2_1_6(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestSecondGradient(\n pool_func,\n input_sizes=[2, 6, 6, 3],\n output_sizes=[2, 5, 5, 3],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradGradValidPadding2_1_7(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestSecondGradient(\n pool_func,\n input_sizes=[2, 7, 7, 3],\n output_sizes=[2, 6, 6, 3],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradGradValidPadding2_2(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestSecondGradient(\n pool_func,\n input_sizes=[2, 2, 2, 3],\n output_sizes=[2, 1, 1, 3],\n window_rows=2,\n window_cols=2,\n row_stride=2,\n col_stride=2,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradGradSamePadding1_1(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestSecondGradient(\n pool_func,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 2, 4, 3],\n window_rows=1,\n window_cols=1,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradGradSamePadding2_1(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestSecondGradient(\n pool_func,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 2, 4, 3],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradGradSamePadding2_2(self, data_format, use_gpu):\n for pool_func in 
[gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestSecondGradient(\n pool_func,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 1, 2, 3],\n window_rows=2,\n window_cols=2,\n row_stride=2,\n col_stride=2,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testMaxPoolGradGradSamePadding3_1(self, data_format, use_gpu):\n for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:\n self._ConstructAndTestSecondGradient(\n pool_func,\n input_sizes=[1, 7, 7, 1],\n output_sizes=[1, 7, 7, 1],\n window_rows=3,\n window_cols=3,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_deprecated_v1\n def testMaxPoolGradGrad(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._testMaxPoolGradGradValidPadding1_1(data_format, use_gpu)\n self._testMaxPoolGradGradValidPadding2_1_6(data_format, use_gpu)\n self._testMaxPoolGradGradValidPadding2_1_7(data_format, use_gpu)\n self._testMaxPoolGradGradValidPadding2_2(data_format, use_gpu)\n self._testMaxPoolGradGradSamePadding1_1(data_format, use_gpu)\n self._testMaxPoolGradGradSamePadding2_1(data_format, use_gpu)\n self._testMaxPoolGradGradSamePadding2_2(data_format, use_gpu)\n self._testMaxPoolGradGradSamePadding3_1(data_format, use_gpu)\n\n def _MaxPoolGradGrad(self, orig_input, orig_output, grad, window_rows,\n window_cols, row_stride, col_stride, padding):\n \"\"\"Max Pooling Second-Order Gradient.\n\n Args:\n orig_input: A float Tensor. The original input tensor.\n orig_output: A float Tensor. The original output tensor.\n grad: A float Tensor.\n The 4D (batch x out_rows x out_cols x depth) output backprop.\n window_rows: integer. Kernel size along rows dimension.\n window_cols: integer. Kernel size along cols dimension.\n row_stride: integer. Stride along rows dimension\n col_stride: integer. Stride along cols dimension\n padding: PoolingOpDef.Padding. 
Padding type.\n\n Returns:\n A Tensor.\n \"\"\"\n return gen_nn_ops.max_pool_grad_grad(\n orig_input, orig_output, grad, [1, window_rows, window_cols, 1],\n [1, row_stride, col_stride, 1], padding)\n\n @test_util.run_deprecated_v1\n def testAvgPoolGrad(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._testAvgPoolGradValidPadding1_1(data_format, use_gpu)\n self._testAvgPoolGradValidPadding1_2(data_format, use_gpu)\n self._testAvgPoolGradValidPadding2_1(data_format, use_gpu)\n self._testAvgPoolGradValidPadding2_2(data_format, use_gpu)\n self._testAvgPoolGradSamePadding1_1(data_format, use_gpu)\n self._testAvgPoolGradSamePadding1_2(data_format, use_gpu)\n self._testAvgPoolGradSamePadding2_1(data_format, use_gpu)\n self._testAvgPoolGradSamePadding2_2(data_format, use_gpu)\n self._testAvgPoolGradSamePadding3_1(data_format, use_gpu)\n\n def _testAvgPoolGradValidPadding1_1(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[2, 3, 3, 3],\n output_sizes=[2, 3, 3, 3],\n window_rows=1,\n window_cols=1,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testAvgPoolGradValidPadding1_2(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[2, 3, 3, 3],\n output_sizes=[2, 2, 2, 3],\n window_rows=1,\n window_cols=1,\n row_stride=2,\n col_stride=2,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testAvgPoolGradValidPadding2_1(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[2, 3, 3, 3],\n output_sizes=[2, 2, 2, 3],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testAvgPoolGradValidPadding2_2(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[2, 2, 2, 3],\n output_sizes=[2, 1, 1, 3],\n window_rows=2,\n window_cols=2,\n row_stride=2,\n col_stride=2,\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testAvgPoolGradSamePadding1_1(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 2, 4, 3],\n window_rows=1,\n window_cols=1,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testAvgPoolGradSamePadding1_2(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 1, 2, 3],\n window_rows=1,\n window_cols=1,\n row_stride=2,\n col_stride=2,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testAvgPoolGradSamePadding2_1(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 2, 4, 3],\n window_rows=2,\n window_cols=2,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testAvgPoolGradSamePadding2_2(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[2, 2, 4, 3],\n output_sizes=[2, 1, 2, 3],\n window_rows=2,\n window_cols=2,\n row_stride=2,\n col_stride=2,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n def _testAvgPoolGradSamePadding3_1(self, data_format, use_gpu):\n self._ConstructAndTestGradient(\n nn_ops.avg_pool,\n input_sizes=[1, 7, 7, 1],\n output_sizes=[1, 7, 7, 1],\n window_rows=3,\n 
window_cols=3,\n row_stride=1,\n col_stride=1,\n padding=\"SAME\",\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_deprecated_v1\n def testShapeFunctionEdgeCases(self):\n # All shapes unknown.\n for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:\n p = pool_func(\n array_ops.placeholder(dtypes.float32),\n ksize=[1, 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n self.assertEqual([None, None, None, None], p.get_shape().as_list())\n p, am = nn_ops.max_pool_with_argmax(\n array_ops.placeholder(dtypes.float32),\n ksize=[1, 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n self.assertEqual([None, None, None, None], p.get_shape().as_list())\n self.assertEqual([None, None, None, None], am.get_shape().as_list())\n\n # Incorrect input shape.\n for pool_func in [\n nn_ops.max_pool, nn_ops.avg_pool, nn_ops.max_pool_with_argmax\n ]:\n with self.assertRaises(ValueError):\n pool_func(\n array_ops.placeholder(dtypes.float32, shape=[1, 3]),\n ksize=[1, 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n\n @test_util.run_deprecated_v1\n @test_util.disable_xla(\"b/123337890\") # Error messages differ\n def testOpEdgeCases(self):\n with self.session(use_gpu=test.is_gpu_available()) as sess:\n pool_funcs = [nn_ops.max_pool, nn_ops.avg_pool]\n if test.is_gpu_available():\n pool_funcs.append(nn_ops.max_pool_with_argmax)\n for pool_func in pool_funcs:\n if pool_func != nn_ops.max_pool:\n # Illegal strides.\n with self.assertRaisesRegex(\n errors_impl.UnimplementedError,\n \"Pooling is not yet supported on the batch\"):\n sess.run(\n pool_func(\n array_ops.placeholder(dtypes.float32),\n ksize=[1, 1, 1, 1],\n strides=[2, 1, 1, 1],\n padding=\"SAME\"))\n\n # Filter too large.\n with self.assertRaisesRegex(ValueError, \"Negative dimension size\"):\n sess.run(\n pool_func(\n array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),\n ksize=[1, 20, 21, 1],\n strides=[1, 1, 1, 1],\n padding=\"VALID\"))\n with self.assertRaisesRegex(ValueError, \"Negative dimension size\"):\n pool_func(\n array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),\n ksize=[1, 21, 20, 1],\n strides=[1, 1, 1, 1],\n padding=\"VALID\")\n\n @test_util.run_deprecated_v1\n def testEdgeCasesRaiseErrors(self):\n with self.assertRaisesRegexp(\n ValueError, \"NCHW_VECT_C.*is not supported with \"\n \"explicit padding|XLA does not support pooling ops with explicit \"\n \"padding\"):\n nn_ops.max_pool(\n array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1]),\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=[[0, 0], [0, 1], [0, 1], [0, 0]],\n data_format=\"NCHW_VECT_C\")\n with self.assertRaisesRegexp(\n ValueError, \"Explicit padding is not supported with an input \"\n \"tensor of rank 5\"):\n nn_ops.max_pool_v2(\n array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1, 1]),\n ksize=[1, 2, 2, 1, 1],\n strides=[1, 2, 2, 1, 1],\n padding=[[0, 0], [0, 1], [0, 1], [0, 0]],\n data_format=\"NCHW\")\n with self.assertRaisesRegexp(\n ValueError, \"Attr 'padding' of 'MaxPoolV2' Op passed \"\n \"string 'EXPLICIT'\"):\n gen_nn_ops.max_pool_v2(\n array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1, 1]),\n ksize=[1, 2, 2, 1, 1],\n strides=[1, 2, 2, 1, 1],\n padding=\"EXPLICIT\",\n data_format=\"NHWC\")\n\n @test_util.run_deprecated_v1\n def testEdgeCasesExcessPadding(self):\n with self.session(use_gpu=test.is_gpu_available()) as sess:\n with self.assertRaisesRegexp(\n (errors_impl.UnimplementedError, errors_impl.InvalidArgumentError),\n \"Right padding 2 needs to be smaller than the window size 2|\"\n 
\"XLA does not support pooling ops with explicit padding\"):\n input_sizes = [1, 3, 3, 1]\n x = [(((f + 128) % 255) - 127) for f in range(9)]\n t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)\n sess.run(gen_nn_ops.max_pool(\n t,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"EXPLICIT\",\n explicit_paddings=[0, 0, 0, 1, 0, 2, 0, 0],\n data_format=\"NHWC\"))\n\n @test_util.run_deprecated_v1\n def testNegativePadding(self):\n with self.session(use_gpu=test.is_gpu_available()) as sess:\n with self.assertRaisesRegexp(\n ValueError, \"All elements of explicit_paddings must be \"\n \"nonnegative for\"):\n input_sizes = [1, 3, 3, 1]\n x = [(((f + 128) % 255) - 127) for f in range(9)]\n t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)\n sess.run(gen_nn_ops.max_pool(\n t,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"EXPLICIT\",\n explicit_paddings=[0, 0, -1, -1, -1, -1, 0, 0],\n data_format=\"NHWC\"))\n\n @test_util.run_deprecated_v1\n def testExplicitPaddingBatch(self):\n with self.session(use_gpu=test.is_gpu_available()) as sess:\n with self.assertRaisesRegexp(\n ValueError, \"Nonzero explicit padding in the batch or depth \"\n \"dimensions is not supported\"):\n input_sizes = [1, 3, 3, 1]\n x = [(((f + 128) % 255) - 127) for f in range(9)]\n t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)\n sess.run(gen_nn_ops.max_pool(\n t,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"EXPLICIT\",\n explicit_paddings=[1, 1, 1, 1, 1, 1, 0, 0],\n data_format=\"NHWC\"))\n\n def testMaxPoolGradEagerShapeErrors(self):\n with context.eager_mode():\n orig_in = array_ops.ones((1, 1, 1, 1))\n\n # Test invalid orig_out shape\n orig_out = array_ops.ones((1, 1, 1, 2))\n grad = array_ops.ones((1, 1, 1, 1))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Expected orig_output shape to be \\[1,1,1,1\\], but got \\[1,1,1,2\\]\"):\n gen_nn_ops.max_pool_grad(\n orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],\n padding=\"VALID\")\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Expected orig_output shape to be \\[1,1,1,1\\], but got \\[1,1,1,2\\]\"):\n gen_nn_ops.max_pool_grad_grad(\n orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],\n padding=\"VALID\")\n\n # Test invalid grad shape\n orig_out = array_ops.ones((1, 1, 1, 1))\n grad = array_ops.ones((1, 1, 1, 2))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Expected grad shape to be \\[1,1,1,1\\], but got \\[1,1,1,2\\]\"):\n gen_nn_ops.max_pool_grad(\n orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],\n padding=\"VALID\")\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Expected grad shape to be \\[1,1,1,1\\], but got \\[1,1,1,2\\]\"):\n gen_nn_ops.max_pool_grad_grad(\n orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],\n padding=\"VALID\")\n\n def testMaxPoolGradWithArgmaxEagerShapeErrors(self):\n with context.eager_mode():\n inp = array_ops.ones((1, 1, 1, 1))\n\n # Test invalid grad shape\n grad = array_ops.ones((1, 1, 1, 2))\n argmax = array_ops.zeros((1, 1, 1, 1), dtype=dtypes.int64)\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Expected grad shape to be \\[1,1,1,1\\], but got \\[1,1,1,2\\]\"):\n gen_nn_ops.max_pool_grad_with_argmax(\n inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],\n padding=\"VALID\")\n # max_pool_grad_grad_with_argmax is only implemented for GPUs\n if 
test.is_gpu_available():\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Expected grad shape to be \\[1,1,1,1\\], but got \\[1,1,1,2\\]\"):\n gen_nn_ops.max_pool_grad_grad_with_argmax(\n inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],\n padding=\"VALID\")\n\n # Test invalid argmax shape\n grad = array_ops.ones((1, 1, 1, 1))\n argmax = array_ops.ones((1, 1, 1, 2), dtype=dtypes.int64)\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Expected argmax shape to be \\[1,1,1,1\\], but got \\[1,1,1,2\\]\"):\n gen_nn_ops.max_pool_grad_with_argmax(\n inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],\n padding=\"VALID\")\n # max_pool_grad_grad_with_argmax is only implemented for GPUs\n if test.is_gpu_available():\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Expected argmax shape to be \\[1,1,1,1\\], but got \\[1,1,1,2\\]\"):\n gen_nn_ops.max_pool_grad_grad_with_argmax(\n inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],\n padding=\"VALID\")\n\n\ndef GetMaxPoolFwdTest(input_size, filter_size, strides, padding):\n\n def Test(self):\n # MaxPoolWithArgMax is implemented only on CUDA.\n if not test.is_gpu_available(cuda_only=True):\n return\n self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)\n\n return Test\n\n\ndef GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):\n\n def Test(self):\n # MaxPoolWithArgMax is implemented only on CUDA.\n if not test.is_gpu_available(cuda_only=True):\n return\n self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides,\n padding)\n\n return Test\n\n\ndef GetMaxPoolGradGradTest(input_size, filter_size, output_size, strides,\n padding):\n\n def Test(self):\n # MaxPoolWithArgMax is implemented only on CUDA.\n if not test.is_gpu_available(cuda_only=True):\n return\n self._CompareMaxPoolingGradBk(input_size, output_size, filter_size, strides,\n padding)\n\n return Test\n\n\nif __name__ == \"__main__\":\n for (name_, input_size_, filter_size_, output_size_, stride_,\n padding_) in GetShrunkInceptionMaxPoolShapes():\n setattr(PoolingTest, \"testMaxPoolFwd_\" + name_,\n GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))\n setattr(PoolingTest, \"testMaxPoolGrad_\" + name_,\n GetMaxPoolGradTest(input_size_, filter_size_, output_size_, stride_,\n padding_))\n setattr(PoolingTest, \"testMaxPoolGradGrad_\" + name_,\n GetMaxPoolGradGradTest(input_size_, filter_size_, output_size_,\n stride_, padding_))\n test.main()\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TF_DETERMINISTIC_OPS=1.\"\"\"\n\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.kernel_tests import cudnn_deterministic_base\nfrom tensorflow.python.platform import test\n\nConvolutionTest = cudnn_deterministic_base.ConvolutionTest\n\nif __name__ == '__main__':\n # TODO(reedwm): Merge this file with cudnn_deterministic_base.py.\n config.enable_op_determinism()\n test.main()\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Generate a series of TensorFlow graphs that become tflite test cases.\n\nUsage:\n\ngenerate_examples <output directory>\n\nbazel run //tensorflow/lite/testing:generate_examples\n\nTo more easily debug failures use (or override) the --save_graphdefs flag to\nplace text proto graphdefs into the generated zip files.\n\"\"\"\n\nimport copy\nimport datetime\nimport os\nimport re\nimport zipfile\n\nimport tensorflow.compat.v1 as tf\n\n# TODO(aselle): Disable GPU for now\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n# pylint: disable=g-import-not-at-top\n# pylint: disable=g-multiple-import\n# pylint: disable=unused-import\nfrom tensorflow.lite.testing.op_tests.abs import make_abs_tests\nfrom tensorflow.lite.testing.op_tests.add_n import make_add_n_tests\nfrom tensorflow.lite.testing.op_tests.arg_min_max import make_arg_min_max_tests\nfrom tensorflow.lite.testing.op_tests.batch_to_space_nd import make_batch_to_space_nd_tests\nfrom tensorflow.lite.testing.op_tests.binary_op import make_add_tests, make_div_tests, make_sub_tests, make_mul_tests, make_pow_tests, make_floor_div_tests, make_floor_mod_tests, make_squared_difference_tests\nfrom tensorflow.lite.testing.op_tests.cast import make_cast_tests\nfrom tensorflow.lite.testing.op_tests.ceil import make_ceil_tests\nfrom tensorflow.lite.testing.op_tests.concat import make_concat_tests\nfrom tensorflow.lite.testing.op_tests.constant import make_constant_tests\nfrom tensorflow.lite.testing.op_tests.conv import make_conv_tests\nfrom tensorflow.lite.testing.op_tests.conv2d_transpose import make_conv2d_transpose_tests\nfrom tensorflow.lite.testing.op_tests.conv_activation import make_conv_relu_tests, make_conv_relu1_tests, make_conv_relu6_tests\n# Note: This is a regression test for a bug 
(b/112303004) that Toco incorrectly\n# transforms Conv into DepthwiseConv when two Conv ops share the same constant\n# weight tensor.\nfrom tensorflow.lite.testing.op_tests.conv_to_depthwiseconv_with_shared_weights import make_conv_to_depthwiseconv_with_shared_weights_tests\n# Note: This is a regression test for a bug (b/112436267) that Toco incorrectly\n# fuses weights when multiple Conv2D/FULLY_CONNECTED ops share the same constant\n# weight tensor.\nfrom tensorflow.lite.testing.op_tests.conv_with_shared_weights import make_conv_with_shared_weights_tests\nfrom tensorflow.lite.testing.op_tests.cos import make_cos_tests\nfrom tensorflow.lite.testing.op_tests.depth_to_space import make_depth_to_space_tests\nfrom tensorflow.lite.testing.op_tests.depthwiseconv import make_depthwiseconv_tests\nfrom tensorflow.lite.testing.op_tests.elementwise import make_sin_tests, make_log_tests, make_sqrt_tests, make_rsqrt_tests, make_square_tests\nfrom tensorflow.lite.testing.op_tests.elu import make_elu_tests\nfrom tensorflow.lite.testing.op_tests.embedding_lookup import make_embedding_lookup_tests\nfrom tensorflow.lite.testing.op_tests.equal import make_equal_tests\nfrom tensorflow.lite.testing.op_tests.exp import make_exp_tests\nfrom tensorflow.lite.testing.op_tests.expand_dims import make_expand_dims_tests\nfrom tensorflow.lite.testing.op_tests.expm1 import make_expm1_tests\nfrom tensorflow.lite.testing.op_tests.eye import make_eye_tests\nfrom tensorflow.lite.testing.op_tests.fill import make_fill_tests\nfrom tensorflow.lite.testing.op_tests.floor import make_floor_tests\nfrom tensorflow.lite.testing.op_tests.fully_connected import make_fully_connected_tests\nfrom tensorflow.lite.testing.op_tests.fused_batch_norm import make_fused_batch_norm_tests\nfrom tensorflow.lite.testing.op_tests.gather import make_gather_tests\nfrom tensorflow.lite.testing.op_tests.gather_nd import make_gather_nd_tests\nfrom tensorflow.lite.testing.op_tests.gather_with_constant import make_gather_with_constant_tests\nfrom tensorflow.lite.testing.op_tests.global_batch_norm import make_global_batch_norm_tests\nfrom tensorflow.lite.testing.op_tests.greater import make_greater_tests\nfrom tensorflow.lite.testing.op_tests.greater_equal import make_greater_equal_tests\nfrom tensorflow.lite.testing.op_tests.hardswish import make_hardswish_tests\nfrom tensorflow.lite.testing.op_tests.identity import make_identity_tests\nfrom tensorflow.lite.testing.op_tests.l2norm import make_l2norm_tests\n# Note: This is a regression test for a bug (b/122651451) that Toco incorrectly\n# erases the reduction indices array while it's shared with other ops.\nfrom tensorflow.lite.testing.op_tests.l2norm_shared_epsilon import make_l2norm_shared_epsilon_tests\nfrom tensorflow.lite.testing.op_tests.leaky_relu import make_leaky_relu_tests\nfrom tensorflow.lite.testing.op_tests.less import make_less_tests\nfrom tensorflow.lite.testing.op_tests.less_equal import make_less_equal_tests\nfrom tensorflow.lite.testing.op_tests.local_response_norm import make_local_response_norm_tests\nfrom tensorflow.lite.testing.op_tests.log_softmax import make_log_softmax_tests\nfrom tensorflow.lite.testing.op_tests.logic import make_logical_or_tests, make_logical_and_tests, make_logical_xor_tests\nfrom tensorflow.lite.testing.op_tests.lstm import make_lstm_tests\nfrom tensorflow.lite.testing.op_tests.matrix_diag import make_matrix_diag_tests\nfrom tensorflow.lite.testing.op_tests.matrix_set_diag import make_matrix_set_diag_tests\nfrom tensorflow.lite.testing.op_tests.maximum import 
make_maximum_tests\nfrom tensorflow.lite.testing.op_tests.minimum import make_minimum_tests\nfrom tensorflow.lite.testing.op_tests.mirror_pad import make_mirror_pad_tests\nfrom tensorflow.lite.testing.op_tests.nearest_upsample import make_nearest_upsample_tests\nfrom tensorflow.lite.testing.op_tests.neg import make_neg_tests\nfrom tensorflow.lite.testing.op_tests.not_equal import make_not_equal_tests\nfrom tensorflow.lite.testing.op_tests.one_hot import make_one_hot_tests\nfrom tensorflow.lite.testing.op_tests.pack import make_pack_tests\nfrom tensorflow.lite.testing.op_tests.pad import make_pad_tests\nfrom tensorflow.lite.testing.op_tests.padv2 import make_padv2_tests\nfrom tensorflow.lite.testing.op_tests.placeholder_with_default import make_placeholder_with_default_tests\nfrom tensorflow.lite.testing.op_tests.pool import make_l2_pool_tests, make_avg_pool_tests, make_max_pool_tests\nfrom tensorflow.lite.testing.op_tests.prelu import make_prelu_tests\nfrom tensorflow.lite.testing.op_tests.range import make_range_tests\nfrom tensorflow.lite.testing.op_tests.rank import make_rank_tests\nfrom tensorflow.lite.testing.op_tests.reduce import make_mean_tests, make_sum_tests, make_reduce_prod_tests, make_reduce_max_tests, make_reduce_min_tests, make_reduce_any_tests, make_reduce_all_tests\nfrom tensorflow.lite.testing.op_tests.relu import make_relu_tests\nfrom tensorflow.lite.testing.op_tests.relu1 import make_relu1_tests\nfrom tensorflow.lite.testing.op_tests.relu6 import make_relu6_tests\nfrom tensorflow.lite.testing.op_tests.reshape import make_reshape_tests\nfrom tensorflow.lite.testing.op_tests.resize_bilinear import make_resize_bilinear_tests\nfrom tensorflow.lite.testing.op_tests.resize_nearest_neighbor import make_resize_nearest_neighbor_tests\n# For verifying https://github.com/tensorflow/tensorflow/issues/23599\nfrom tensorflow.lite.testing.op_tests.resolve_constant_strided_slice import make_resolve_constant_strided_slice_tests\nfrom tensorflow.lite.testing.op_tests.reverse_sequence import make_reverse_sequence_tests\nfrom tensorflow.lite.testing.op_tests.reverse_v2 import make_reverse_v2_tests\nfrom tensorflow.lite.testing.op_tests.round import make_round_tests\nfrom tensorflow.lite.testing.op_tests.scatter_nd import make_scatter_nd_tests\nfrom tensorflow.lite.testing.op_tests.shape import make_shape_tests\nfrom tensorflow.lite.testing.op_tests.sigmoid import make_sigmoid_tests\nfrom tensorflow.lite.testing.op_tests.slice import make_slice_tests\nfrom tensorflow.lite.testing.op_tests.softmax import make_softmax_tests\nfrom tensorflow.lite.testing.op_tests.space_to_batch_nd import make_space_to_batch_nd_tests\nfrom tensorflow.lite.testing.op_tests.space_to_depth import make_space_to_depth_tests\nfrom tensorflow.lite.testing.op_tests.sparse_to_dense import make_sparse_to_dense_tests\nfrom tensorflow.lite.testing.op_tests.split import make_split_tests\nfrom tensorflow.lite.testing.op_tests.splitv import make_splitv_tests\nfrom tensorflow.lite.testing.op_tests.squeeze import make_squeeze_tests\nfrom tensorflow.lite.testing.op_tests.squeeze_transpose import make_squeeze_transpose_tests\nfrom tensorflow.lite.testing.op_tests.strided_slice import make_strided_slice_tests, make_strided_slice_1d_exhaustive_tests\nfrom tensorflow.lite.testing.op_tests.strided_slice_np_style import make_strided_slice_np_style_tests\nfrom tensorflow.lite.testing.op_tests.tanh import make_tanh_tests\nfrom tensorflow.lite.testing.op_tests.tile import make_tile_tests\nfrom tensorflow.lite.testing.op_tests.topk import 
make_topk_tests\nfrom tensorflow.lite.testing.op_tests.transpose import make_transpose_tests\nfrom tensorflow.lite.testing.op_tests.transpose_conv import make_transpose_conv_tests\nfrom tensorflow.lite.testing.op_tests.unfused_gru import make_unfused_gru_tests\nfrom tensorflow.lite.testing.op_tests.unique import make_unique_tests\nfrom tensorflow.lite.testing.op_tests.unpack import make_unpack_tests\nfrom tensorflow.lite.testing.op_tests.unroll_batch_matmul import make_unroll_batch_matmul_tests\nfrom tensorflow.lite.testing.op_tests.where import make_where_tests\nfrom tensorflow.lite.testing.op_tests.zeros_like import make_zeros_like_tests\n\nfrom tensorflow.lite.testing.zip_test_utils import get_test_function\n\n# A map from regular expression to bug number. Any test failure with label\n# matching the expression will be considered due to the corresponding bug.\nKNOWN_BUGS = {\n # TOCO doesn't support scalars as input.\n # Concat doesn't work with a single input tensor\n r\"concat.*num_tensors=1\": \"67378344\",\n # Softmax graphs are too complex.\n r\"softmax.*dim=0\": \"67749831\",\n # BatchToSpaceND only supports 4D tensors.\n r\"batch_to_space_nd.*input_shape=\\[8,2,2,2,1,1\\]\": \"70594733\",\n # Div will use floordiv.\n r\"div.*int32\": \"72051395\",\n # Strided slice cannot handle new_axis_mask.\n r\"strided_slice.*spec=\\[None\": \"137470173\",\n}\n\n\nclass MultiGenState(object):\n \"\"\"State of multiple set generation process.\n\n This state class stores the information needed when generating the examples\n for multiple test set. The stored informations are open archive object to be\n shared, information on test target for current iteration of generation,\n accumulated generation results.\n \"\"\"\n\n def __init__(self):\n # Open archive.\n self.archive = None\n # Test name for current generation.\n self.test_name = None\n # Label base path containing the test name.\n # Each of the test data path in the zip archive is derived from this path.\n # If this path is \"a/b/c/d.zip\", an example of generated test data path\n # is \"a/b/c/d_input_type=tf.float32,input_shape=[2,2].inputs\".\n # The test runner interpretes the test name of this path as \"d\".\n # Label base path also should finish with \".zip\".\n self.label_base_path = None\n # Zip manifests.\n self.zip_manifest = []\n # Number of all parameters accumulated.\n self.parameter_count = 0\n\n\nclass Options(object):\n \"\"\"All options for example generation.\"\"\"\n\n def __init__(self):\n # Directory where the outputs will be go.\n self.output_path = None\n # Particular zip to output.\n self.zip_to_output = None\n # Path to toco tool.\n self.toco = None\n # If a particular model is affected by a known bug count it as a Toco\n # error.\n self.known_bugs_are_errors = False\n # Raise an exception if any converter error is encountered.\n self.ignore_converter_errors = False\n # Include intermediate graphdefs in the output zip files.\n self.save_graphdefs = False\n # Whether the TFLite Flex converter is being used.\n self.run_with_flex = False\n # Whether to generate test cases for edgetpu.\n self.make_edgetpu_tests = False\n # Whether to generate test cases for TF PTQ.\n self.make_tf_ptq_tests = False\n # The function to convert a TensorFLow model to TFLite model.\n # See the document for `toco_convert` function for its required signature.\n self.tflite_convert_function = None\n # A map from regular expression to bug number. 
Any test failure with label\n # matching the expression will be considered due to the corresponding bug.\n self.known_bugs = KNOWN_BUGS\n # Make tests by setting TF forward compatibility horizon to the future.\n self.make_forward_compat_test = False\n # No limitation on the number of tests.\n self.no_tests_limit = False\n # Do not create conversion report.\n self.no_conversion_report = False\n # State of multiple test set generation. This stores state values those\n # should be kept and updated while generating examples over multiple\n # test sets.\n # TODO(juhoha): Separate the state from the options.\n self.multi_gen_state = None\n self.use_experimental_converter = False\n self.mlir_quantizer = False\n # The list of ops' name that should exist in the converted model.\n # This feature is currently only supported in MLIR conversion path.\n # Example of supported ops' name:\n # - \"AVERAGE_POOL_2D\" for builtin op.\n # - \"NumericVerify\" for custom op.\n self.expected_ops_in_converted_model = []\n\n\ndef _prepare_dir(options):\n\n def mkdir_if_not_exist(x):\n if not os.path.isdir(x):\n os.mkdir(x)\n if not os.path.isdir(x):\n raise RuntimeError(\"Failed to create dir %r\" % x)\n\n opstest_path = os.path.join(options.output_path)\n mkdir_if_not_exist(opstest_path)\n\n\ndef generate_examples(options):\n \"\"\"Generate examples for a test set.\n\n Args:\n options: Options containing information to generate examples.\n\n Raises:\n RuntimeError: if the test function cannot be found.\n \"\"\"\n _prepare_dir(options)\n\n out = options.zip_to_output\n # Some zip filenames contain a postfix identifying the conversion mode. The\n # list of valid conversion modes is defined in\n # generated_test_conversion_modes() in build_def.bzl.\n\n if options.multi_gen_state:\n test_name = options.multi_gen_state.test_name\n else:\n # Remove suffixes to extract the test name from the output name.\n test_name = re.sub(\n r\"(_(|toco-flex|forward-compat|edgetpu|mlir-quant))?\\.zip$\",\n \"\",\n out,\n count=1)\n\n test_function_name = \"make_%s_tests\" % test_name\n test_function = get_test_function(test_function_name)\n if test_function is None:\n raise RuntimeError(\"Can't find a test function to create %r. 
Tried %r\" %\n (out, test_function_name))\n if options.make_forward_compat_test:\n future_date = datetime.date.today() + datetime.timedelta(days=30)\n with tf.compat.forward_compatibility_horizon(future_date.year,\n future_date.month,\n future_date.day):\n test_function(options)\n else:\n test_function(options)\n\n\ndef generate_multi_set_examples(options, test_sets):\n \"\"\"Generate examples for test sets.\n\n Args:\n options: Options containing information to generate examples.\n test_sets: List of the name of test sets to generate examples.\n \"\"\"\n _prepare_dir(options)\n\n multi_gen_state = MultiGenState()\n options.multi_gen_state = multi_gen_state\n\n zip_path = os.path.join(options.output_path, options.zip_to_output)\n with zipfile.PyZipFile(zip_path, \"w\") as archive:\n multi_gen_state.archive = archive\n\n for test_name in test_sets:\n # Some generation function can change the value of the options object.\n # To keep the original options for each run, we use shallow copy.\n new_options = copy.copy(options)\n\n # Remove suffix and set test_name to run proper test generation function.\n multi_gen_state.test_name = re.sub(\n r\"(_(|toco-flex|forward-compat|mlir-quant))?$\",\n \"\",\n test_name,\n count=1)\n # Set label base path to write test data files with proper path.\n multi_gen_state.label_base_path = os.path.join(\n os.path.dirname(zip_path), test_name + \".zip\")\n\n generate_examples(new_options)\n\n zipinfo = zipfile.ZipInfo(\"manifest.txt\")\n archive.writestr(zipinfo, \"\".join(multi_gen_state.zip_manifest),\n zipfile.ZIP_DEFLATED)\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for utilities for traversing the dataset construction graph.\"\"\"\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.data.experimental.ops import data_service_ops\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import traverse\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass _TestDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n def __init__(self, input_dataset):\n self._input_dataset = input_dataset\n temp_variant_tensor = gen_dataset_ops.prefetch_dataset(\n input_dataset._variant_tensor,\n buffer_size=1,\n **self._flat_structure)\n variant_tensor = gen_dataset_ops.model_dataset(\n temp_variant_tensor, **self._flat_structure)\n super(_TestDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass TraverseTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n @combinations.generate(test_base.graph_only_combinations())\n def testOnlySource(self):\n ds = dataset_ops.Dataset.range(10)\n variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)\n 
self.assertAllEqual([\"RangeDataset\"], [x.name for x in variant_tensor_ops])\n\n @combinations.generate(test_base.graph_only_combinations())\n def testSimplePipeline(self):\n ds = dataset_ops.Dataset.range(10).map(math_ops.square)\n variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)\n self.assertSetEqual(\n set([\"MapDataset\", \"RangeDataset\"]),\n set(x.name for x in variant_tensor_ops))\n\n @combinations.generate(test_base.graph_only_combinations())\n def testConcat(self):\n ds1 = dataset_ops.Dataset.range(10)\n ds2 = dataset_ops.Dataset.range(10)\n ds = ds1.concatenate(ds2)\n variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)\n self.assertSetEqual(\n set([\"ConcatenateDataset\", \"RangeDataset\", \"RangeDataset_1\"]),\n set(x.name for x in variant_tensor_ops))\n\n @combinations.generate(test_base.graph_only_combinations())\n def testZip(self):\n ds1 = dataset_ops.Dataset.range(10)\n ds2 = dataset_ops.Dataset.range(10)\n ds = dataset_ops.Dataset.zip((ds1, ds2))\n variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)\n self.assertSetEqual(\n set([\"ZipDataset\", \"RangeDataset\", \"RangeDataset_1\"]),\n set(x.name for x in variant_tensor_ops))\n\n @combinations.generate(test_base.graph_only_combinations())\n def testMultipleVariantTensors(self):\n ds = dataset_ops.Dataset.range(10)\n ds = _TestDataset(ds)\n variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)\n self.assertSetEqual(\n set([\"RangeDataset\", \"ModelDataset\", \"PrefetchDataset\"]),\n set(x.name for x in variant_tensor_ops))\n\n @combinations.generate(test_base.graph_only_combinations())\n def testFlatMap(self):\n ds1 = dataset_ops.Dataset.range(10).repeat(10)\n\n def map_fn(ds):\n\n def _map(x):\n return ds.batch(x)\n\n return _map\n\n ds2 = dataset_ops.Dataset.range(20).prefetch(1)\n ds2 = ds2.flat_map(map_fn(ds1))\n variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds2)\n self.assertSetEqual(\n set([\n \"FlatMapDataset\", \"PrefetchDataset\", \"RepeatDataset\",\n \"RangeDataset\", \"RangeDataset_1\"\n ]), set(x.name for x in variant_tensor_ops))\n\n @combinations.generate(test_base.graph_only_combinations())\n def testTfDataService(self):\n ds = dataset_ops.Dataset.range(10)\n ds = ds.apply(\n data_service_ops.distribute(\"parallel_epochs\", \"grpc://foo:0\"))\n ops = traverse.obtain_capture_by_value_ops(ds)\n self.assertContainsSubset(\n [\"RangeDataset\", \"DataServiceDatasetV2\", \"DummyIterationCounter\"],\n set(x.name for x in ops))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Gradients for operators defined in control_flow_ops.py.\"\"\"\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import math_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import,undefined-variable,redefined-builtin\nfrom tensorflow.python.ops.control_flow_ops import *\n# pylint: enable=wildcard-import\n\n\ndef _SwitchGrad(op, *grad):\n \"\"\"Gradients for a Switch op is calculated using a Merge op.\n\n If the switch is a loop switch, it will be visited twice. We create\n the merge on the first visit, and update the other input of the merge\n on the second visit. A next_iteration is also added on second visit.\n \"\"\"\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n op_ctxt = op._get_control_flow_context()\n grad_ctxt = graph._get_control_flow_context()\n # pylint: enable=protected-access\n if isinstance(op_ctxt, WhileContext):\n merge_grad = grad_ctxt.grad_state.switch_map.get(op)\n if merge_grad is not None:\n # This is the second time this Switch is visited. It comes from\n # the non-exit branch of the Switch, so update the second input\n # to the Merge.\n # TODO(yuanbyu): Perform shape inference with this new input.\n if grad[1] is not None:\n # pylint: disable=protected-access\n control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1],\n enforce_shape_invariant=False)\n # pylint: enable=protected-access\n return None, None\n elif grad[0] is not None:\n # This is the first time this Switch is visited. It comes from\n # the Exit branch, which is grad[0]. grad[1] is empty at this point.\n # Use grad[0] for both inputs to merge for now, but update the second\n # input of merge when we see this Switch the second time.\n merge_grad = merge([grad[0], grad[0]], name=\"b_switch\")[0]\n grad_ctxt.grad_state.switch_map[op] = merge_grad\n return merge_grad, None\n else:\n # This is the first time this Switch is visited. It comes from the\n # Identity branch. 
Such a Switch has `None` gradient for the Exit branch,\n # meaning the output is not differentiable.\n return None, None\n elif isinstance(op_ctxt, CondContext):\n zero_grad = grad[1 - op_ctxt.branch]\n # At this point, we have created zero_grad guarded by the right switch.\n # Unfortunately, we may still get None here for not trainable data types.\n if zero_grad is None:\n # For resource variables we get None always on the other branch, so bypass\n # this.\n if op.inputs[0].dtype == dtypes.resource:\n return merge(\n [grad[op_ctxt.branch]] * 2, name=\"cond_resource_grad\")[0], None\n return None, None\n return merge(grad, name=\"cond_grad\")[0], None\n else:\n false_grad = switch(grad[0], op.inputs[1])[0]\n true_grad = switch(grad[1], op.inputs[1])[1]\n return merge([false_grad, true_grad])[0], None\n\n\nops.RegisterGradient(\"Switch\")(_SwitchGrad)\nops.RegisterGradient(\"RefSwitch\")(_SwitchGrad)\n\n\[email protected](\"Merge\")\ndef _MergeGrad(op, grad, _):\n \"\"\"Gradients for a Merge op are calculated using a Switch op.\"\"\"\n input_op = op.inputs[0].op\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n op_ctxt = control_flow_util.GetOutputContext(input_op)\n grad_ctxt = graph._get_control_flow_context()\n # pylint: enable=protected-access\n if isinstance(op_ctxt, WhileContext):\n # pylint: disable=protected-access\n return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)\n # pylint: enable=protected-access\n elif isinstance(op_ctxt, CondContext):\n pred = op_ctxt.pred\n if grad_ctxt and grad_ctxt.grad_state:\n # This Merge node is part of a cond within a loop.\n # The backprop needs to have the value of this predicate for every\n # iteration. So we must have its values accumulated in the forward, and\n # use the accumulated values as the predicate for this backprop switch.\n grad_state = grad_ctxt.grad_state\n real_pred = grad_state.history_map.get(pred.name)\n if real_pred is None:\n # Remember the value of pred for every iteration.\n grad_ctxt = grad_state.grad_context\n grad_ctxt.Exit()\n history_pred = grad_state.AddForwardAccumulator(pred)\n grad_ctxt.Enter()\n\n # Add the stack pop op. If pred.op is in a (outer) CondContext,\n # the stack pop will be guarded with a switch.\n real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)\n grad_state.history_map[pred.name] = real_pred\n pred = real_pred\n # pylint: disable=protected-access\n return control_flow_ops._SwitchRefOrTensor(grad, pred, name=\"cond_grad\")\n # pylint: enable=protected-access\n else:\n num_inputs = len(op.inputs)\n cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]\n # pylint: disable=protected-access\n return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]\n for i in xrange(num_inputs)]\n # pylint: enable=protected-access\n\n\[email protected](\"RefMerge\")\ndef _RefMergeGrad(op, grad, _):\n return _MergeGrad(op, grad, _)\n\n\[email protected](\"Exit\")\ndef _ExitGrad(op, grad):\n \"\"\"Gradients for an exit op are calculated using an Enter op.\"\"\"\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n op_ctxt = op._get_control_flow_context()\n grad_ctxt = graph._get_control_flow_context()\n # pylint: enable=protected-access\n if not grad_ctxt.back_prop:\n # The flag `back_prop` is set by users to suppress gradient\n # computation for this loop. 
If the attribute `back_prop` is false,\n # no gradient computation.\n return None\n\n if op_ctxt.grad_state:\n raise TypeError(\"Second-order gradient for while loops not supported.\")\n\n if isinstance(grad, ops.Tensor):\n grad_ctxt.AddName(grad.name)\n else:\n if not isinstance(\n grad, (indexed_slices.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(f\"Type {type(grad)} not supported, must be either\"\n \"`indexed_slices.IndexedSlices` or `SparseTensor`.\")\n grad_ctxt.AddName(grad.values.name)\n grad_ctxt.AddName(grad.indices.name)\n dense_shape = grad.dense_shape\n if dense_shape is not None:\n grad_ctxt.AddName(dense_shape.name)\n grad_ctxt.Enter()\n # pylint: disable=protected-access\n result = control_flow_ops._Enter(\n grad, grad_ctxt.name, is_constant=False,\n parallel_iterations=grad_ctxt.parallel_iterations,\n name=\"b_exit\")\n # pylint: enable=protected-access\n grad_ctxt.loop_enters.append(result)\n grad_ctxt.Exit()\n return result\n\n\nops.RegisterGradient(\"RefExit\")(_ExitGrad)\n\n\[email protected](\"NextIteration\")\ndef _NextIterationGrad(_, grad):\n \"\"\"A forward next_iteration is translated into a backprop identity.\n\n Note that the backprop next_iteration is added in switch grad.\n \"\"\"\n return grad\n\n\[email protected](\"RefNextIteration\")\ndef _RefNextIterationGrad(_, grad):\n return _NextIterationGrad(_, grad)\n\n\[email protected](\"Enter\")\ndef _EnterGrad(op, grad):\n \"\"\"Gradients for an Enter are calculated using an Exit op.\n\n For loop variables, grad is the gradient so just add an exit.\n For loop invariants, we need to add an accumulator loop.\n \"\"\"\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n grad_ctxt = graph._get_control_flow_context()\n # pylint: enable=protected-access\n if grad_ctxt is None:\n return grad\n if not grad_ctxt.back_prop:\n # Skip gradient computation, if the attribute `back_prop` is false.\n return grad\n if grad_ctxt.grad_state is None:\n # Pass the gradient through if we are not in a gradient while context.\n return grad\n if op.get_attr(\"is_constant\"):\n # Add a gradient accumulator for each loop invariant.\n if isinstance(grad, ops.Tensor):\n result = grad_ctxt.AddBackpropAccumulator(op, grad)\n elif isinstance(grad, indexed_slices.IndexedSlices):\n result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)\n else:\n # TODO(yuanbyu, lukasr): Add support for SparseTensor.\n raise TypeError(f\"Type {type(grad)} not supported,\"\n \"must be Tensor or Indexed Slices\")\n else:\n result = exit(grad)\n grad_ctxt.loop_exits.append(result)\n grad_ctxt.ExitResult([result])\n return result\n\n\[email protected](\"RefEnter\")\ndef _RefEnterGrad(op, grad):\n return _EnterGrad(op, grad)\n\n\[email protected](\"LoopCond\")\ndef _LoopCondGrad(_):\n \"\"\"Stop backprop for the predicate of a while loop.\"\"\"\n return None\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for deterministic functionality of SparseSoftmaxCrossEntropyWithLogits op.\"\"\"\n\nimport numpy as np\n\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.kernel_tests import sparse_xent_op_test_base\n# The following import is required to register the gradient function.\nfrom tensorflow.python.ops.nn_grad import _SparseSoftmaxCrossEntropyWithLogitsGrad # pylint: disable=unused-import\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import test\n\n\nclass SparseXentOpDeterminismExceptionsTest(test.TestCase):\n \"\"\"Test d9m-unimplemented exceptions from SparseSoftmaxXentWithLogitsOp.\n\n Test that tf.errors.UnimplementedError is thrown, as\n appropriate, by the GPU code-paths through SparseSoftmaxXentWithLogitsOp when\n deterministic ops are enabled.\n\n This test assumes that sparse_xent_op_test.py runs equivalent test cases\n when deterministic ops are not enabled and will therefore detect erroneous\n exception throwing in those cases.\n \"\"\"\n\n @test_util.run_gpu_only\n @test_util.run_in_graph_and_eager_modes\n def testExceptionThrowing(self):\n with self.session(), test_util.force_gpu():\n for features_dtype in [dtypes.float16, dtypes.float32]:\n for labels_dtype in [dtypes.int32, dtypes.int64]:\n features = constant_op.constant([[0.3, 0.5], [0.2, 0.6]],\n dtype=features_dtype)\n labels = constant_op.constant([1, 0], dtype=labels_dtype)\n with self.assertRaisesRegex(\n errors_impl.UnimplementedError,\n \"The GPU implementation of SparseSoftmaxCrossEntropyWithLogits \" +\n \"that would have been executed is not deterministic. 
Note that \" +\n \"the Python API uses an alternative, deterministic, \" +\n \"GPU-accelerated path when determinsim is enabled.\"):\n result = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(\n features=features, labels=labels)\n self.evaluate(result)\n\n\nclass SparseXentOpDeterministicTest(\n sparse_xent_op_test_base.SparseXentOpTestBase):\n \"\"\"Test that SparseSoftmaxCrossEntropyWithLogits operates reproducibly.\n\n Inheriting from sparse_xent_op_test_base.SparseXentOpTestBase ensures that\n regular op functionality is correct when the deterministic code-path is\n selected.\n\n Note that because nn_ops.sparse_softmax_cross_entropy_with_logits_v2 calls\n nn_ops.sparse_softmax_cross_entropy_with_logits directly, the focus of\n testing is on the former in order to test both.\n \"\"\"\n\n def _randomInts(self, shape, high, dtype):\n return constant_op.constant(\n np.random.randint(low=0, high=high, size=shape).astype(dtype))\n\n def _randomFloats(self, shape, dtype):\n return constant_op.constant(\n (2 * np.random.random_sample(shape) - 1).astype(dtype))\n\n def _generateInputs(self, labels_dtype, logits_dtype, seed):\n batch_size = 1024\n classes_count = 1000\n np.random.seed(seed)\n labels_shape = (batch_size)\n labels = self._randomInts(\n labels_shape, high=classes_count, dtype=labels_dtype)\n logits_shape = (batch_size, classes_count)\n logits = self._randomFloats(logits_shape, logits_dtype)\n return labels, logits\n\n @test_util.run_in_graph_and_eager_modes\n def testForward(self):\n with self.cached_session():\n for logits_dtype in [np.float16, np.float32, np.float64, \\\n dtypes.bfloat16.as_numpy_dtype]:\n for labels_dtype in [np.int32, np.int64]:\n for trial in range(5):\n seed = 123 + trial\n labels, logits = self._generateInputs(\n labels_dtype, logits_dtype, seed=seed)\n result_a = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits)\n result_b = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits)\n self.assertAllEqual(result_a, result_b)\n\n @test_util.run_in_graph_and_eager_modes\n def testBackward(self):\n with self.cached_session():\n for logits_dtype in [np.float16, np.float32, np.float64, \\\n dtypes.bfloat16.as_numpy_dtype]:\n for labels_dtype in [np.int32, np.int64]:\n labels, logits = self._generateInputs(\n labels_dtype, logits_dtype, seed=456)\n output_shape = labels.shape[0]\n\n def gradients(seed):\n np.random.seed(seed)\n upstream_gradients = self._randomFloats(output_shape, logits_dtype)\n with backprop.GradientTape(persistent=True) as tape:\n tape.watch(logits)\n op_output = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits)\n gradient_injector_output = op_output * upstream_gradients\n return tape.gradient(gradient_injector_output, logits)\n\n for trial in range(5):\n seed = 456 + trial\n result_a = gradients(seed=seed)\n result_b = gradients(seed=seed)\n self.assertAllEqual(result_a, result_b)\n\n # Modifications to the parent class\n # (sparse_xent_op_test_base.SparseXentOpTestBase) follow\n\n def testInvalidLabelGPU(self):\n \"\"\"Modified test for invalid labels on GPU.\n\n When running on GPU, the pre-existing, nondeterministic implementation\n produces NaN (in both the forward and backward directions) for results\n associated with invalid labels (less than zero or greater than the number of\n classes minus one). 
However, while the deterministic implementation also\n produces NaN in the forward direction, it produces zeros in the backward\n direction.\n \"\"\"\n self._testInvalidLabelGPU(invalid_label_gradient=0.0)\n\n def testInvalidLabelCPU(self):\n \"\"\"Modified test for invalid labels on CPU.\n\n When running on CPU, the pre-existing, nondeterministic implementation\n throws a custom exception when any of the label values are invalid (less\n than zero or greater than the number of classes minus one). However, in the\n deterministic implementation, tf.gather throws an exception instead.\n \"\"\"\n self._testInvalidLabelCPU(\n expected_regex=\"indices\\[0\\] = 4 is not in \\[0, 4\\)\")\n\n def testLabelsPlaceholderScalar(self):\n \"\"\"Test exception-throwing for non-statically-shaped, zero-rank labels.\n\n The deterministic implementation cannot check for this case because it does\n not have a specific implementation of SparseSoftmaxXentWithLogitsOp.\n Instead tf.gather, which is used to create the deterministic implementation,\n throws an error.\n \"\"\"\n self._testLabelsPlaceholderScalar(\n expected_error_message=\"Expected batch_dims in the range \\[0, 0\\], \" +\n \"but got 1\")\n\n def testScalarHandling(self):\n \"\"\"Test exception-throwing for non-statically-shaped, zero-rank labels.\n\n The deterministic implementation cannot check for this case because it does\n not have a specific implementation of SparseSoftmaxXentWithLogitsOp.\n Instead tf.gather, which is used to create the deterministic implementation,\n throws an error.\n \"\"\"\n self._testScalarHandling(\n expected_regex=\"Expected batch_dims in the range \\[0, 0\\], but got 1.*\")\n\n\nif __name__ == \"__main__\":\n # TODO(reedwm): Merge this test with sparse_xent_op_test.py.\n config.enable_op_determinism()\n test.main()\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for numerical correctness of tf.math operations.\"\"\"\n\nimport numpy as np\n\nfrom absl import flags\nfrom absl.testing import parameterized\nfrom tensorflow import math\n\nfrom tensorflow.compiler.mlir.tfrt.jit.python_binding import tf_cpurt\nfrom tensorflow.python.platform import test\n\ncpurt = tf_cpurt.TfCpurtExecutor()\n\nFLAGS = flags.FLAGS\nflags.DEFINE_integer('iters', '1000', 'Number of test iterations')\n\n\ndef mlir_func_1d(op_name):\n return f\"\"\"\n func @test(%arg0: tensor<?xf32>) -> tensor<?xf32> {{\n %0 = \"tf.{op_name}\"(%arg0): (tensor<?xf32>) -> tensor<?xf32>\n return %0 : tensor<?xf32>\n }}\"\"\"\n\n\ndef test_1d(op_name, fn, vectorize=False, lb=-1.0, ub=1.0, rtol=0.0):\n compiled = cpurt.compile(mlir_func_1d(op_name), 'test', vectorize=vectorize)\n\n for _ in range(FLAGS.iters):\n arg = np.random.uniform(lb, ub, size=(100)).astype(np.float32)\n\n [res] = cpurt.execute(compiled, [arg])\n np.testing.assert_allclose(res, fn(arg), rtol=rtol)\n\n\nclass TfMathOpsTest(parameterized.TestCase):\n # Not all approximations are identical to TF's.\n base_rtol = 1e-6\n # For some ops we can match TF with the right build flags.\n avx2_rtol = 0.0 if cpurt.built_with('AVX2') else base_rtol\n\n @parameterized.named_parameters(\n ('reciprocal_scalar', 'Reciprocal', math.reciprocal, False, 0.0),\n ('reciprocal_vector', 'Reciprocal', math.reciprocal, True, 0.0),\n # Rsqrt: The AVX2 intrinsic is only emitted with vectorization.\n ('rsqrt_scalar', 'Rsqrt', math.rsqrt, False, base_rtol),\n ('rsqrt_vector', 'Rsqrt', math.rsqrt, True, avx2_rtol),\n ('tanh_scalar', 'Tanh', math.tanh, False, avx2_rtol),\n ('tanh_vector', 'Tanh', math.tanh, True, avx2_rtol),\n )\n\n def test_op(self, op_name, fn, vectorize, rtol):\n test_1d(op_name, fn, vectorize=vectorize, rtol=rtol)\n\nif __name__ == '__main__':\n np.random.seed(0)\n test.main()\n" ]
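The determinism tests embedded a few entries above describe the deterministic sparse softmax cross-entropy path as a log-softmax followed by a gather of each example's true-label column (which is why, per those docstrings, invalid labels surface as tf.gather errors or zero gradients rather than the custom kernel's exceptions or NaNs). Below is a minimal NumPy sketch of that standard log-softmax-plus-gather formulation, included purely as an illustration; the helper name sparse_xent_forward is made up for this sketch and this is not TensorFlow's actual kernel.

import numpy as np

def sparse_xent_forward(labels, logits):
    # Numerically stable log-softmax over the class axis.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    # Gather the log-probability of each example's true class and negate it.
    rows = np.arange(labels.shape[0])
    return -log_softmax[rows, labels]

rng = np.random.default_rng(123)
logits = rng.standard_normal((4, 10)).astype(np.float32)
labels = rng.integers(0, 10, size=4)
print(sparse_xent_forward(labels, logits))  # per-example loss, shape (4,)

Running this twice on the same inputs trivially reproduces the same values; the tests above check that same reproducibility property for the GPU code path across repeated forward and backward passes.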
[ [ "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "numpy.finfo", "tensorflow.python.platform.test.main", "numpy.zeros", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.config.set_soft_device_placement", "tensorflow.python.ops.array_ops.placeholder", "numpy.all", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.ops.device", "tensorflow.python.eager.context.context", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.gen_stateless_random_ops_v2.stateless_random_get_key_counter", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.gen_stateless_random_ops_v2.stateless_random_get_alg", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.compat.compat.forward_compatibility_horizon", "numpy.array", "tensorflow.python.framework.random_seed.set_random_seed", "tensorflow.python.ops.stateless_random_ops.split", "tensorflow.python.framework.config.list_logical_devices", "tensorflow.python.framework.test_util.disable_tfrt", "numpy.shape", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.test_util.NCHW_VECT_CToNHWC", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.gen_array_ops.quantize_v2", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.platform.test.is_built_with_rocm", "tensorflow.python.framework.test_util.NHWCToNCHW", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.nn_ops.max_pool_with_argmax", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.framework.config.disable_op_determinism", "tensorflow.python.framework.test_util.disable_xla", "tensorflow.python.ops.gen_array_ops.dequantize", "numpy.arange", "tensorflow.python.ops.nn_ops.convert_padding", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.gen_nn_ops.max_pool_grad", "tensorflow.python.eager.context.eager_mode", "tensorflow.python.framework.test_util.xla_allow_fallback", "tensorflow.python.ops.gradient_checker.compute_gradient_error", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.gen_nn_ops.max_pool_grad_with_argmax", "tensorflow.python.ops.gen_nn_ops.max_pool_grad_grad_with_argmax", "tensorflow.python.framework.test_util.NHWCToNCHW_VECT_C", "numpy.random.random_integers", "tensorflow.python.framework.config.enable_op_determinism", "numpy.random.rand", "numpy.array", "tensorflow.python.ops.gen_nn_ops.max_pool", "tensorflow.python.framework.test_util.no_xla_auto_jit", "tensorflow.python.ops.nn_ops.max_pool", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.ops.gen_nn_ops.max_pool_grad_grad", "numpy.ones", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.framework.test_util.NCHWToNHWC", "tensorflow.python.framework.test_util.GpuSupportsHalfMatMulAndConv", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.platform.test.main", "tensorflow.python.framework.config.enable_op_determinism" ], [ "tensorflow.compat.v1.compat.forward_compatibility_horizon", "tensorflow.lite.testing.zip_test_utils.get_test_function" ], [ "tensorflow.python.ops.gen_dataset_ops.model_dataset", "tensorflow.python.ops.gen_dataset_ops.prefetch_dataset", 
"tensorflow.python.data.util.traverse.obtain_all_variant_tensor_ops", "tensorflow.python.data.ops.dataset_ops.Dataset.zip", "tensorflow.python.data.kernel_tests.test_base.graph_only_combinations", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "tensorflow.python.data.util.traverse.obtain_capture_by_value_ops", "tensorflow.python.data.experimental.ops.data_service_ops.distribute" ], [ "tensorflow.python.ops.control_flow_ops._SwitchRefOrTensor", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.ops.control_flow_ops._Enter", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.control_flow_ops._AddNextAndBackEdge", "tensorflow.python.ops.control_flow_util.GetOutputContext" ], [ "numpy.random.seed", "tensorflow.python.ops.gen_nn_ops.sparse_softmax_cross_entropy_with_logits", "tensorflow.python.framework.test_util.force_gpu", "tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits_v2", "numpy.random.random_sample", "tensorflow.python.platform.test.main", "tensorflow.python.framework.config.enable_op_determinism", "numpy.random.randint", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.framework.constant_op.constant" ], [ "numpy.random.uniform", "tensorflow.compiler.mlir.tfrt.jit.python_binding.tf_cpurt.TfCpurtExecutor", "tensorflow.python.platform.test.main", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "1.4", "1.13", "2.3", "2.4", "2.2", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.9", "2.5", "2.6", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.10", "2.8", "2.7", "2.9" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.9", "2.8", "2.7", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.9", "2.5", "2.6", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.9", "2.8", "2.7", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
UCL/xclim
[ "118441f89d221cfffbd2e1fd0b966517e731378d" ]
[ "xclim/indices/_conversion.py" ]
[ "# noqa: D100\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport xarray as xr\n\nfrom xclim.core.calendar import date_range, datetime_to_decimal_year\nfrom xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n\n__all__ = [\n \"humidex\",\n \"tas\",\n \"uas_vas_2_sfcwind\",\n \"sfcwind_2_uas_vas\",\n \"saturation_vapor_pressure\",\n \"relative_humidity\",\n \"specific_humidity\",\n \"snowfall_approximation\",\n \"rain_approximation\",\n \"wind_chill_index\",\n \"clausius_clapeyron_scaled_precipitation\",\n \"potential_evapotranspiration\",\n]\n\n\n@declare_units(tas=\"[temperature]\", tdps=\"[temperature]\", hurs=\"[]\")\ndef humidex(\n tas: xr.DataArray,\n tdps: Optional[xr.DataArray] = None,\n hurs: Optional[xr.DataArray] = None,\n) -> xr.DataArray:\n r\"\"\"Humidex index.\n\n The humidex indicates how hot the air feels to an average person, accounting for the effect of humidity. It\n can be loosely interpreted as the equivalent perceived temperature when the air is dry.\n\n Parameters\n ----------\n tas : xarray.DataArray\n Air temperature.\n tdps : xarray.DataArray,\n Dewpoint temperature.\n hurs : xarray.DataArray\n Relative humidity.\n\n Returns\n -------\n xarray.DataArray, [temperature]\n The humidex index.\n\n Notes\n -----\n The humidex is usually computed using hourly observations of dry bulb and dewpoint temperatures. It is computed\n using the formula based on [masterton79]_:\n\n .. math::\n\n T + {\\frac {5}{9}}\\left[e - 10\\right]\n\n where :math:`T` is the dry bulb air temperature (°C). The term :math:`e` can be computed from the dewpoint\n temperature :math:`T_{dewpoint}` in °K:\n\n .. math::\n\n e = 6.112 \\times \\exp(5417.7530\\left({\\frac {1}{273.16}}-{\\frac {1}{T_{\\text{dewpoint}}}}\\right)\n\n where the constant 5417.753 reflects the molecular weight of water, latent heat of vaporization,\n and the universal gas constant ([mekis15]_). Alternatively, the term :math:`e` can also be computed from\n the relative humidity `h` expressed in percent using [sirangelo20]_:\n\n .. math::\n\n e = \\frac{h}{100} \\times 6.112 * 10^{7.5 T/(T + 237.7)}.\n\n The humidex *comfort scale* ([eccc]_) can be interpreted as follows:\n\n - 20 to 29 : no discomfort;\n - 30 to 39 : some discomfort;\n - 40 to 45 : great discomfort, avoid exertion;\n - 46 and over : dangerous, possible heat stroke;\n\n References\n ----------\n .. [masterton79] Masterton, J. M., & Richardson, F. A. (1979). HUMIDEX, A method of quantifying human discomfort due to excessive heat and humidity, CLI 1-79. Downsview, Ontario: Environment Canada, Atmospheric Environment Service.\n .. [mekis15] Éva Mekis, Lucie A. Vincent, Mark W. Shephard & Xuebin Zhang (2015) Observed Trends in Severe Weather Conditions Based on Humidex, Wind Chill, and Heavy Rainfall Events in Canada for 1953–2012, Atmosphere-Ocean, 53:4, 383-397, DOI: 10.1080/07055900.2015.1086970\n .. [sirangelo20] Sirangelo, B., Caloiero, T., Coscarelli, R. et al. Combining stochastic models of air temperature and vapour pressure for the analysis of the bioclimatic comfort through the Humidex. Sci Rep 10, 11395 (2020). https://doi.org/10.1038/s41598-020-68297-4\n .. 
[eccc] https://climate.weather.gc.ca/glossary_e.html\n \"\"\"\n if (tdps is None) == (hurs is None):\n raise ValueError(\n \"At least one of `tdps` or `hurs` must be given, and not both.\"\n )\n\n # Vapour pressure in hPa\n if tdps is not None:\n # Convert dewpoint temperature to Kelvins\n tdps = convert_units_to(tdps, \"kelvin\")\n e = 6.112 * np.exp(5417.7530 * (1 / 273.16 - 1.0 / tdps))\n\n elif hurs is not None:\n # Convert dry bulb temperature to Celsius\n tasC = convert_units_to(tas, \"celsius\")\n e = hurs / 100 * 6.112 * 10 ** (7.5 * tasC / (tasC + 237.7))\n\n # Temperature delta due to humidity in delta_degC\n h = 5 / 9 * (e - 10)\n h.attrs[\"units\"] = \"delta_degree_Celsius\"\n\n # Get delta_units for output\n du = (1 * units2pint(tas) - 0 * units2pint(tas)).units\n h = convert_units_to(h, du)\n\n # Add the delta to the input temperature\n out = h + tas\n out.attrs[\"units\"] = tas.units\n return out\n\n\n@declare_units(tasmin=\"[temperature]\", tasmax=\"[temperature]\")\ndef tas(tasmin: xr.DataArray, tasmax: xr.DataArray) -> xr.DataArray:\n \"\"\"Average temperature from minimum and maximum temperatures.\n\n We assume a symmetrical distribution for the temperature and retrieve the average value as Tg = (Tx + Tn) / 2\n\n Parameters\n ----------\n tasmin : xarray.DataArray\n Minimum (daily) temperature\n tasmax : xarray.DataArray\n Maximum (daily) temperature\n\n Returns\n -------\n xarray.DataArray\n Mean (daily) temperature [same units as tasmin]\n \"\"\"\n tasmax = convert_units_to(tasmax, tasmin)\n tas = (tasmax + tasmin) / 2\n tas.attrs[\"units\"] = tasmin.attrs[\"units\"]\n return tas\n\n\n@declare_units(uas=\"[speed]\", vas=\"[speed]\", calm_wind_thresh=\"[speed]\")\ndef uas_vas_2_sfcwind(\n uas: xr.DataArray, vas: xr.DataArray, calm_wind_thresh: str = \"0.5 m/s\"\n) -> Tuple[xr.DataArray, xr.DataArray]:\n \"\"\"Wind speed and direction from the eastward and northward wind components.\n\n Computes the magnitude and angle of the wind vector from its northward and eastward components,\n following the meteorological convention that sets calm wind to a direction of 0° and northerly wind to 360°.\n\n Parameters\n ----------\n uas : xr.DataArray\n Eastward wind velocity\n vas : xr.DataArray\n Northward wind velocity\n calm_wind_thresh : str\n The threshold under which winds are considered \"calm\" and for which the direction\n is set to 0. 
On the Beaufort scale, calm winds are defined as < 0.5 m/s.\n\n Returns\n -------\n wind : xr.DataArray, [m s-1]\n Wind velocity\n wind_from_dir : xr.DataArray, [°]\n Direction from which the wind blows, following the meteorological convention where\n 360 stands for North and 0 for calm winds.\n\n Notes\n -----\n Winds with a velocity less than `calm_wind_thresh` are given a wind direction of 0°,\n while stronger northerly winds are set to 360°.\n \"\"\"\n # Converts the wind speed to m s-1\n uas = convert_units_to(uas, \"m/s\")\n vas = convert_units_to(vas, \"m/s\")\n wind_thresh = convert_units_to(calm_wind_thresh, \"m/s\")\n\n # Wind speed is the hypotenuse of \"uas\" and \"vas\"\n wind = np.hypot(uas, vas)\n wind.attrs[\"units\"] = \"m s-1\"\n\n # Calculate the angle\n wind_from_dir_math = np.degrees(np.arctan2(vas, uas))\n\n # Convert the angle from the mathematical standard to the meteorological standard\n wind_from_dir = (270 - wind_from_dir_math) % 360.0\n\n # According to the meteorological standard, calm winds must have a direction of 0°\n # while northerly winds have a direction of 360°\n # On the Beaufort scale, calm winds are defined as < 0.5 m/s\n wind_from_dir = xr.where(wind_from_dir.round() == 0, 360, wind_from_dir)\n wind_from_dir = xr.where(wind < wind_thresh, 0, wind_from_dir)\n wind_from_dir.attrs[\"units\"] = \"degree\"\n return wind, wind_from_dir\n\n\n@declare_units(sfcWind=\"[speed]\", sfcWindfromdir=\"[]\")\ndef sfcwind_2_uas_vas(\n sfcWind: xr.DataArray, sfcWindfromdir: xr.DataArray # noqa\n) -> Tuple[xr.DataArray, xr.DataArray]:\n \"\"\"Eastward and northward wind components from the wind speed and direction.\n\n Compute the eastward and northward wind components from the wind speed and direction.\n\n Parameters\n ----------\n sfcWind : xr.DataArray\n Wind velocity\n sfcWindfromdir : xr.DataArray\n Direction from which the wind blows, following the meteorological convention\n where 360 stands for North.\n\n Returns\n -------\n uas : xr.DataArray, [m s-1]\n Eastward wind velocity.\n vas : xr.DataArray, [m s-1]\n Northward wind velocity.\n\n \"\"\"\n # Converts the wind speed to m s-1\n sfcWind = convert_units_to(sfcWind, \"m/s\") # noqa\n\n # Converts the wind direction from the meteorological standard to the mathematical standard\n wind_from_dir_math = (-sfcWindfromdir + 270) % 360.0\n\n # TODO: This commented part should allow us to resample subdaily wind, but needs to be cleaned up and put elsewhere.\n # if resample is not None:\n # wind = wind.resample(time=resample).mean(dim='time', keep_attrs=True)\n #\n # # nb_per_day is the number of values each day. This should be calculated\n # wind_from_dir_math_per_day = wind_from_dir_math.reshape((len(wind.time), nb_per_day))\n # # Averages the subdaily angles around a circle, i.e. 
mean([0, 360]) = 0, not 180\n # wind_from_dir_math = np.concatenate([[degrees(phase(sum(rect(1, radians(d)) for d in angles) / len(angles)))]\n # for angles in wind_from_dir_math_per_day])\n\n uas = sfcWind * np.cos(np.radians(wind_from_dir_math))\n vas = sfcWind * np.sin(np.radians(wind_from_dir_math))\n uas.attrs[\"units\"] = \"m s-1\"\n vas.attrs[\"units\"] = \"m s-1\"\n return uas, vas\n\n\n@declare_units(tas=\"[temperature]\", ice_thresh=\"[temperature]\")\ndef saturation_vapor_pressure(\n tas: xr.DataArray, ice_thresh: str = None, method: str = \"sonntag90\" # noqa\n) -> xr.DataArray:\n \"\"\"Saturation vapor pressure from temperature.\n\n Parameters\n ----------\n tas : xr.DataArray\n Temperature array.\n ice_thresh : str\n Threshold temperature under which to switch to equations in reference to ice instead of water.\n If None (default) everything is computed with reference to water.\n method : {\"dewpoint\", \"goffgratch46\", \"sonntag90\", \"tetens30\", \"wmo08\"}\n Which method to use, see notes.\n\n Returns\n -------\n xarray.DataArray, [Pa]\n Saturation vapor pressure.\n\n Notes\n -----\n In all cases implemented here :math:`log(e_{sat})` is an empirically fitted function (usually a polynomial)\n where coefficients can be different when ice is taken as reference instead of water. Available methods are:\n\n - \"goffgratch46\" or \"GG46\", based on [goffgratch46]_, values and equation taken from [voemel]_.\n - \"sonntag90\" or \"SO90\", taken from [sonntag90]_.\n - \"tetens30\" or \"TE30\", based on [tetens30]_, values and equation taken from [voemel]_.\n - \"wmo08\" or \"WMO08\", taken from [wmo08]_.\n\n References\n ----------\n .. [goffgratch46] Goff, J. A., and S. Gratch (1946) Low-pressure properties of water from -160 to 212 °F, in Transactions of the American Society of Heating and Ventilating Engineers, pp 95-122, presented at the 52nd annual meeting of the American Society of Heating and Ventilating Engineers, New York, 1946.\n .. [sonntag90] Sonntag, D. (1990). Important new values of the physical constants of 1986, vapour pressure formulations based on the ITS-90, and psychrometer formulae. Zeitschrift für Meteorologie, 40(5), 340-344.\n .. [tetens30] Tetens, O. 1930. Über einige meteorologische Begriffe. Z. Geophys 6: 207-309.\n .. [voemel] https://cires1.colorado.edu/~voemel/vp.html\n .. [wmo08] World Meteorological Organization. (2008). Guide to meteorological instruments and methods of observation. Geneva, Switzerland: World Meteorological Organization. 
https://www.weather.gov/media/epz/mesonet/CWOP-WMO8.pdf\n \"\"\"\n if ice_thresh is not None:\n thresh = convert_units_to(ice_thresh, \"degK\")\n else:\n thresh = convert_units_to(\"0 K\", \"degK\")\n ref_is_water = tas > thresh\n\n if method in [\"sonntag90\", \"SO90\"]:\n e_sat = xr.where(\n ref_is_water,\n 100\n * np.exp( # Where ref_is_water is True, x100 is to convert hPa to Pa\n -6096.9385 / tas # type: ignore\n + 16.635794\n + -2.711193e-2 * tas # type: ignore\n + 1.673952e-5 * tas ** 2\n + 2.433502 * np.log(tas) # numpy's log is ln\n ),\n 100\n * np.exp( # Where ref_is_water is False (thus ref is ice)\n -6024.5282 / tas # type: ignore\n + 24.7219\n + 1.0613868e-2 * tas # type: ignore\n + -1.3198825e-5 * tas ** 2\n + -0.49382577 * np.log(tas)\n ),\n )\n elif method in [\"tetens30\", \"TE30\"]:\n e_sat = xr.where(\n ref_is_water,\n 610.78 * np.exp(17.269388 * (tas - 273.16) / (tas - 35.86)),\n 610.78 * np.exp(21.8745584 * (tas - 273.16) / (tas - 7.66)),\n )\n elif method in [\"goffgratch46\", \"GG46\"]:\n Tb = 373.16 # Water boiling temp [K]\n eb = 101325 # e_sat at Tb [Pa]\n Tp = 273.16 # Triple-point temperature [K]\n ep = 611.73 # e_sat at Tp [Pa]\n e_sat = xr.where(\n ref_is_water,\n eb\n * 10\n ** (\n -7.90298 * ((Tb / tas) - 1) # type: ignore\n + 5.02808 * np.log10(Tb / tas) # type: ignore\n + -1.3817e-7 * (10 ** (11.344 * (1 - tas / Tb)) - 1)\n + 8.1328e-3 * (10 ** (-3.49149 * ((Tb / tas) - 1)) - 1) # type: ignore\n ),\n ep\n * 10\n ** (\n -9.09718 * ((Tp / tas) - 1) # type: ignore\n + -3.56654 * np.log10(Tp / tas) # type: ignore\n + 0.876793 * (1 - tas / Tp)\n ),\n )\n elif method in [\"wmo08\", \"WMO08\"]:\n e_sat = xr.where(\n ref_is_water,\n 611.2 * np.exp(17.62 * (tas - 273.16) / (tas - 30.04)),\n 611.2 * np.exp(22.46 * (tas - 273.16) / (tas - 0.54)),\n )\n else:\n raise ValueError(\n f\"Method {method} is not in ['sonntag90', 'tetens30', 'goffgratch46', 'wmo08']\"\n )\n\n e_sat.attrs[\"units\"] = \"Pa\"\n return e_sat\n\n\n@declare_units(\n tas=\"[temperature]\",\n tdps=\"[temperature]\",\n huss=\"[]\",\n ps=\"[pressure]\",\n ice_thresh=\"[temperature]\",\n)\ndef relative_humidity(\n tas: xr.DataArray,\n tdps: xr.DataArray = None,\n huss: xr.DataArray = None,\n ps: xr.DataArray = None,\n ice_thresh: str = None,\n method: str = \"sonntag90\",\n invalid_values: str = \"clip\",\n) -> xr.DataArray:\n r\"\"\"Relative humidity.\n\n Compute relative humidity from temperature and either dewpoint temperature or specific humidity and pressure through\n the saturation vapor pressure.\n\n Parameters\n ----------\n tas : xr.DataArray\n Temperature array\n tdps : xr.DataArray\n Dewpoint temperature, if specified, overrides huss and ps.\n huss : xr.DataArray\n Specific humidity.\n ps : xr.DataArray\n Air Pressure.\n ice_thresh : str\n Threshold temperature under which to switch to equations in reference to ice instead of water.\n If None (default) everything is computed with reference to water. Does nothing if 'method' is \"bohren98\".\n method : {\"bohren98\", \"goffgratch46\", \"sonntag90\", \"tetens30\", \"wmo08\"}\n Which method to use, see notes of this function and of `saturation_vapor_pressure`.\n invalid_values : {\"clip\", \"mask\", None}\n What to do with values outside the 0-100 range. 
If \"clip\" (default), clips everything to 0 - 100,\n if \"mask\", replaces values outside the range by np.nan, and if `None`, does nothing.\n\n Returns\n -------\n xr.DataArray, [%]\n Relative humidity.\n\n Notes\n -----\n In the following, let :math:`T`, :math:`T_d`, :math:`q` and :math:`p` be the temperature,\n the dew point temperature, the specific humidity and the air pressure.\n\n **For the \"bohren98\" method** : This method does not use the saturation vapor pressure directly,\n but rather uses an approximation of the ratio of :math:`\\frac{e_{sat}(T_d)}{e_{sat}(T)}`.\n With :math:`L` the enthalpy of vaporization of water and :math:`R_w` the gas constant for water vapor,\n the relative humidity is computed as:\n\n .. math::\n\n RH = e^{\\frac{-L (T - T_d)}{R_wTT_d}}\n\n From [BohrenAlbrecht1998]_, formula taken from [Lawrence2005]_. :math:`L = 2.5\\times 10^{-6}` J kg-1, exact for :math:`T = 273.15` K, is used.\n\n **Other methods**: With :math:`w`, :math:`w_{sat}`, :math:`e_{sat}` the mixing ratio,\n the saturation mixing ratio and the saturation vapor pressure.\n If the dewpoint temperature is given, relative humidity is computed as:\n\n .. math::\n\n RH = 100\\frac{e_{sat}(T_d)}{e_{sat}(T)}\n\n Otherwise, the specific humidity and the air pressure must be given so relative humidity can be computed as:\n\n .. math::\n\n RH = 100\\frac{w}{w_{sat}}\n w = \\frac{q}{1-q}\n w_{sat} = 0.622\\frac{e_{sat}}{P - e_{sat}}\n\n The methods differ by how :math:`e_{sat}` is computed. See the doc of :py:meth:`xclim.core.utils.saturation_vapor_pressure`.\n\n References\n ----------\n .. [Lawrence2005] Lawrence, M.G. (2005). The Relationship between Relative Humidity and the Dewpoint Temperature in Moist Air: A Simple Conversion and Applications. Bull. Amer. Meteor. Soc., 86, 225–234, https://doi.org/10.1175/BAMS-86-2-225\n .. [BohrenAlbrecht1998] Craig F. Bohren, Bruce A. Albrecht. Atmospheric Thermodynamics. 
Oxford University Press, 1998.\n    \"\"\"\n    if method in (\"bohren98\", \"BA98\"):\n        if tdps is None:\n            raise ValueError(\"To use method 'bohren98' (BA98), dewpoint must be given.\")\n        tdps = convert_units_to(tdps, \"degK\")\n        tas = convert_units_to(tas, \"degK\")\n        L = 2.501e6\n        Rw = 461.5\n        hurs = 100 * np.exp(-L * (tas - tdps) / (Rw * tas * tdps))  # type: ignore\n    elif tdps is not None:\n        e_sat_dt = saturation_vapor_pressure(\n            tas=tdps, ice_thresh=ice_thresh, method=method\n        )\n        e_sat_t = saturation_vapor_pressure(\n            tas=tas, ice_thresh=ice_thresh, method=method\n        )\n        hurs = 100 * e_sat_dt / e_sat_t  # type: ignore\n    else:\n        ps = convert_units_to(ps, \"Pa\")\n        huss = convert_units_to(huss, \"\")\n        tas = convert_units_to(tas, \"degK\")\n\n        e_sat = saturation_vapor_pressure(tas=tas, ice_thresh=ice_thresh, method=method)\n\n        w = huss / (1 - huss)\n        w_sat = 0.62198 * e_sat / (ps - e_sat)  # type: ignore\n        hurs = 100 * w / w_sat\n\n    if invalid_values == \"clip\":\n        hurs = hurs.clip(0, 100)\n    elif invalid_values == \"mask\":\n        hurs = hurs.where((hurs <= 100) & (hurs >= 0))\n    hurs.attrs[\"units\"] = \"%\"\n    return hurs\n\n\n@declare_units(\n    tas=\"[temperature]\",\n    hurs=\"[]\",\n    ps=\"[pressure]\",\n    ice_thresh=\"[temperature]\",\n)\ndef specific_humidity(\n    tas: xr.DataArray,\n    hurs: xr.DataArray,\n    ps: xr.DataArray,\n    ice_thresh: str = None,\n    method: str = \"sonntag90\",\n    invalid_values: str = None,\n) -> xr.DataArray:\n    r\"\"\"Specific humidity from temperature, relative humidity and pressure.\n\n    Parameters\n    ----------\n    tas : xr.DataArray\n        Temperature array\n    hurs : xr.DataArray\n        Relative Humidity.\n    ps : xr.DataArray\n        Air Pressure.\n    ice_thresh : str\n        Threshold temperature under which to switch to equations in reference to ice instead of water.\n        If None (default) everything is computed with reference to water.\n    method : {\"goffgratch46\", \"sonntag90\", \"tetens30\", \"wmo08\"}\n        Which method to use, see notes of this function and of `saturation_vapor_pressure`.\n    invalid_values : {\"clip\", \"mask\", None}\n        What to do with values larger than the saturation specific humidity and lower than 0.\n        If \"clip\", clips everything to 0 - q_sat,\n        if \"mask\", replaces values outside the range by np.nan,\n        if None, does nothing.\n\n    Returns\n    -------\n    xarray.DataArray, [dimensionless]\n        Specific humidity.\n\n    Notes\n    -----\n    In the following, let :math:`T`, :math:`hurs` (in %) and :math:`p` be the temperature,\n    the relative humidity and the air pressure. With :math:`w`, :math:`w_{sat}`, :math:`e_{sat}` the mixing ratio,\n    the saturation mixing ratio and the saturation vapor pressure, specific humidity :math:`q` is computed as:\n\n    .. math::\n\n        w_{sat} = 0.622\frac{e_{sat}}{P - e_{sat}}\n        w = w_{sat} * hurs / 100\n        q = w / (1 + w)\n\n    The methods differ by how :math:`e_{sat}` is computed. See the doc of `xclim.core.utils.saturation_vapor_pressure`.\n\n    If `invalid_values` is not `None`, the saturation specific humidity :math:`q_{sat}` is computed as:\n\n    .. 
math::\n\n q_{sat} = w_{sat} / (1 + w_{sat})\n \"\"\"\n ps = convert_units_to(ps, \"Pa\")\n hurs = convert_units_to(hurs, \"\")\n tas = convert_units_to(tas, \"degK\")\n\n e_sat = saturation_vapor_pressure(tas=tas, ice_thresh=ice_thresh, method=method)\n\n w_sat = 0.62198 * e_sat / (ps - e_sat) # type: ignore\n w = w_sat * hurs\n q = w / (1 + w)\n\n if invalid_values is not None:\n q_sat = w_sat / (1 + w_sat)\n if invalid_values == \"clip\":\n q = q.clip(0, q_sat)\n elif invalid_values == \"mask\":\n q = q.where((q <= q_sat) & (q >= 0))\n q.attrs[\"units\"] = \"\"\n return q\n\n\n@declare_units(pr=\"[precipitation]\", tas=\"[temperature]\", thresh=\"[temperature]\")\ndef snowfall_approximation(\n pr: xr.DataArray,\n tas: xr.DataArray,\n thresh: str = \"0 degC\",\n method: str = \"binary\",\n) -> xr.DataArray:\n \"\"\"Snowfall approximation from total precipitation and temperature.\n\n Solid precipitation estimated from precipitation and temperature according to a given method.\n\n Parameters\n ----------\n pr : xarray.DataArray\n Mean daily precipitation flux.\n tas : xarray.DataArray, optional\n Mean, maximum, or minimum daily temperature.\n thresh : str,\n Threshold temperature, used by method \"binary\".\n method : {\"binary\", \"brown\", \"auer\"}\n Which method to use when approximating snowfall from total precipitation. See notes.\n\n Returns\n -------\n xarray.DataArray, [same units as pr]\n Solid precipitation flux.\n\n Notes\n -----\n The following methods are available to approximate snowfall and are drawn from the\n Canadian Land Surface Scheme (CLASS, [Verseghy09]_).\n\n - ``'binary'`` : When the temperature is under the freezing threshold, precipitation\n is assumed to be solid. The method is agnostic to the type of temperature used\n (mean, maximum or minimum).\n - ``'brown'`` : The phase between the freezing threshold goes from solid to liquid linearly\n over a range of 2°C over the freezing point.\n - ``'auer'`` : The phase between the freezing threshold goes from solid to liquid as a degree six\n polynomial over a range of 6°C over the freezing point.\n\n References\n ----------\n .. 
[Verseghy09] Diana Verseghy (2009), CLASS – The Canadian Land Surface Scheme (Version 3.4), Technical\n Documentation (Version 1.1), Environment Canada, Climate Research Division, Science and Technology Branch.\n https://gitlab.com/cccma/classic/-/blob/master/src/atmosphericVarsCalc.f90\n \"\"\"\n\n if method == \"binary\":\n thresh = convert_units_to(thresh, tas)\n prsn = pr.where(tas <= thresh, 0)\n\n elif method == \"brown\":\n # Freezing point + 2C in the native units\n upper = convert_units_to(convert_units_to(thresh, \"degC\") + 2, tas)\n thresh = convert_units_to(thresh, tas)\n\n # Interpolate fraction over temperature (in units of tas)\n t = xr.DataArray(\n [-np.inf, thresh, upper, np.inf], dims=(\"tas\",), attrs={\"units\": \"degC\"}\n )\n fraction = xr.DataArray([1.0, 1.0, 0.0, 0.0], dims=(\"tas\",), coords={\"tas\": t})\n\n # Multiply precip by snowfall fraction\n prsn = pr * fraction.interp(tas=tas, method=\"linear\")\n\n elif method == \"auer\":\n dtas = convert_units_to(tas, \"degK\") - convert_units_to(thresh, \"degK\")\n\n # Create nodes for the snowfall fraction: -inf, thresh, ..., thresh+6, inf [degC]\n t = np.concatenate(\n [[-273.15], np.linspace(0, 6, 100, endpoint=False), [6, 1e10]]\n )\n t = xr.DataArray(t, dims=\"tas\", name=\"tas\", coords={\"tas\": t})\n\n # The polynomial coefficients, valid between thresh and thresh + 6 (defined in CLASS)\n coeffs = xr.DataArray(\n [100, 4.6664, -15.038, -1.5089, 2.0399, -0.366, 0.0202],\n dims=(\"degree\",),\n coords={\"degree\": range(7)},\n )\n\n fraction = xr.polyval(t.tas, coeffs).clip(0, 100) / 100\n fraction[0] = 1\n fraction[-2:] = 0\n\n # Convert snowfall fraction coordinates to native tas units\n prsn = pr * fraction.interp(tas=dtas, method=\"linear\")\n\n else:\n raise ValueError(f\"Method {method} not one of 'binary', 'brown' or 'auer'.\")\n\n prsn.attrs[\"units\"] = pr.attrs[\"units\"]\n return prsn\n\n\n@declare_units(pr=\"[precipitation]\", tas=\"[temperature]\", thresh=\"[temperature]\")\ndef rain_approximation(\n pr: xr.DataArray,\n tas: xr.DataArray,\n thresh: str = \"0 degC\",\n method: str = \"binary\",\n) -> xr.DataArray:\n \"\"\"Rainfall approximation from total precipitation and temperature.\n\n Liquid precipitation estimated from precipitation and temperature according to a given method.\n This is a convenience method based on :py:func:`snowfall_approximation`, see the latter for details.\n\n Parameters\n ----------\n pr : xarray.DataArray\n Mean daily precipitation flux.\n tas : xarray.DataArray, optional\n Mean, maximum, or minimum daily temperature.\n thresh : str,\n Threshold temperature, used by method \"binary\".\n method : {\"binary\", \"brown\", \"auer\"}\n Which method to use when approximating snowfall from total precipitation. 
See notes.\n\n    Returns\n    -------\n    xarray.DataArray, [same units as pr]\n        Liquid precipitation rate.\n\n    Notes\n    -----\n    This method computes the snowfall approximation and subtracts it from the total\n    precipitation to estimate the liquid rain precipitation.\n\n    See also\n    --------\n    snowfall_approximation\n    \"\"\"\n    prra = pr - snowfall_approximation(pr, tas, thresh=thresh, method=method)\n    prra.attrs[\"units\"] = pr.attrs[\"units\"]\n    return prra\n\n\n@declare_units(\n    tas=\"[temperature]\",\n    sfcWind=\"[speed]\",\n)\ndef wind_chill_index(\n    tas: xr.DataArray,\n    sfcWind: xr.DataArray,\n    method: str = \"CAN\",\n    mask_invalid: bool = True,\n):\n    r\"\"\"Wind chill index.\n\n    The Wind Chill Index is an estimation of how cold the weather feels to the average person.\n    It is computed from the air temperature and the 10-m wind. As defined by Environment and Climate Change Canada ([MVSZ15]_),\n    two equations exist, the conventional one and one for slow winds (usually < 5 km/h), see Notes.\n\n    Parameters\n    ----------\n    tas : xarray.DataArray\n        Surface air temperature.\n    sfcWind : xarray.DataArray\n        Surface wind speed (10 m).\n    method : {'CAN', 'US'}\n        If \"CAN\" (default), a \"slow wind\" equation is used where winds are slower than 5 km/h, see Notes.\n    mask_invalid : bool\n        Whether or not to mask values when the inputs are outside their validity range.\n        If True (default), points where the temperature is above a threshold are masked.\n        The threshold is 0°C for the canadian method and 50°F for the american one.\n        With the latter method, points where sfcWind < 3 mph are also masked.\n\n    Returns\n    -------\n    xarray.DataArray, [degC]\n        Wind Chill Index.\n\n    Notes\n    -----\n    Following the calculations of Environment and Climate Change Canada, this function switches from the standardized index\n    to another one for slow winds. The standard index is the same as used by the National Weather Service of the USA. Given\n    a temperature at surface :math:`T` (in °C) and 10-m wind speed :math:`V` (in km/h), the Wind Chill Index :math:`W` (dimensionless)\n    is computed as:\n\n    .. math::\n\n        W = 13.12 + 0.6215*T - 11.37*V^0.16 + 0.3965*T*V^0.16\n\n    Under slow winds (:math:`V < 5` km/h), and using the canadian method, it becomes:\n\n    .. math::\n\n        W = T + \frac{-1.59 + 0.1345 * T}{5} * V\n\n\n    Both equations are invalid for temperature over 0°C in the canadian method.\n\n    The american Wind Chill Temperature index (WCT), as defined by USA's National Weather Service, is computed when\n    `method='US'`. In that case, the maximal valid temperature is 50°F (10 °C) and minimal wind speed is 3 mph (4.8 km/h).\n\n    References\n    ----------\n    .. [MVSZ15] Éva Mekis, Lucie A. Vincent, Mark W. Shephard & Xuebin Zhang (2015) Observed Trends in Severe Weather Conditions Based on Humidex, Wind Chill, and Heavy Rainfall Events in Canada for 1953–2012, Atmosphere-Ocean, 53:4, 383-397, DOI: 10.1080/07055900.2015.1086970\n        Osczevski, R., & Bluestein, M. (2005). The New Wind Chill Equivalent Temperature Chart. Bulletin of the American Meteorological Society, 86(10), 1453–1458. https://doi.org/10.1175/BAMS-86-10-1453\n    .. [NWS] Wind Chill Questions, Cold Resources, National Weather Service, retrieved 25-05-21. 
https://www.weather.gov/safety/cold-faqs\n \"\"\"\n tas = convert_units_to(tas, \"degC\")\n sfcWind = convert_units_to(sfcWind, \"km/h\")\n\n V = sfcWind ** 0.16\n W = 13.12 + 0.6215 * tas - 11.37 * V + 0.3965 * tas * V\n\n if method.upper() == \"CAN\":\n W = xr.where(sfcWind < 5, tas + sfcWind * (-1.59 + 0.1345 * tas) / 5, W)\n elif method.upper() != \"US\":\n raise ValueError(f\"`method` must be one of 'US' and 'CAN'. Got '{method}'.\")\n\n if mask_invalid:\n mask = {\"CAN\": tas <= 0, \"US\": (sfcWind > 4.828032) & (tas <= 10)}\n W = W.where(mask[method.upper()])\n\n W.attrs[\"units\"] = \"degC\"\n return W\n\n\n@declare_units(\n delta_tas=\"[temperature]\",\n pr_baseline=\"[precipitation]\",\n)\ndef clausius_clapeyron_scaled_precipitation(\n delta_tas: xr.DataArray,\n pr_baseline: xr.DataArray,\n cc_scale_factor: float = 1.07,\n) -> xr.DataArray:\n r\"\"\"Scale precipitation according to the Clausius-Clapeyron relation.\n\n Parameters\n ----------\n delta_tas : xarray.DataArray\n Difference in temperature between a baseline climatology and another climatology.\n pr_baseline : xarray.DataArray\n Baseline precipitation to adjust with Clausius-Clapeyron.\n cc_scale_factor : float (default = 1.07)\n Clausius Clapeyron scale factor.\n\n Returns\n -------\n DataArray\n Baseline precipitation scaled to other climatology using Clausius-Clapeyron relationship.\n\n Notes\n -----\n The Clausius-Clapeyron equation for water vapor under typical atmospheric conditions states that the saturation\n water vapor pressure :math:`e_s` changes approximately exponentially with temperature\n\n .. math::\n\n \\frac{\\\\mathrm{d}e_s(T)}{\\\\mathrm{d}T} \\approx 1.07 e_s(T)\n\n This function assumes that precipitation can be scaled by the same factor.\n\n Warnings\n --------\n Make sure that `delta_tas` is computed over a baseline compatible with `pr_baseline`. So for example,\n if `delta_tas` is the climatological difference between a baseline and a future period, then `pr_baseline`\n should be precipitations over a period within the same baseline.\n \"\"\"\n\n # Get difference in temperature. Time-invariant baseline temperature (from above) is broadcast.\n delta_tas = convert_units_to(delta_tas, \"delta_degreeC\")\n\n # Calculate scaled precipitation.\n pr_out = pr_baseline * (cc_scale_factor ** delta_tas)\n pr_out.attrs[\"units\"] = pr_baseline.attrs[\"units\"]\n\n return pr_out\n\n\n@declare_units(tasmin=\"[temperature]\", tasmax=\"[temperature]\", tas=\"[temperature]\")\ndef potential_evapotranspiration(\n tasmin: Optional[xr.DataArray] = None,\n tasmax: Optional[xr.DataArray] = None,\n tas: Optional[xr.DataArray] = None,\n method: str = \"BR65\",\n) -> xr.DataArray:\n \"\"\"Potential evapotranspiration.\n\n The potential for water evaporation from soil and transpiration by plants if the water supply is\n sufficient, according to a given method.\n\n Parameters\n ----------\n tasmin : xarray.DataArray\n Minimum daily temperature.\n tasmax : xarray.DataArray\n Maximum daily temperature.\n tas : xarray.DataArray\n Mean daily temperature.\n method : {\"baierrobertson65\", \"BR65\", \"hargreaves85\", \"HG85\", \"thornthwaite48\", \"TW48\"}\n Which method to use, see notes.\n\n Returns\n -------\n xarray.DataArray\n\n Notes\n -----\n Available methods are:\n\n - \"baierrobertson65\" or \"BR65\", based on [baierrobertson65]_. Requires tasmin and tasmax, daily [D] freq.\n - \"hargreaves85\" or \"HG85\", based on [hargreaves85]_. Requires tasmin and tasmax, daily [D] freq. 
(optional: tas can be given in addition of tasmin and tasmax).\n - \"thornthwaite48\" or \"TW48\", based on [thornthwaite48]_. Requires tasmin and tasmax, monthly [MS] or daily [D] freq. (optional: tas can be given instead of tasmin and tasmax).\n\n References\n ----------\n .. [baierrobertson65] Baier, W., & Robertson, G. W. (1965). Estimation of latent evaporation from simple weather observations. Canadian journal of plant science, 45(3), 276-284.\n .. [hargreaves85] Hargreaves, G. H., & Samani, Z. A. (1985). Reference crop evapotranspiration from temperature. Applied engineering in agriculture, 1(2), 96-99.\n .. [thornthwaite48] Thornthwaite, C. W. (1948). An approach toward a rational classification of climate. Geographical review, 38(1), 55-94.\n \"\"\"\n\n if method in [\"baierrobertson65\", \"BR65\"]:\n tasmin = convert_units_to(tasmin, \"degF\")\n tasmax = convert_units_to(tasmax, \"degF\")\n\n latr = (tasmin.lat * np.pi) / 180\n gsc = 0.082 # MJ/m2/min\n\n # julian day fraction\n jd_frac = (datetime_to_decimal_year(tasmin.time) % 1) * 2 * np.pi\n\n ds = 0.409 * np.sin(jd_frac - 1.39)\n dr = 1 + 0.033 * np.cos(jd_frac)\n omega = np.arccos(-np.tan(latr) * np.tan(ds))\n re = (\n (24 * 60 / np.pi)\n * gsc\n * dr\n * (\n omega * np.sin(latr) * np.sin(ds)\n + np.cos(latr) * np.cos(ds) * np.sin(omega)\n )\n ) # MJ/m2/day\n re = re / 4.1864e-2 # cal/cm2/day\n\n # Baier et Robertson(1965) formula\n out = 0.094 * (\n -87.03 + 0.928 * tasmax + 0.933 * (tasmax - tasmin) + 0.0486 * re\n )\n out = out.clip(0)\n\n elif method in [\"hargreaves85\", \"HG85\"]:\n tasmin = convert_units_to(tasmin, \"degC\")\n tasmax = convert_units_to(tasmax, \"degC\")\n if tas is None:\n tas = (tasmin + tasmax) / 2\n else:\n tas = convert_units_to(tas, \"degC\")\n\n latr = (tasmin.lat * np.pi) / 180\n gsc = 0.082 # MJ/m2/min\n lv = 2.5 # MJ/kg\n\n # julian day fraction\n jd_frac = (datetime_to_decimal_year(tasmin.time) % 1) * 2 * np.pi\n\n ds = 0.409 * np.sin(jd_frac - 1.39)\n dr = 1 + 0.033 * np.cos(jd_frac)\n omega = np.arccos(-np.tan(latr) * np.tan(ds))\n ra = (\n (24 * 60 / np.pi)\n * gsc\n * dr\n * (\n omega * np.sin(latr) * np.sin(ds)\n + np.cos(latr) * np.cos(ds) * np.sin(omega)\n )\n ) # MJ/m2/day\n\n # Hargreaves and Samani(1985) formula\n out = (0.0023 * ra * (tas + 17.8) * (tasmax - tasmin) ** 0.5) / lv\n out = out.clip(0)\n\n elif method in [\"thornthwaite48\", \"TW48\"]:\n if tas is None:\n tasmin = convert_units_to(tasmin, \"degC\")\n tasmax = convert_units_to(tasmax, \"degC\")\n tas = (tasmin + tasmax) / 2\n else:\n tas = convert_units_to(tas, \"degC\")\n tas = tas.clip(0)\n tas = tas.resample(time=\"MS\").mean(dim=\"time\")\n\n latr = (tas.lat * np.pi) / 180 # rad\n\n start = \"-\".join(\n [\n str(tas.time[0].dt.year.values),\n \"{:02d}\".format(tas.time[0].dt.month.values),\n \"01\",\n ]\n )\n\n end = \"-\".join(\n [\n str(tas.time[-1].dt.year.values),\n \"{:02d}\".format(tas.time[-1].dt.month.values),\n str(tas.time[-1].dt.daysinmonth.values),\n ]\n )\n\n time_v = xr.DataArray(\n date_range(start, end, freq=\"D\", calendar=\"standard\"),\n dims=\"time\",\n name=\"time\",\n )\n\n # julian day fraction\n jd_frac = (datetime_to_decimal_year(time_v) % 1) * 2 * np.pi\n\n ds = 0.409 * np.sin(jd_frac - 1.39)\n omega = np.arccos(-np.tan(latr) * np.tan(ds)) * 180 / np.pi # degrees\n\n # monthly-mean daytime length (multiples of 12 hours)\n dl = 2 * omega / (15 * 12)\n dl_m = dl.resample(time=\"MS\").mean(dim=\"time\")\n\n # annual heat index\n id_m = (tas / 5) ** 1.514\n id_y = 
id_m.resample(time=\"YS\").sum(dim=\"time\")\n\n tas_idy_a = []\n for base_time, indexes in tas.resample(time=\"YS\").groups.items():\n tas_y = tas.isel(time=indexes)\n id_v = id_y.sel(time=base_time)\n a = 6.75e-7 * id_v ** 3 - 7.71e-5 * id_v ** 2 + 0.01791 * id_v + 0.49239\n\n frac = (10 * tas_y / id_v) ** a\n tas_idy_a.append(frac)\n\n tas_idy_a = xr.concat(tas_idy_a, dim=\"time\")\n\n # Thornthwaite(1948) formula\n out = 1.6 * dl_m * tas_idy_a # cm/month\n out = 10 * out # mm/month\n\n else:\n raise NotImplementedError(f\"'{method}' method is not implemented.\")\n\n out.attrs[\"units\"] = \"mm\"\n return amount2rate(out, out_units=\"kg m-2 s-1\")\n" ]
[ [ "numpy.log", "numpy.radians", "numpy.linspace", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.tan", "numpy.log10", "numpy.exp", "numpy.hypot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
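The humidity code in the record above computes relative humidity as the ratio of saturation vapour pressures e_sat(Td)/e_sat(T). The snippet below is a minimal, numpy-only sketch of that relationship using the WMO08 form of e_sat quoted in the code; the helper name e_sat_wmo08 and the sample temperatures are illustrative assumptions and are not part of the package the record was extracted from.

import numpy as np

def e_sat_wmo08(tas_k):
    # Saturation vapour pressure over water [Pa], WMO08 form used in the code above.
    return 611.2 * np.exp(17.62 * (tas_k - 273.16) / (tas_k - 30.04))

# Relative humidity from dewpoint: RH = 100 * e_sat(Td) / e_sat(T)
tas, tdps = 293.15, 283.15  # 20 degC air temperature, 10 degC dewpoint (hypothetical inputs)
rh = 100 * e_sat_wmo08(tdps) / e_sat_wmo08(tas)
print(round(rh, 1))  # about 52.6 % for these inputs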
AQ18/skimpy
[ "435fc50244f2ca815bbb39d525a82a4692f5c0ac", "435fc50244f2ca815bbb39d525a82a4692f5c0ac", "435fc50244f2ca815bbb39d525a82a4692f5c0ac", "435fc50244f2ca815bbb39d525a82a4692f5c0ac" ]
[ "tests/test_ORACLE.py", "tutorials/mechanisms/tutorial_convenience.py", "skimpy/analysis/oracle/load_pytfa_solution.py", "tutorials/parameter_sampling/tutorial_dependent_non_linear_fluxes.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: skimpy\n :platform: Unix, Windows\n :synopsis: Simple Kinetic Models in Python\n\n.. moduleauthor:: SKiMPy team\n\n[---------]\n\nCopyright 2018 Laboratory of Computational Systems Biotechnology (LCSB),\nEcole Polytechnique Federale de Lausanne (EPFL), Switzerland\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport pytest\n\nimport numpy as np\n\nimport pytfa\nfrom pytfa.io import import_matlab_model, load_thermoDB\nfrom pytfa.io.viz import get_reaction_data\n\nfrom skimpy.utils.namespace import *\nfrom skimpy.sampling.simple_parameter_sampler import SimpleParameterSampler\nfrom skimpy.core.solution import ODESolutionPopulation\nfrom skimpy.io.generate_from_pytfa import FromPyTFA\nfrom skimpy.utils.general import sanitize_cobra_vars\nfrom skimpy.utils.tabdict import TabDict\n\nfrom skimpy.analysis.oracle import *\n\nfrom settings import this_directory\nfrom os.path import join\n\nCPLEX = 'optlang-cplex'\nGLPK = 'optlang-glpk'\n\nBASL_FLUX = 1e-6 # mmol/gDW/hr\nMIN_DISPLACEMENT = 1e-2\nSMALL_MOLECULES = ['h_c','h_e','h_m',\n 'h2o2_c','h2o2_e',\n 'co2_c','co2_r','co2_e',' co2_m',\n 'pi_c','pi_r','pi_e','pi_m',\n 'o2_c','o2_r','o2_e','o2_m',\n 'o2s_c', 'o2s_m', 'o2s_e',\n 'ppi_c','ppi_m','ppi_r',\n 'hco3_c','hco3_e','hco3_m',\n 'na1_c','na1_e']\n\ndef import_toy_model_from_cobra():\n path_to_model = join(this_directory, '..', 'models/toy_model.mat')\n\n cobra_model = import_matlab_model(path_to_model)\n #Test the model\n solution = cobra_model.optimize()\n\n return cobra_model\n\n\ndef convert_cobra_to_tfa(cobra_model):\n \"\"\"\n Make tfa analysis of the model\n \"\"\"\n path_to_data = join(this_directory, '..', 'data/thermo_data.thermodb')\n\n thermo_data = load_thermoDB(path_to_data)\n\n tmodel= pytfa.ThermoModel(thermo_data, cobra_model)\n # for comp in tmodel.compartments.values():\n # comp['c_min'] = 1e-8\n\n tmodel.prepare()\n tmodel.convert(add_displacement = True)\n\n # Set the solver\n tmodel.solver = GLPK\n # Set solver options\n # GLPK option optimality and integrality deprecated\n #tmodel.solver.configuration.tolerances.optimality = 1e-9\n #tmodel.solver.configuration.tolerances.integrality = 1e-9\n\n tmodel.solver.configuration.tolerances.feasibility = 1e-9\n\n\n # Find a solution\n solution = tmodel.optimize()\n\n\n return tmodel\n\n\ndef prepare_tfa_model_for_kinetic_import(tmodel):\n \"\"\"\n Prepare the model to sample parameters\n \"\"\"\n\n # Add minimum flux requirements basal fluxes 1e-6\n # safe: ensure that fluxes that cant obey the minimum requirement are removed\n\n tmodel = add_min_flux_requirements(tmodel, BASL_FLUX, inplace=True )\n solution = tmodel.optimize()\n\n # Fix the flux directionality profile (FDP)\n tmodel = fix_directionality(tmodel, solution, inplace=True)\n solution = tmodel.optimize()\n\n # Add dummy free energy constrains for reaction of unknown free energy\n tmodel = add_undefined_delta_g(tmodel, solution, delta_g_std=0.0, delta_g_std_err=10000.0, inplace=True)\n solution = tmodel.optimize()\n\n # Force a 
minimal thermodynamic displacement\n\n tmodel = add_min_log_displacement(tmodel, MIN_DISPLACEMENT)\n solution = tmodel.optimize()\n\n return tmodel, solution\n\n\ndef import_kinetic_model_from_tfa(tmodel,solution):\n\n model_gen = FromPyTFA(small_molecules=SMALL_MOLECULES)\n kmodel = model_gen.import_model(tmodel, solution.raw)\n\n return kmodel\n\ndef get_flux_and_concentration_data(tmodel, solution):\n # Map fluxes back to reaction variables\n this_flux_solution = get_reaction_data(tmodel, solution.raw)\n # Create the flux dict\n # Convert fluxes from mmol/gDW/hr to mol/L/s\n # eColi 0.39 gDW/L\n flux_dict = (0.39*1e-3*this_flux_solution[[i.id for i in tmodel.reactions]]).to_dict()\n\n # Create a concentration dict with consistent names\n variable_names = tmodel.log_concentration.list_attr('name')\n metabolite_ids = tmodel.log_concentration.list_attr('id')\n #Get conentrations in mol\n temp_concentration_dict = np.exp(solution.raw[variable_names]).to_dict()\n\n # Map concentration names\n mapping_dict = {k:sanitize_cobra_vars(v) for k,v in zip(variable_names,metabolite_ids)}\n concentration_dict = {mapping_dict[k]:v for k,v in temp_concentration_dict.items()}\n\n return concentration_dict, flux_dict\n\n\n\"\"\"\nPrep and import model\n\"\"\"\ncmodel = import_toy_model_from_cobra()\ntmodel = convert_cobra_to_tfa(cmodel)\ntmodel, solution = prepare_tfa_model_for_kinetic_import(tmodel)\nkmodel = import_kinetic_model_from_tfa(tmodel,solution)\nconcentration_dict, flux_dict = get_flux_and_concentration_data(tmodel,solution)\n\ndef test_compile_mca():\n \"\"\"\n Compile the model\n \"\"\"\n kmodel.prepare(mca=True)\n\n parameter_list = TabDict([(k, p.symbol) for k, p in kmodel.parameters.items()\n if p.name.startswith('vmax_forward')])\n\n kmodel.compile_mca(sim_type=QSSA, parameter_list=parameter_list)\n\n\[email protected](name=['test_parameter_sampling_linear_pathway','test_compile_mca'])\ndef test_oracle_parameter_sampling():\n\n # Initialize parameter sampler\n sampling_parameters = SimpleParameterSampler.Parameters(n_samples=100)\n sampler = SimpleParameterSampler(sampling_parameters)\n\n # Sample the model\n parameter_population = sampler.sample(kmodel, flux_dict, concentration_dict)\n\n\n\[email protected](name=['test_oracle_parameter_sampling','test_compile_mca'])\ndef test_oracle_flux_concentration_sampling():\n\n pass\n\n\n\[email protected](name=['test_oracle_parameter_sampling','test_compile_mca'])\ndef test_oracle_ode():\n\n # Initialize parameter sampler\n sampling_parameters = SimpleParameterSampler.Parameters(n_samples=1)\n sampler = SimpleParameterSampler(sampling_parameters)\n\n # Sample the model\n parameter_population = sampler.sample(kmodel, flux_dict, concentration_dict)\n\n\n kmodel.compile_ode(sim_type=QSSA)\n kmodel.initial_conditions = TabDict([(k,v)for k,v in concentration_dict.items()])\n\n solutions = []\n for parameters in parameter_population:\n kmodel.parameters = parameters\n this_sol_qssa = kmodel.solve_ode(np.linspace(0.0, 10.0, 1000), solver_type='cvode')\n solutions.append(this_sol_qssa)\n\n this_sol_qssa.plot('test.html')\n\n\n solpop = ODESolutionPopulation(solutions)\n\n\[email protected](name=['test_oracle_parameter_sampling','test_compile_mca'])\ndef test_oracle_mca():\n\n\n # Initialize parameter sampler\n sampling_parameters = SimpleParameterSampler.Parameters(n_samples=1)\n sampler = SimpleParameterSampler(sampling_parameters)\n\n # Sample the model\n parameter_population = sampler.sample(kmodel, flux_dict, concentration_dict)\n\n\n \"\"\"\n 
Calculate control coefficients \n \"\"\"\n flux_control_coeff = kmodel.flux_control_fun(flux_dict,\n concentration_dict,\n parameter_population)\n\n concentration_control_coeff = kmodel.concentration_control_fun(flux_dict,\n concentration_dict,\n parameter_population)\n\n\n", "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: skimpy\n :platform: Unix, Windows\n :synopsis: Simple Kinetic Models in Python\n\n.. moduleauthor:: SKiMPy team\n\n[---------]\n\nCopyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),\nEcole Polytechnique Federale de Lausanne (EPFL), Switzerland\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport numpy as np\n# Test models\nfrom skimpy.core import *\nfrom skimpy.mechanisms import *\n\nname = 'pfk'\n\nSpecificConvenience = make_convenience([-2, -1, 3])\nmetabolites = SpecificConvenience.Reactants(substrate1 = 'A',\n substrate2 = 'B',\n product1 = 'C' )\n\n# thermo_data = {'S': 1e-2,\n# 'P': 1e-2,\n# 'sig_S': 0.1,\n# 'sig_P': 0.1,\n# 'gamma': 0.1,\n# 'flux': 1.0,\n# 'E_tot': 1e-5}\n\n## QSSA Method\nparameters = SpecificConvenience.Parameters(\n vmax_forward = 1.0,\n k_equilibrium=2.0,\n km_substrate1 = 10.0,\n km_substrate2 = 10.0,\n km_product1 = 10.0)\n\npfk = Reaction(name=name,\n mechanism=SpecificConvenience,\n reactants=metabolites,\n )\n\nthis_model = KineticModel()\nthis_model.add_reaction(pfk)\nthis_model.parametrize_by_reaction({pfk.name:parameters})\nthis_model.compile_ode(sim_type = QSSA)\n\nthis_model.initial_conditions['A'] = 10.0\nthis_model.initial_conditions['B'] = 10.0\nthis_model.initial_conditions['C'] = 10.0\n\n\nthis_sol_qssa = this_model.solve_ode(np.linspace(0.0, 50.0, 500),solver_type = 'cvode')\n\nthis_sol_qssa.plot('output/base_out_qssa.html')", "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n.. module:: skimpy\r\n :platform: Unix, Windows\r\n :synopsis: Simple Kinetic Models in Python\r\n\r\n.. 
moduleauthor:: SKiMPy team\r\n\r\n[---------]\r\n\r\nCopyright 2020 Laboratory of Computational Systems Biotechnology (LCSB),\r\nEcole Polytechnique Federale de Lausanne (EPFL), Switzerland\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\nhttp://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\r\n\"\"\"\r\n\r\nfrom skimpy.utils.general import sanitize_cobra_vars\r\nfrom skimpy.utils.conversions import deltag0_to_keq\r\nfrom skimpy.core.parameters import ParameterValues\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# Load and convert pytfa solution for kinetic model\r\n\r\ndef load_fluxes(solution_raw,tmodel,kmodel,\r\n density=None,\r\n ratio_gdw_gww=None,\r\n concentration_scaling=None,\r\n time_scaling=None,\r\n xmol_in_flux=1e-3):\r\n # TODO try to fetch from model\r\n if density is None \\\r\n or ratio_gdw_gww is None \\\r\n or concentration_scaling is None \\\r\n or time_scaling is None:\r\n raise ValueError(\"density, ratio_gdw_gww, concentration_scaling, or time_scaling \"\r\n \"is required as input or field of kmodel\")\r\n\r\n # Flux solution input assumed to be mmol/gDW/hr\r\n flux_scaling_factor = xmol_in_flux * (ratio_gdw_gww * density) \\\r\n * concentration_scaling \\\r\n / time_scaling\r\n\r\n fluxes_in_kmodel = list(kmodel.reactions.keys())\r\n\r\n # Convert to net-fluxes\r\n solution_nf = { this_rxn.id: (solution_raw[this_rxn.forward_variable.name] \\\r\n - solution_raw[this_rxn.reverse_variable.name]) \\\r\n for this_rxn in tmodel.reactions}\r\n\r\n # Convert tmodel net fluxes to kmodel fluxes\r\n flux_dict = {rxn: solution_nf[rxn]*flux_scaling_factor for rxn in fluxes_in_kmodel}\r\n\r\n fluxes = pd.Series(flux_dict)\r\n # Sort according to the k-model\r\n return fluxes[fluxes_in_kmodel]\r\n\r\n\r\ndef load_concentrations(solution_raw, tmodel, kmodel, concentration_scaling=None):\r\n # TODO try to fetch from model\r\n if concentration_scaling is None:\r\n raise ValueError(\"concentration_scaling is required as input or field of kmodel\")\r\n\r\n concentration_dict = {sanitize_cobra_vars(lc.id): np.exp(solution_raw[lc.variable.name])\r\n *concentration_scaling\r\n for lc in tmodel.log_concentration}\r\n concentrations = pd.Series(concentration_dict)\r\n\r\n return concentrations\r\n\r\n\r\ndef load_equilibrium_constants(solution_raw, tmodel, kmodel,\r\n concentration_scaling=None,\r\n in_place=False):\r\n # TODO try to fetch from model\r\n if concentration_scaling is None:\r\n raise ValueError(\"concentration_scaling is required as input or field of kmodel\")\r\n\r\n equilibrium_constant_dict = dict()\r\n\r\n # Calculate the fitting equilibrium constants for the kinetic models (absorb concentrations that do\r\n # not appear explicitly in the mass balance of the model in the deltag )\r\n RT = tmodel.RT\r\n\r\n rnxs_ids_with = [dg.id for dg in tmodel.delta_g]\r\n\r\n for pytfa_rxn in tmodel.reactions:\r\n\r\n if not pytfa_rxn.id in kmodel.reactions:\r\n continue\r\n\r\n if not pytfa_rxn.id in rnxs_ids_with:\r\n continue\r\n\r\n deltag0 = solution_raw[tmodel.delta_g.get_by_id(pytfa_rxn.id).name]\r\n\r\n for met, 
stoich in pytfa_rxn.metabolites.items():\r\n kin_met_id = sanitize_cobra_vars(met)\r\n if (kin_met_id in kmodel.reactants) or (kin_met_id in kmodel.parameters):\r\n var_met_lc = tmodel.log_concentration.get_by_id(met.id).name\r\n met_lc = solution_raw[var_met_lc]\r\n deltag0 -= stoich * RT * (met_lc + np.log(concentration_scaling))\r\n\r\n try:\r\n k_eq = kmodel.reactions[pytfa_rxn.id].parameters['k_equilibrium']\r\n except KeyError:\r\n continue\r\n\r\n if in_place:\r\n k_eq.value = deltag0_to_keq(deltag0, tmodel.TEMPERATURE,\r\n gas_constant=tmodel.GAS_CONSTANT)\r\n # Check how to best index this ...\r\n equilibrium_constant_dict[k_eq.symbol] = deltag0_to_keq(deltag0, tmodel.TEMPERATURE,\r\n gas_constant=tmodel.GAS_CONSTANT)\r\n\r\n return ParameterValues(equilibrium_constant_dict,kmodel)\r\n\r\n\r\n\r\n\r\n", "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: skimpy\n :platform: Unix, Windows\n :synopsis: Simple Kinetic Models in Python\n\n.. moduleauthor:: SKiMPy team\n\n[---------]\n\nCopyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),\nEcole Polytechnique Federale de Lausanne (EPFL), Switzerland\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport numpy as np\n\n# Test models\nfrom skimpy.core import *\nfrom skimpy.mechanisms import *\nfrom skimpy.sampling.simple_parameter_sampler import SimpleParameterSampler\nfrom skimpy.core.solution import ODESolutionPopulation\n\n# 'DM_13dpg' 10\n# 'DM_2h3oppan' -2\n# 'DM_pep' -8\n# 'DM_atp' -12\n# 'DM_adp' 12\n# 'DM_nadh' -2\n# 'DM_nad' 2\n# 'DM_h' -12\n# 'DM_h2o' -8\n# 'PGK' 10\n# 'PGK' 10\n# 'GLYCK' 9\n# 'PGM' 1\n# 'GLYCK2' 7\n# 'TRSARr' 2\n# 'ENO' 8\n\nflux_dict = {'PGK' : 10.0,\n 'PGM' : 1.0,\n 'GLYCK' : 9.0,\n 'GLYCK2' : 7.0,\n 'TRSARr' : 2.0,\n 'ENO' : 8.0,\n 'DM_13dpg' : 10,\n 'DM_2h3oppan' : -2,\n 'DM_pep' : -8,\n 'DM_atp' : -12,\n 'DM_adp' : 12,\n 'DM_nadh' : -2,\n 'DM_nad' : 2,\n }\n\n# Build linear Pathway model\nmetabolites_pgk = RandBiBiReversibleMichaelisMenten.Reactants(\n substrate1 = '_13dpg',\n substrate2 = 'adp',\n product1 = '_3pg',\n product2 = 'atp')\nmetabolites_pgm = ReversibleMichaelisMenten.Reactants(\n substrate = '_3pg',\n product = '_2pg')\nmetabolites_glyck = RandBiBiReversibleMichaelisMenten.Reactants(\n substrate1 = '_3pg',\n substrate2 = 'adp',\n product1 = 'glyc',\n product2 = 'atp')\nmetabolites_glyck2 = RandBiBiReversibleMichaelisMenten.Reactants(\n substrate1 = 'glyc',\n substrate2 = 'atp',\n product1 = '_2pg',\n product2 = 'adp')\nmetabolites_trsarr = RandBiBiReversibleMichaelisMenten.Reactants(\n substrate1 = 'glyc',\n substrate2 = 'nad',\n product1 = '_2h3oppan',\n product2 = 'nadh')\nmetabolites_eno = ReversibleMichaelisMenten.Reactants(\n substrate = '_2pg',\n product = 'pep')\n\n\ndef keq(deltag):\n return np.exp(-deltag*4184/(8.31*298.15))\n\n## QSSA Method\nparameters_pgm = ReversibleMichaelisMenten.Parameters(k_equilibrium=keq(-1.0))\nparameters_pgk = RandBiBiReversibleMichaelisMenten.Parameters(k_equilibrium=keq(-0.7996))\nparameters_glyck = 
RandBiBiReversibleMichaelisMenten.Parameters(k_equilibrium=keq(-5.4833))\nparameters_glyck2 = RandBiBiReversibleMichaelisMenten.Parameters(k_equilibrium=keq(-5.3603))\nparameters_trsarr = RandBiBiReversibleMichaelisMenten.Parameters(k_equilibrium=keq(-3.7509))\nparameters_eno = ReversibleMichaelisMenten.Parameters(k_equilibrium=keq(-1.6601))\n\npgk = Reaction(name='PGK',\n mechanism=RandBiBiReversibleMichaelisMenten,\n reactants=metabolites_pgk,\n )\npgm = Reaction(name='PGM',\n mechanism=ReversibleMichaelisMenten,\n reactants=metabolites_pgm,\n )\nglyck = Reaction(name='GLYCK',\n mechanism=RandBiBiReversibleMichaelisMenten,\n reactants=metabolites_glyck,\n )\nglyck2 = Reaction(name='GLYCK2',\n mechanism=RandBiBiReversibleMichaelisMenten,\n reactants=metabolites_glyck2,\n )\ntrsarr = Reaction(name='TRSARr',\n mechanism=RandBiBiReversibleMichaelisMenten,\n reactants=metabolites_trsarr,\n )\neno = Reaction(name='ENO',\n mechanism=ReversibleMichaelisMenten,\n reactants=metabolites_eno,\n )\n\n\nthis_model = KineticModel()\nthis_model.add_reaction(pgk)\nthis_model.add_reaction(pgm)\nthis_model.add_reaction(glyck)\nthis_model.add_reaction(glyck2)\nthis_model.add_reaction(trsarr)\nthis_model.add_reaction(eno)\n\nthe_boundary_condition = BoundaryFlux(\"_13dpg\", flux_dict['DM_13dpg'])\nthis_model.add_boundary_condition(the_boundary_condition)\n\nthe_boundary_condition = BoundaryFlux(\"pep\", flux_dict['DM_pep'])\nthis_model.add_boundary_condition(the_boundary_condition)\n\nthe_boundary_condition = BoundaryFlux(\"_2h3oppan\", flux_dict['DM_2h3oppan'])\nthis_model.add_boundary_condition(the_boundary_condition)\n\nthe_boundary_condition = BoundaryFlux(\"atp\", flux_dict['DM_atp'])\nthis_model.add_boundary_condition(the_boundary_condition)\n\nthe_boundary_condition = BoundaryFlux(\"adp\", flux_dict['DM_adp'])\nthis_model.add_boundary_condition(the_boundary_condition)\n\nthe_boundary_condition = BoundaryFlux(\"nad\", flux_dict['DM_nad'])\nthis_model.add_boundary_condition(the_boundary_condition)\n\nthe_boundary_condition = BoundaryFlux(\"nadh\", flux_dict['DM_nadh'])\nthis_model.add_boundary_condition(the_boundary_condition)\n\n\nthis_model.parametrize_by_reaction({'PGK' : parameters_pgk,\n 'PGM' : parameters_pgm,\n 'GLYCK' : parameters_glyck,\n 'GLYCK2': parameters_glyck2,\n 'TRSARr': parameters_trsarr,\n 'ENO' : parameters_eno})\n\n\nthis_model.prepare(mca=True)\nthis_model.compile_mca()\n\n\nconcentration_dict = {'_13dpg' : 1.0,\n '_2pg' : 1.0,\n '_3pg' : 1.0,\n 'glyc' : 1.0,\n 'pep' : 1.0,\n '_2h3oppan' : 1.0,\n 'atp' : 1.0,\n 'adp' : 1.0,\n 'nad' : 1.0,\n 'nadh' : 1.0,\n }\n\n\nparameters = SimpleParameterSampler.Parameters(n_samples=10)\nsampler = SimpleParameterSampler(parameters)\n\nparameter_population = sampler.sample(this_model, flux_dict, concentration_dict)\n\nthis_model.compile_ode(sim_type = QSSA)\n\n#\nthis_model.initial_conditions['_13dpg'] = 5.0\nthis_model.initial_conditions['_2pg'] = 1.0\nthis_model.initial_conditions['_3pg'] = 1.0\nthis_model.initial_conditions['glyc'] = 1.0\nthis_model.initial_conditions['pep'] = 1.0\nthis_model.initial_conditions['_2h3oppan'] = 1.0\nthis_model.initial_conditions['atp'] = 1.0\nthis_model.initial_conditions['adp'] = 2.0\nthis_model.initial_conditions['nad'] = 1.0\nthis_model.initial_conditions['nadh'] = 1.0\n\nthis_model.logger.setLevel('INFO')\n\nsolutions = []\nfor parameters in parameter_population:\n this_model.ode_fun.parameter_values = parameters\n this_sol_qssa = this_model.solve_ode(np.linspace(0.0, 50.0, 500), solver_type='cvode')\n 
solutions.append(this_sol_qssa)\n\nthis_sol_qssa.plot('output/non_linear_qssa.html')\n\n\nsolpop = ODESolutionPopulation(solutions)\nsolpop.plot('output/non_linear_ode_pop_{}.html')\n\n" ]
[ [ "numpy.exp", "numpy.linspace" ], [ "numpy.linspace" ], [ "numpy.log", "numpy.exp", "pandas.Series" ], [ "numpy.exp", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
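The parameter-sampling tutorial in the record above converts standard free energies into equilibrium constants through its keq() helper, exp(-deltag*4184/(8.31*298.15)) with deltag in kcal/mol at 298.15 K. The standalone sketch below only reproduces that conversion with numpy so the numbers can be checked in isolation; the inputs are made up and the printed values are approximate.

import numpy as np

def keq(deltag_kcal_per_mol):
    # Equilibrium constant from a standard free energy in kcal/mol at 298.15 K
    # (4184 J per kcal, R taken as 8.31 J/mol/K, as in the tutorial's keq helper).
    return np.exp(-deltag_kcal_per_mol * 4184.0 / (8.31 * 298.15))

print(keq(0.0))   # 1.0  -> no net thermodynamic driving force
print(keq(-1.0))  # ~5.4 -> mildly exergonic, products favoured at equilibrium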
alliecc/argoverse-api
[ "8d96ebd92195a4fdb228d45b4584fdc61d4522c9", "8d96ebd92195a4fdb228d45b4584fdc61d4522c9", "8d96ebd92195a4fdb228d45b4584fdc61d4522c9" ]
[ "argoverse/evaluation/eval_forecasting.py", "tests/test_mpl_plotting_utils.py", "tests/test_polyline_density.py" ]
[ "# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>\n\n\"\"\"This module evaluates the forecasted trajectories against the ground truth.\"\"\"\n\nimport math\nimport pickle as pkl\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\n\nfrom argoverse.map_representation.map_api import ArgoverseMap\n\nLOW_PROB_THRESHOLD_FOR_METRICS = 0.05\n\n\ndef get_ade(forecasted_trajectory: np.ndarray, gt_trajectory: np.ndarray) -> float:\n \"\"\"Compute Average Displacement Error.\n\n Args:\n forecasted_trajectory: Predicted trajectory with shape (pred_len x 2)\n gt_trajectory: Ground truth trajectory with shape (pred_len x 2)\n\n Returns:\n ade: Average Displacement Error\n\n \"\"\"\n pred_len = forecasted_trajectory.shape[0]\n ade = float(\n sum(\n math.sqrt(\n (forecasted_trajectory[i, 0] - gt_trajectory[i, 0]) ** 2\n + (forecasted_trajectory[i, 1] - gt_trajectory[i, 1]) ** 2\n )\n for i in range(pred_len)\n )\n / pred_len\n )\n return ade\n\n\ndef get_fde(forecasted_trajectory: np.ndarray, gt_trajectory: np.ndarray) -> float:\n \"\"\"Compute Final Displacement Error.\n\n Args:\n forecasted_trajectory: Predicted trajectory with shape (pred_len x 2)\n gt_trajectory: Ground truth trajectory with shape (pred_len x 2)\n\n Returns:\n fde: Final Displacement Error\n\n \"\"\"\n fde = math.sqrt(\n (forecasted_trajectory[-1, 0] - gt_trajectory[-1, 0]) ** 2\n + (forecasted_trajectory[-1, 1] - gt_trajectory[-1, 1]) ** 2\n )\n return fde\n\n\ndef get_displacement_errors_and_miss_rate(\n forecasted_trajectories: Dict[int, List[np.ndarray]],\n gt_trajectories: Dict[int, np.ndarray],\n max_guesses: int,\n horizon: int,\n miss_threshold: float,\n forecasted_probabilities: Optional[Dict[int, List[float]]] = None,\n) -> Dict[str, float]:\n \"\"\"Compute min fde and ade for each sample.\n\n Note: Both min_fde and min_ade values correspond to the trajectory which has minimum fde.\n\n Args:\n forecasted_trajectories: Predicted top-k trajectory dict with key as seq_id and value as list of trajectories.\n Each element of the list is of shape (pred_len x 2).\n gt_trajectories: Ground Truth Trajectory dict with key as seq_id and values as trajectory of\n shape (pred_len x 2)\n max_guesses: Number of guesses allowed\n horizon: Prediction horizon\n miss_threshold: Distance threshold for the last predicted coordinate\n forecasted_probabilities: Probabilites associated with forecasted trajectories.\n\n Returns:\n metric_results: Metric values for minADE, minFDE, MR, p-minADE, p-minFDE, p-MR\n \"\"\"\n metric_results: Dict[str, float] = {}\n min_ade, prob_min_ade = [], []\n min_fde, prob_min_fde = [], []\n n_misses, prob_n_misses = [], []\n for k, v in gt_trajectories.items():\n curr_min_ade = float(\"inf\")\n curr_min_fde = float(\"inf\")\n min_idx = 0\n max_num_traj = min(max_guesses, len(forecasted_trajectories[k]))\n\n # If probabilities available, use the most likely trajectories, else use the first few\n if forecasted_probabilities is not None:\n sorted_idx = np.argsort([-x for x in forecasted_probabilities[k]], kind=\"stable\")\n # sorted_idx = np.argsort(forecasted_probabilities[k])[::-1]\n pruned_probabilities = [forecasted_probabilities[k][t] for t in sorted_idx[:max_num_traj]]\n # Normalize\n prob_sum = sum(pruned_probabilities)\n pruned_probabilities = [p / prob_sum for p in pruned_probabilities]\n else:\n sorted_idx = np.arange(len(forecasted_trajectories[k]))\n pruned_trajectories = [forecasted_trajectories[k][t] for t in sorted_idx[:max_num_traj]]\n\n for j in 
range(len(pruned_trajectories)):\n fde = get_fde(pruned_trajectories[j][:horizon], v[:horizon])\n if fde < curr_min_fde:\n min_idx = j\n curr_min_fde = fde\n curr_min_ade = get_ade(pruned_trajectories[min_idx][:horizon], v[:horizon])\n min_ade.append(curr_min_ade)\n min_fde.append(curr_min_fde)\n n_misses.append(curr_min_fde > miss_threshold)\n\n if forecasted_probabilities is not None:\n prob_n_misses.append(1.0 if curr_min_fde > miss_threshold else (1.0 - pruned_probabilities[min_idx]))\n prob_min_ade.append(\n min(-np.log(pruned_probabilities[min_idx]), -np.log(LOW_PROB_THRESHOLD_FOR_METRICS)) + curr_min_ade\n )\n prob_min_fde.append(\n min(-np.log(pruned_probabilities[min_idx]), -np.log(LOW_PROB_THRESHOLD_FOR_METRICS)) + curr_min_fde\n )\n metric_results[\"minADE\"] = sum(min_ade) / len(min_ade)\n metric_results[\"minFDE\"] = sum(min_fde) / len(min_fde)\n metric_results[\"MR\"] = sum(n_misses) / len(n_misses)\n if forecasted_probabilities is not None:\n metric_results[\"p-minADE\"] = sum(prob_min_ade) / len(prob_min_ade)\n metric_results[\"p-minFDE\"] = sum(prob_min_fde) / len(prob_min_fde)\n metric_results[\"p-MR\"] = sum(prob_n_misses) / len(prob_n_misses)\n return metric_results\n\n\ndef get_drivable_area_compliance(\n forecasted_trajectories: Dict[int, List[np.ndarray]], city_names: Dict[int, str], max_n_guesses: int\n) -> float:\n \"\"\"Compute drivable area compliance metric.\n\n Args:\n forecasted_trajectories: Predicted top-k trajectory dict with key as seq_id and value as list of trajectories.\n Each element of the list is of shape (pred_len x 2).\n city_names: Dict mapping sequence id to city name.\n max_n_guesses: Maximum number of guesses allowed.\n\n Returns:\n Mean drivable area compliance\n\n \"\"\"\n avm = ArgoverseMap()\n\n dac_score = []\n\n for seq_id, trajectories in forecasted_trajectories.items():\n city_name = city_names[seq_id]\n num_dac_trajectories = 0\n n_guesses = min(max_n_guesses, len(trajectories))\n for trajectory in trajectories[:n_guesses]:\n raster_layer = avm.get_raster_layer_points_boolean(trajectory, city_name, \"driveable_area\")\n if np.sum(raster_layer) == raster_layer.shape[0]:\n num_dac_trajectories += 1\n\n dac_score.append(num_dac_trajectories / n_guesses)\n\n return sum(dac_score) / len(dac_score)\n\n\ndef compute_forecasting_metrics(\n forecasted_trajectories: Dict[int, List[np.ndarray]],\n gt_trajectories: Dict[int, np.ndarray],\n city_names: Dict[int, str],\n max_n_guesses: int,\n horizon: int,\n miss_threshold: float,\n forecasted_probabilities: Optional[Dict[int, List[float]]] = None,\n) -> Dict[str, float]:\n \"\"\"Compute all the forecasting metrics.\n\n Args:\n forecasted_trajectories: Predicted top-k trajectory dict with key as seq_id and value as list of trajectories.\n Each element of the list is of shape (pred_len x 2).\n gt_trajectories: Ground Truth Trajectory dict with key as seq_id and values as trajectory of\n shape (pred_len x 2)\n city_names: Dict mapping sequence id to city name.\n max_n_guesses: Number of guesses allowed\n horizon: Prediction horizon\n miss_threshold: Miss threshold\n forecasted_probabilities: Normalized Probabilities associated with each of the forecasted trajectories.\n\n Returns:\n metric_results: Dictionary containing values for all metrics.\n \"\"\"\n metric_results = get_displacement_errors_and_miss_rate(\n forecasted_trajectories, gt_trajectories, max_n_guesses, horizon, miss_threshold, forecasted_probabilities\n )\n metric_results[\"DAC\"] = get_drivable_area_compliance(forecasted_trajectories, 
city_names, max_n_guesses)\n\n print(\"------------------------------------------------\")\n print(f\"Prediction Horizon : {horizon}, Max #guesses (K): {max_n_guesses}\")\n print(\"------------------------------------------------\")\n print(metric_results)\n print(\"------------------------------------------------\")\n\n return metric_results\n", "# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom argoverse.utils.mpl_plotting_utils import (\n animate_polyline,\n draw_lane_polygons,\n draw_polygon_mpl,\n draw_polygonpatch_matplotlib,\n plot_bbox_2D,\n plot_lane_segment_patch,\n plot_nearby_centerlines,\n visualize_centerline,\n)\n\n\ndef test_draw_polygon_mpl_smokescreen_nolinewidth():\n \"\"\"\n \"\"\"\n ax = plt.axes([1, 1, 1, 1])\n # polygon: Numpy array of shape (N,2) or (N,3)\n polygon = np.array([[0, 0], [1, 1], [1, 0], [0, 0]])\n # color is tuple or Numpy array of shape (3,) representing RGB values\n color = np.array([255, 0, 0])\n draw_polygon_mpl(ax, polygon, color)\n plt.close(\"all\")\n\n\ndef test_draw_polygon_mpl_smokescreen_with_linewidth():\n \"\"\"\n \"\"\"\n ax = plt.axes([1, 1, 1, 1])\n # polygon: Numpy array of shape (N,2) or (N,3)\n polygon = np.array([[0, 0], [1, 1], [1, 0], [0, 0]])\n # color is tuple or Numpy array of shape (3,) representing RGB values\n color = np.array([255, 0, 0])\n linewidth = 100\n draw_polygon_mpl(ax, polygon, color, linewidth=linewidth)\n plt.close(\"all\")\n\n\ndef test_plot_lane_segment_patch_smokescreen():\n \"\"\"\n \"\"\"\n ax = plt.axes([1, 1, 1, 1])\n polygon_pts = np.array([[-1, 0], [1, 0], [0, 1]])\n color = \"r\"\n alpha = 0.9\n plot_lane_segment_patch(polygon_pts, ax, color, alpha)\n plt.close(\"all\")\n\n\ndef test_plot_nearby_centerlines_smokescreen():\n \"\"\"\n \"\"\"\n ax = plt.axes([1, 1, 1, 1])\n # lane_centerlines: Python dictionary where key is lane ID, value is\n # object describing the lane\n lane_centerlines = {}\n lane_id_1 = 20\n obj_1 = {\"centerline\": np.array([[0, 0], [1, 1], [2, 2]])}\n lane_centerlines[lane_id_1] = obj_1\n\n lane_id_1 = 2000\n obj_2 = {\"centerline\": np.array([[0, -1], [0, -2], [0, -3]])}\n lane_centerlines[lane_id_1] = obj_2\n\n nearby_lane_ids = [20, 2000]\n color = \"g\"\n plot_nearby_centerlines(lane_centerlines, ax, nearby_lane_ids, color)\n plt.close(\"all\")\n\n\ndef test_animate_polyline_smokescreen():\n \"\"\"\n \n \"\"\"\n polyline = np.array([[0, 0], [1, 1], [2, 0], [0, 2]])\n axes_margin = 2\n animate_polyline(polyline, axes_margin, show_plot=False)\n plt.close(\"all\")\n", "# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>\n\nimport numpy as np\n\nfrom argoverse.utils import polyline_density\n\n\ndef test_polyline_length():\n line = np.array([[0, 0], [0, 1], [1, 1], [1, 0], [2, 0], [2, 1]])\n\n length = polyline_density.get_polyline_length(line)\n assert abs(length - 5.0) < 1e-10\n" ]
[ [ "numpy.argsort", "numpy.log", "numpy.sum" ], [ "matplotlib.pyplot.axes", "numpy.array", "matplotlib.pyplot.close" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
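The forecasting-evaluation module in the record above defines ADE and FDE as the mean and final Euclidean distances between a predicted and a ground-truth trajectory. The snippet below is a small numpy-only illustration of those two definitions; it does not import argoverse, and the helper names ade/fde and the toy trajectories are invented for the example.

import numpy as np

def ade(pred, gt):
    # Average Displacement Error: mean point-wise Euclidean distance.
    return float(np.mean(np.linalg.norm(pred - gt, axis=1)))

def fde(pred, gt):
    # Final Displacement Error: Euclidean distance at the last timestep.
    return float(np.linalg.norm(pred[-1] - gt[-1]))

gt = np.stack([np.arange(5.0), np.zeros(5)], axis=1)  # straight line along x
pred = gt + np.array([0.0, 1.0])                      # constant 1 m offset in y
print(ade(pred, gt), fde(pred, gt))                   # 1.0 1.0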
rscalzo/sami
[ "7ac5632e018cdf2384f5ff067c503177684f61c8", "7ac5632e018cdf2384f5ff067c503177684f61c8" ]
[ "utils/cCirc.py", "samifitting.py" ]
[ "from __future__ import print_function\n\n\"\"\"\nWrapper for the C++ drizzle overlap code.\n\nHistory\n-------\n\nCreated by Jon Nielsen in 2012\nUpdated for the new cubing algorithm by Francesco D'Eugenio 16/02/2017\n\nNotes\n-----\nThis module contains a testing function. At the moment it requires that the libraries path be hardcoded (FDE).\n\"\"\"\n\nimport ctypes as C\nimport numpy as np\nimport os.path\n\n# Load the shared library\ntry:\n libcm = C.CDLL(os.path.join(os.path.dirname(__file__), \"libcCirc.so\"))\nexcept:\n raise ImportError\n pass\n #libcm = C.CDLL(os.path.join('/home/franz/software/dev/sami-software-dev/dr0.10/utils', 'libcCirc.so'))\n\n# Specify the arguments our function takes:\n# First argument is a 1D array of doubles. It must be contiguous in memory.\n# Second argument is a regular C long.\n# Third argument is a pointer to a double.\nlibcm.weight_map.argtypes = [\n C.c_long, C.c_long, C.c_double, C.c_double, C.c_double,\n np.ctypeslib.ndpointer(dtype='d', ndim=1, flags='C_CONTIGUOUS')]\n\n\n\ndef resample_circle(nx, ny, xc, yc, r, *args):\n output = np.zeros(nx*ny)\n libcm.weight_map(nx, ny, xc, yc, r, output)\n return output.reshape(ny, nx)\n\n\n\n\n# Specify the arguments our function takes:\n# First argument is a 1D array of doubles. It must be contiguous in memory.\n# Second argument is a regular C long.\n# Third argument is a pointer to a double.\nlibcm.weight_map_Gaussian.argtypes = [\n C.c_long, C.c_long, C.c_double, C.c_double, C.c_double, C.c_double,\n np.ctypeslib.ndpointer(dtype='d', ndim=1, flags='C_CONTIGUOUS')]\n\n\n\ndef inteGrauss2d(nx, ny, xc, yc, sigma, n_sigma):\n output = np.zeros(nx*ny)\n libcm.weight_map_Gaussian(nx, ny, xc, yc, sigma, n_sigma, output)\n return output.reshape(ny, nx)\n\n\n\nif __name__ == \"__main__\":\n\n import time\n import pylab as plt\n from mpl_toolkits.axes_grid1 import ImageGrid\n plt.switch_backend('pdf')\n\n try:\n from texttable import Texttable\n\n def print_table(headers, times):\n table = Texttable()\n headers = ['Method'] + headers\n times = [u'Elapsed time (\\u03bcs)'] + times\n table.add_rows([[h, t] for h, t in zip(headers, times)])\n table.set_cols_align([\"l\", \"c\"])\n\n print(table.draw())\n\n except ImportError:\n warnings.warn('To print formatted output, please install `texttable`')\n\n def print_table(headers, times):\n for h, e in zip(headers, elapsed):\n print(h, '{:8.2f}'.format(e/float(n_iterations)*1.e6),\n u'\\u03bc'+'s')\n\n\n from circ import resample_circle as rc_py\n from circ import inteGrauss2d as ig_py\n rc_C = resample_circle\n ig_C = inteGrauss2d\n\n # This circle show that the python implementation of cCirc wraps around,\n # which is dumb\n # (50, 50, 27.527636662069785, 2.716882503265406, 4.9423403572267581,\n # 2.0454945513296701))\n\n # Define some representative cases.\n # Each has (xpix, ypix, xc, yc, [r/sigma], [clip])\n tests = ((10, 10, 5, 5, 1, 3),\n (10, 10, 3, 5, 1, 3),\n (10, 10, 3.1, 5.7, 1, 3))\n tests = ((50, 50, 27.527636662069785, 23.716882503265406, 4.9423403572267581, 2.0454945513296701),\n (50, 50, 27.527636662069785, 2.716882503265406, 4.9423403572267581, 2.0454945513296701)) # <=\n\n # Define some circles to plot. 
Notice that for `resample_circle` the last\n # element of each tuple is ignored, while the penultimate is the radius.\n # for `inteGrauss2d` the last element defines the truncation and the\n # penultimate element defines the standard deviation of the Gaussian.\n tests = (\n (10, 10, 5., 1.0, 2., 2.),\n (10, 10, 5.65, 1.2, 2.31, 2.001))\n tests = ((50, 50, 9., 9., 1.6, 5.),)\n\n # Number of iterations for the benchmark.\n n_iterations = 10000\n\n n_tests = len(tests)\n\n plt.clf()\n fig = plt.figure(figsize=(18, 3 * n_tests))\n grid = ImageGrid(fig, 111, nrows_ncols=(n_tests, 6),\n cbar_location='top', cbar_mode='edge',\n cbar_pad='0%', cbar_size='5%', direction='row',\n axes_pad=(0, 0))\n \n wmaps = [[rc_py(*t), rc_C(*t), ig_py(*t), ig_C(*t)] for t in tests]\n wmaps = [[w[0], w[1], w[0]-w[1], w[2], w[3], w[2]-w[3]] for w in wmaps]\n wmaps = sum(wmaps, []) # Flatten the list.\n \n # Reformat `tests` so that we can iterate over it for each subplot.\n tests = ((t,)*6 for t in tests) # Repeat each test for every plot that uses it.\n tests = sum(tests, ()) # Flatten this tuple.\n \n\n y_labels = ['$\\mathrm{Test\\;'+ '{}'.format(n) + '}$'\n for n in range(1, n_tests+1)]\n x_labels = ['$\\mathrm{resample\\_circ \\; (python)}$',\n '$\\mathrm{resample\\_circ \\; (C++)}$',\n '$\\mathrm{residuals}$',\n '$\\mathrm{inteGrauss2d \\; (python)}$',\n '$\\mathrm{inteGrauss2d \\; (C++)}$',\n '$\\mathrm{residuals}$']\n\n for n, (wm, t, ax) in enumerate(zip(wmaps, tests, grid)):\n img = ax.imshow(wm, origin='lower', interpolation='none')\n ax.plot([t[2]-.5], [t[3]-.5], 'mx')\n if n >= (n_tests - 1) * 6: # Trigger the colourbar.\n cbar = ax.cax.colorbar(img)\n cbar.solids.set_edgecolor('face')\n\n if n % 6 == 0: # We are at the first column.\n ax.set_ylabel(y_labels[n // 6])\n if n // 6 >= (n_tests - 1): # We are at the bottom row.\n ax.set_xlabel(x_labels[n % 6])\n \n #plt.subplots_adjust(wspace=0., hspace=0.)\n plt.savefig('test_ccircs.pdf')\n \n \n \n # Benchmarking.\n \n # Create random tests to prevent system caching of interemdiate results in\n # the C implementation - that would be unfair.\n \n grid_size = np.repeat(50, n_iterations)\n xc, yc = np.random.uniform(10, 40, (2, n_iterations))\n radius = np.random.uniform(1, 5, n_iterations)\n n_sigma = np.random.uniform(1, 5, n_iterations)\n \n tests = [\n (xpix, ypix, x, y, r, ns)\n for xpix, ypix, x, y, r, ns in zip(\n grid_size, grid_size, xc, yc, radius, n_sigma)]\n \n start_time, elapsed, results = [], [], []\n \n print('Benchmarking function `resample_circle` (Top hat cubing, python)...')\n start_time.append(time.time())\n results.append(np.array([rc_py(*t) for t in tests]))\n elapsed.append(time.time() - start_time[-1])\n \n print('Benchmarking function `resample_circle` (Top hat cubing, C++)...')\n start_time.append(time.time())\n results.append(np.array([rc_C(*t) for t in tests]))\n elapsed.append(time.time() - start_time[-1])\n \n print('Benchmarking function `inteGrauss2d` (Gaussian cubing, python)...')\n start_time.append(time.time())\n results.append(np.array([ig_py(*t) for t in tests]))\n elapsed.append(time.time() - start_time[-1])\n \n print('Benchmarking function `inteGrauss2d` (Gaussian cubing, C++)...')\n start_time.append(time.time())\n results.append(np.array([ig_C(*t) for t in tests]))\n elapsed.append(time.time() - start_time[-1])\n \n print('Summary:')\n headers = ['`resample_circle` (Top hat cubing, python)',\n '`resample_circle` (Top hat cubing, C++)',\n '`inteGrauss2d` (Gaussian cubing, python)',\n '`inteGrauss2d` (Gaussian 
cubing, C++)']\n print_table(headers, elapsed)\n", "\"\"\"\r\nThis file contains various fitting functions for general use with SAMI codes.\r\n\r\nCurrently included:\r\n\r\nGaussFitter - Gaussian Fitter (1d)\r\nGaussHermiteFitter - Fits a truncated Gauss-Hermite expansion (1d)\r\nTwoDGaussFitter - Gaussian Fitter (2d, optionally w/ PA and different widths)\r\n\r\nWould be nice:\r\n\r\nExponential Fitter?\r\nOthers?\r\n\r\nExample of the class format is below. Should have a list of things that can\r\nbe accessed in all class definitions (chi-2 etc.)\r\n\r\nTo use these fitting classes, initialise them using the initial guesses of the\r\nparameters, along with the coordinates, data and (optionally) weights. Then\r\ncall the fit function to perform the fit. The best fit parameters are then\r\nstored in p. For example:\r\n\r\nmy_fitter = TwoDGaussFitter(initial_p, x, y, data, weights)\r\nmy_fitter.fit()\r\nbest_fit_p = my_fitter.p\r\n\r\nIf you want to integrate over each fibre, use the fibre_integrator function\r\n*before* performing the fit. The diameter must be provided in the same units\r\nthat x and y will be in. For example:\r\n\r\nmy_fitter = TwoDGaussFitter(initial_p, x, y, data, weights)\r\nfibre_integrator(my_fitter, 1.6)\r\nmy_fitter.fit()\r\nbest_fit_p = my_fitter.p\r\n\r\nFor integration over square pixels rather than round fibres, use:\r\nfibre_integrator(my_fitter, 0.7, pixel=True)\r\n\r\nCalling an instance of a fitter will return the model values at the provided\r\ncoordinates. So, after either of the above examples:\r\n\r\nmy_fitter(x, y)\r\n\r\nwould return the best-fit model values at the coordinates (x, y).\r\n\r\nTODO: Make a BaseFitter class containing the basic functionality that other\r\nclasses can inherit from.\r\n\r\nTODO: Implement limits in a better way, i.e. use a minimisation function\r\nthat incorporates limits rather than the \"return 1e99\" method used below.\r\n\r\nTODO: Rename this module to fitting, rather than samifitting.\r\n\"\"\"\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals\r\n\r\nfrom scipy.optimize import leastsq\r\nimport scipy as sp\r\nimport numpy as np\r\n\r\nclass FittingException(Exception):\r\n \"\"\"Could I make this do something useful?\"\"\"\r\n pass\r\n\r\nclass GaussFitter:\r\n \"\"\" Fits a 1d Gaussian to data. Params in form list p (amplitude, mean, sigma, offset). Offset is optional.\"\"\"\r\n\r\n def __init__(self, p, x, y, weights=None):\r\n self.p_start = p\r\n self.p = p\r\n self.x = x\r\n self.y = y\r\n if weights == None:\r\n self.weights = sp.ones(len(self.y))\r\n else:\r\n self.weights = weights\r\n\r\n self.perr = 0.\r\n self.var_fit = 0.\r\n\r\n if len(p) == 4 and p[0]>0.:\r\n self.fitfunc = self.f1\r\n elif len(p) == 3:\r\n # no base\r\n self.fitfunc = self.f2\r\n elif len(p) == 4 and p[0] < 0.:\r\n self.p[0] = abs(self.p[0])\r\n self.fitfunc = self.f3\r\n \r\n def f1(self, p, x): \r\n return p[0]*sp.exp(-(p[1]-x)**2/(2*p[2]**2)) + p[3]\r\n \r\n def f2(self, p, x): \r\n return p[0]*sp.exp(-(p[1]-x)**2/(2*p[2]**2)) + 0.\r\n \r\n def f3(self, p, x): \r\n return -p[0]*sp.exp(-(p[1]-x)**2/(2*p[2]**2)) + p[3]\r\n\r\n def errfunc(self, p, x, y, weights):\r\n # if width < 0 return input\r\n if p[2] < 0. or p[0] < 0.:\r\n # if we get a negative sigma value then penalise massively because\r\n # that is silly.\r\n # This method isn't great, as it can make leastsq confused about\r\n # the convergence criteria. 
Would be better to use something like\r\n # scipy.optimize.minimize that properly incorporates limits. Same\r\n # goes for the other classes in this module.\r\n return 1e99\r\n else:\r\n return weights*(self.fitfunc(p, x) - y)\r\n\r\n def fit(self):\r\n\r\n self.p, self.cov_x, self.infodict, self.mesg, self.success = \\\r\n leastsq(self.errfunc, self.p, \\\r\n args=(self.x, self.y, self.weights), full_output=1)\r\n\r\n var_fit = (self.errfunc(self.p, self.x, \\\r\n self.y, self.weights)**2).sum()/(len(self.y)-len(self.p))\r\n\r\n self.var_fit = var_fit\r\n\r\n if self.cov_x is not None:\r\n self.perr = sp.sqrt(self.cov_x.diagonal())*self.var_fit\r\n\r\n if not self.success in [1,2,3,4]:\r\n print(\"Fit Failed\")\r\n #raise FittingException(\"Fit failed\") # This does nothing.\r\n \r\n self.linestr=self.p[0]*self.p[2]*sp.sqrt(2*sp.pi)\r\n #self.line_err=S.sqrt(self.linestr*self.linestr*((self.perr[0]/self.p[0])**2+(self.perr[2]/self.p[2])**2))\r\n\r\n def __call__(self, x):\r\n return self.fitfunc(self.p, x)\r\n\r\nclass GaussHermiteFitter:\r\n \"\"\"Parameters list p contains, in order, amplitude, mean, sigma, h3, h4, bias, where the bias is optional\"\"\"\r\n\r\n def __init__(self, p, x, y, weights=None):\r\n self.p_start = p\r\n self.p = p\r\n self.x = x\r\n self.y = y\r\n if weights == None:\r\n self.weights = S.ones(len(self.y))\r\n else:\r\n self.weights = weights\r\n\r\n self.perr = 0.\r\n self.var_fit = 0.\r\n\r\n # Which function to use depends on the input parameters \r\n if len(p) == 5 and p[0]>0.:\r\n # Fit a truncated Gauss-Hermite sequence.\r\n self.fitfunc = self.f1\r\n elif len(p) == 6 and p[0]>0.:\r\n # Fit a truncated Gauss-Hermite sequence with a bias.\r\n self.fitfunc = self.f2\r\n else:\r\n raise Exception\r\n \r\n def f1(self, p, x):\r\n w=(p[1]-x)/(p[2])\r\n H3=(p[3]*sp.sqrt(2)/sp.sqrt(6))*((2*w**3)-(3*w))\r\n H4=(p[4]/sp.sqrt(24))*(4*w**4-12*w**2+3)\r\n gauss=p[0]*sp.exp(-w**2/2)\r\n \r\n gh=gauss*(1+H3+H4)\r\n return gh\r\n\r\n def f2(self, p, x):\r\n w=(p[1]-x)/(p[2])\r\n H3=(p[3]*sp.sqrt(2)/sp.sqrt(6))*((2*w**3)-(3*w))\r\n H4=(p[4]/sp.sqrt(24))*(4*w**4-12*w**2+3)\r\n gauss=p[0]*sp.exp(-w**2/2)\r\n \r\n gh2=gauss*(1+H3+H4)+p[5]\r\n return gh2\r\n\r\n def errfunc(self, p, x, y, weights):\r\n if p[2] < 0. or p[0] < 0.:\r\n # if we get a negative sigma value then penalise massively because\r\n # that is silly.\r\n return 1e99\r\n else:\r\n return weights*(self.fitfunc(p, x) - y)\r\n\r\n def fit(self):\r\n\r\n self.p, self.cov_x, self.infodict, self.mesg, self.success = \\\r\n leastsq(self.errfunc, self.p, \\\r\n args=(self.x, self.y, self.weights), full_output=1)\r\n\r\n var_fit = (self.errfunc(self.p, self.x, \\\r\n self.y, self.weights)**2).sum()/(len(self.y)-len(self.p))\r\n\r\n self.var_fit = var_fit\r\n\r\n if self.cov_x is not None:\r\n self.perr = S.sqrt(self.cov_x.diagonal())*self.var_fit\r\n\r\n # Would like to return the linestrength and associated error\r\n gamma=self.p[0]*self.p[2]*S.sqrt(2*S.pi)\r\n gamma_err=S.sqrt(gamma*gamma*((self.perr[0]/self.p[0])**2+(self.perr[1]/self.p[1])**2))\r\n self.linestr=gamma*(1+S.sqrt(6)*self.p[4]/4)\r\n self.line_err=S.sqrt(gamma_err**2*(1+S.sqrt(6)*self.p[4]/4)**2+self.perr[4]**2*(S.sqrt(6)*gamma_err/4)**2)\r\n\r\n if not self.success in [1,2,3,4]:\r\n print(\"Fit Failed...\")\r\n #raise FittingException(\"Fit failed\")\r\n\r\n def __call__(self, x):\r\n return self.fitfunc(self.p, x)\r\n\r\nclass TwoDGaussFitter:\r\n \"\"\" Fits a 2d Gaussian with PA and ellipticity. 
Params in form (amplitude, mean_x, mean_y, sigma_x, sigma_y,\r\n rotation, offset). Offset is optional. To fit a circular Gaussian use (amplitude, mean_x, mean_y, sigma, offset),\r\n again offset is optional.\"\"\"\r\n\r\n def __init__(self, p, x, y, z, weights=None):\r\n self.p_start = p\r\n self.p = p\r\n self.x = x\r\n self.y = y\r\n self.z = z\r\n \r\n if weights == None:\r\n self.weights = sp.ones(len(self.z))\r\n else:\r\n self.weights = weights\r\n\r\n self.perr = 0.\r\n self.var_fit = 0.\r\n\r\n if len(p) == 7:\r\n # 2d elliptical Gaussian with offset.\r\n self.p[0] = abs(self.p[0]) # amplitude should be positive.\r\n self.fitfunc = self.f1\r\n \r\n elif len(p) == 6:\r\n # 2d elliptical Gaussian witout offset.\r\n self.p[0] = abs(self.p[0]) # amplitude should be positive.\r\n self.fitfunc = self.f2\r\n \r\n elif len(p) == 5:\r\n # 2d circular Gaussian with offset.\r\n self.p[0] = abs(self.p[0]) # amplitude should be positive.\r\n self.fitfunc = self.f3\r\n \r\n elif len(p) == 4:\r\n self.p[0] = abs(self.p[0])\r\n self.fitfunc = self.f4\r\n\r\n else:\r\n raise Exception\r\n\r\n def f1(self, p, x, y):\r\n # f1 is an elliptical Gaussian with PA and a bias level.\r\n\r\n rot_rad=p[5]*sp.pi/180 # convert rotation into radians.\r\n\r\n rc_x=p[1]*sp.cos(rot_rad)-p[2]*sp.sin(rot_rad)\r\n rc_y=p[1]*sp.sin(rot_rad)+p[2]*sp.cos(rot_rad)\r\n \r\n return p[0]*sp.exp(-(((rc_x-(x*sp.cos(rot_rad)-y*sp.sin(rot_rad)))/p[3])**2\\\r\n +((rc_y-(x*sp.sin(rot_rad)+y*sp.cos(rot_rad)))/p[4])**2)/2)+p[6]\r\n\r\n def f2(self, p, x, y):\r\n # f2 is an elliptical Gaussian with PA and no bias level.\r\n\r\n rot_rad=p[5]*sp.pi/180 # convert rotation into radians.\r\n\r\n rc_x=p[1]*sp.cos(rot_rad)-p[2]*sp.sin(rot_rad)\r\n rc_y=p[1]*sp.sin(rot_rad)+p[2]*sp.cos(rot_rad)\r\n \r\n return p[0]*sp.exp(-(((rc_x-(x*sp.cos(rot_rad)-y*sp.sin(rot_rad)))/p[3])**2\\\r\n +((rc_y-(x*sp.sin(rot_rad)+y*sp.cos(rot_rad)))/p[4])**2)/2)\r\n\r\n def f3(self, p, x, y):\r\n # f3 is a circular Gaussian, p in form (amplitude, mean_x, mean_y, sigma, offset).\r\n return p[0]*sp.exp(-(((p[1]-x)/p[3])**2+((p[2]-y)/p[3])**2)/2)+p[4]\r\n\r\n def f4(self, p, x, y):\r\n # f4 is a circular Gaussian as f3 but without an offset\r\n return p[0]*sp.exp(-(((p[1]-x)/p[3])**2+((p[2]-y)/p[3])**2)/2)\r\n\r\n def errfunc(self, p, x, y, z, weights):\r\n # if width < 0 return input\r\n #if p[3] < 0. or p[4] < 0. 
or p[0] < 0.: # This isn't valid for the circular case!\r\n # if we get negative sigma values then penalise massively because\r\n # that is silly.\r\n #return 1e99\r\n #else:\r\n return weights*(self.fitfunc(p, x, y) - z)\r\n\r\n def fit(self):\r\n\r\n #print(np.shape(self.x), np.shape(self.y), np.shape(self.z))\r\n\r\n self.p, self.cov_x, self.infodict, self.mesg, self.success = \\\r\n leastsq(self.errfunc, self.p, \\\r\n args=(self.x, self.y, self.z, self.weights), full_output=1)\r\n\r\n var_fit = (self.errfunc(self.p, self.x, \\\r\n self.y, self.z, self.weights)**2).sum()/(len(self.z)-len(self.p))\r\n\r\n self.var_fit = var_fit\r\n\r\n if self.cov_x is not None:\r\n self.perr = sp.sqrt(self.cov_x.diagonal())*self.var_fit\r\n\r\n if not self.success in [1,2,3,4]:\r\n print(\"Fit Failed...\")\r\n #raise ExpFittingException(\"Fit failed\")\r\n\r\n def __call__(self, x, y):\r\n return self.fitfunc(self.p, x, y)\r\n \r\ndef fibre_integrator(fitter, diameter, pixel=False):\r\n \"\"\"Edits a fitter's fitfunc so that it integrates over each SAMI fibre.\"\"\"\r\n\r\n # Save the diameter; not used here but could be useful later\r\n fitter.diameter = diameter\r\n\r\n # Define the subsampling points to use\r\n n_pix = 5 # Number of sampling points across the fibre\r\n # First make a 1d array of subsample points\r\n delta_x = np.linspace(-0.5 * (diameter * (1 - 1.0/n_pix)),\r\n 0.5 * (diameter * (1 - 1.0/n_pix)),\r\n num=n_pix)\r\n delta_y = delta_x\r\n # Then turn that into a 2d grid of (delta_x, delta_y) centred on (0, 0)\r\n delta_x = np.ravel(np.outer(delta_x, np.ones(n_pix)))\r\n delta_y = np.ravel(np.outer(np.ones(n_pix), delta_y))\r\n if pixel:\r\n # Square pixels; keep everything\r\n n_keep = n_pix**2\r\n else:\r\n # Round fibres; only keep the points within one radius\r\n keep = np.where(delta_x**2 + delta_y**2 < (0.5 * diameter)**2)[0]\r\n n_keep = np.size(keep)\r\n delta_x = delta_x[keep]\r\n delta_y = delta_y[keep]\r\n\r\n old_fitfunc = fitter.fitfunc\r\n\r\n def integrated_fitfunc(p, x, y):\r\n # The fitter's fitfunc will be replaced by this one\r\n n_fib = np.size(x)\r\n x_sub = (np.outer(delta_x, np.ones(n_fib)) +\r\n np.outer(np.ones(n_keep), x))\r\n y_sub = (np.outer(delta_y, np.ones(n_fib)) +\r\n np.outer(np.ones(n_keep), y))\r\n return np.mean(old_fitfunc(p, x_sub, y_sub), 0)\r\n\r\n # Replace the fitter's fitfunc\r\n fitter.fitfunc = integrated_fitfunc\r\n\r\n return\r\n\r\n" ]
[ [ "numpy.ctypeslib.ndpointer", "numpy.random.uniform", "numpy.repeat", "numpy.zeros" ], [ "numpy.linspace", "numpy.ones", "scipy.sqrt", "numpy.size", "scipy.optimize.leastsq", "scipy.sin", "numpy.where", "scipy.exp", "scipy.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ShihanYang/dgl
[ "ec2e24be6dcc3bcc3d4ad5dff78212735f9db00f" ]
[ "tests/compute/test_transform.py" ]
[ "from scipy import sparse as spsp\nimport unittest\nimport networkx as nx\nimport numpy as np\nimport dgl\nimport dgl.function as fn\nimport backend as F\nfrom dgl.graph_index import from_scipy_sparse_matrix\nimport unittest\nfrom utils import parametrize_dtype\n\nD = 5\n\n# line graph related\n\ndef test_line_graph():\n N = 5\n G = dgl.DGLGraph(nx.star_graph(N))\n G.edata['h'] = F.randn((2 * N, D))\n n_edges = G.number_of_edges()\n L = G.line_graph(shared=True)\n assert L.number_of_nodes() == 2 * N\n L.ndata['h'] = F.randn((2 * N, D))\n # update node features on line graph should reflect to edge features on\n # original graph.\n u = [0, 0, 2, 3]\n v = [1, 2, 0, 0]\n eid = G.edge_ids(u, v)\n L.nodes[eid].data['h'] = F.zeros((4, D))\n assert F.allclose(G.edges[u, v].data['h'], F.zeros((4, D)))\n\n # adding a new node feature on line graph should also reflect to a new\n # edge feature on original graph\n data = F.randn((n_edges, D))\n L.ndata['w'] = data\n assert F.allclose(G.edata['w'], data)\n\n@parametrize_dtype\ndef test_hetero_linegraph(index_dtype):\n g = dgl.graph(([0, 1, 1, 2, 2],[2, 0, 2, 0, 1]),\n 'user', 'follows', index_dtype=index_dtype)\n lg = dgl.line_heterograph(g)\n assert lg.number_of_nodes() == 5\n assert lg.number_of_edges() == 8\n row, col = lg.edges()\n assert np.array_equal(F.asnumpy(row),\n np.array([0, 0, 1, 2, 2, 3, 4, 4]))\n assert np.array_equal(F.asnumpy(col),\n np.array([3, 4, 0, 3, 4, 0, 1, 2]))\n\n lg = dgl.line_heterograph(g, backtracking=False)\n assert lg.number_of_nodes() == 5\n assert lg.number_of_edges() == 4\n row, col = lg.edges()\n assert np.array_equal(F.asnumpy(row),\n np.array([0, 1, 2, 4]))\n assert np.array_equal(F.asnumpy(col),\n np.array([4, 0, 3, 1]))\n g = dgl.graph(([0, 1, 1, 2, 2],[2, 0, 2, 0, 1]),\n 'user', 'follows', restrict_format='csr', index_dtype=index_dtype)\n lg = dgl.line_heterograph(g)\n assert lg.number_of_nodes() == 5\n assert lg.number_of_edges() == 8\n row, col = lg.edges()\n assert np.array_equal(F.asnumpy(row),\n np.array([0, 0, 1, 2, 2, 3, 4, 4]))\n assert np.array_equal(F.asnumpy(col),\n np.array([3, 4, 0, 3, 4, 0, 1, 2]))\n\n g = dgl.graph(([0, 1, 1, 2, 2],[2, 0, 2, 0, 1]),\n 'user', 'follows', restrict_format='csc', index_dtype=index_dtype)\n lg = dgl.line_heterograph(g)\n assert lg.number_of_nodes() == 5\n assert lg.number_of_edges() == 8\n row, col, eid = lg.edges('all')\n row = F.asnumpy(row)\n col = F.asnumpy(col)\n eid = F.asnumpy(eid).astype(int)\n order = np.argsort(eid)\n assert np.array_equal(row[order],\n np.array([0, 0, 1, 2, 2, 3, 4, 4]))\n assert np.array_equal(col[order],\n np.array([3, 4, 0, 3, 4, 0, 1, 2]))\n\ndef test_no_backtracking():\n N = 5\n G = dgl.DGLGraph(nx.star_graph(N))\n L = G.line_graph(backtracking=False)\n assert L.number_of_nodes() == 2 * N\n for i in range(1, N):\n e1 = G.edge_id(0, i)\n e2 = G.edge_id(i, 0)\n assert not L.has_edge_between(e1, e2)\n assert not L.has_edge_between(e2, e1)\n\n# reverse graph related\ndef test_reverse():\n g = dgl.DGLGraph()\n g.add_nodes(5)\n # The graph need not to be completely connected.\n g.add_edges([0, 1, 2], [1, 2, 1])\n g.ndata['h'] = F.tensor([[0.], [1.], [2.], [3.], [4.]])\n g.edata['h'] = F.tensor([[5.], [6.], [7.]])\n rg = g.reverse()\n\n assert g.is_multigraph == rg.is_multigraph\n\n assert g.number_of_nodes() == rg.number_of_nodes()\n assert g.number_of_edges() == rg.number_of_edges()\n assert F.allclose(F.astype(rg.has_edges_between(\n [1, 2, 1], [0, 1, 2]), F.float32), F.ones((3,)))\n assert g.edge_id(0, 1) == rg.edge_id(1, 0)\n assert 
g.edge_id(1, 2) == rg.edge_id(2, 1)\n assert g.edge_id(2, 1) == rg.edge_id(1, 2)\n\n # test dgl.reverse_heterograph\n # test homogeneous graph\n g = dgl.graph((F.tensor([0, 1, 2]), F.tensor([1, 2, 0])))\n g.ndata['h'] = F.tensor([[0.], [1.], [2.]])\n g.edata['h'] = F.tensor([[3.], [4.], [5.]])\n g_r = dgl.reverse_heterograph(g)\n assert g.number_of_nodes() == g_r.number_of_nodes()\n assert g.number_of_edges() == g_r.number_of_edges()\n u_g, v_g, eids_g = g.all_edges(form='all')\n u_rg, v_rg, eids_rg = g_r.all_edges(form='all')\n assert F.array_equal(u_g, v_rg)\n assert F.array_equal(v_g, u_rg)\n assert F.array_equal(eids_g, eids_rg)\n assert F.array_equal(g.ndata['h'], g_r.ndata['h'])\n assert len(g_r.edata) == 0\n\n # without share ndata\n g_r = dgl.reverse_heterograph(g, copy_ndata=False)\n assert g.number_of_nodes() == g_r.number_of_nodes()\n assert g.number_of_edges() == g_r.number_of_edges()\n assert len(g_r.ndata) == 0\n assert len(g_r.edata) == 0\n\n # with share ndata and edata\n g_r = dgl.reverse_heterograph(g, copy_ndata=True, copy_edata=True)\n assert g.number_of_nodes() == g_r.number_of_nodes()\n assert g.number_of_edges() == g_r.number_of_edges()\n assert F.array_equal(g.ndata['h'], g_r.ndata['h'])\n assert F.array_equal(g.edata['h'], g_r.edata['h'])\n\n # add new node feature to g_r\n g_r.ndata['hh'] = F.tensor([0, 1, 2])\n assert ('hh' in g.ndata) is False\n assert ('hh' in g_r.ndata) is True\n\n # add new edge feature to g_r\n g_r.edata['hh'] = F.tensor([0, 1, 2])\n assert ('hh' in g.edata) is False\n assert ('hh' in g_r.edata) is True\n\n # test heterogeneous graph\n g = dgl.heterograph({\n ('user', 'follows', 'user'): ([0, 1, 2, 4, 3 ,1, 3], [1, 2, 3, 2, 0, 0, 1]),\n ('user', 'plays', 'game'): ([0, 0, 2, 3, 3, 4, 1], [1, 0, 1, 0, 1, 0, 0]),\n ('developer', 'develops', 'game'): ([0, 1, 1, 2], [0, 0, 1, 1])})\n g.nodes['user'].data['h'] = F.tensor([0, 1, 2, 3, 4])\n g.nodes['user'].data['hh'] = F.tensor([1, 1, 1, 1, 1])\n g.nodes['game'].data['h'] = F.tensor([0, 1])\n g.edges['follows'].data['h'] = F.tensor([0, 1, 2, 4, 3 ,1, 3])\n g.edges['follows'].data['hh'] = F.tensor([1, 2, 3, 2, 0, 0, 1])\n g_r = dgl.reverse_heterograph(g)\n\n for etype_g, etype_gr in zip(g.canonical_etypes, g_r.canonical_etypes):\n assert etype_g[0] == etype_gr[2]\n assert etype_g[1] == etype_gr[1]\n assert etype_g[2] == etype_gr[0]\n assert g.number_of_edges(etype_g) == g_r.number_of_edges(etype_gr)\n for ntype in g.ntypes:\n assert g.number_of_nodes(ntype) == g_r.number_of_nodes(ntype)\n assert F.array_equal(g.nodes['user'].data['h'], g_r.nodes['user'].data['h'])\n assert F.array_equal(g.nodes['user'].data['hh'], g_r.nodes['user'].data['hh'])\n assert F.array_equal(g.nodes['game'].data['h'], g_r.nodes['game'].data['h'])\n assert len(g_r.edges['follows'].data) == 0\n u_g, v_g, eids_g = g.all_edges(form='all', etype=('user', 'follows', 'user'))\n u_rg, v_rg, eids_rg = g_r.all_edges(form='all', etype=('user', 'follows', 'user'))\n assert F.array_equal(u_g, v_rg)\n assert F.array_equal(v_g, u_rg)\n assert F.array_equal(eids_g, eids_rg)\n u_g, v_g, eids_g = g.all_edges(form='all', etype=('user', 'plays', 'game'))\n u_rg, v_rg, eids_rg = g_r.all_edges(form='all', etype=('game', 'plays', 'user'))\n assert F.array_equal(u_g, v_rg)\n assert F.array_equal(v_g, u_rg)\n assert F.array_equal(eids_g, eids_rg)\n u_g, v_g, eids_g = g.all_edges(form='all', etype=('developer', 'develops', 'game'))\n u_rg, v_rg, eids_rg = g_r.all_edges(form='all', etype=('game', 'develops', 'developer'))\n assert F.array_equal(u_g, 
v_rg)\n assert F.array_equal(v_g, u_rg)\n assert F.array_equal(eids_g, eids_rg)\n\n # withour share ndata\n g_r = dgl.reverse_heterograph(g, copy_ndata=False)\n for etype_g, etype_gr in zip(g.canonical_etypes, g_r.canonical_etypes):\n assert etype_g[0] == etype_gr[2]\n assert etype_g[1] == etype_gr[1]\n assert etype_g[2] == etype_gr[0]\n assert g.number_of_edges(etype_g) == g_r.number_of_edges(etype_gr)\n for ntype in g.ntypes:\n assert g.number_of_nodes(ntype) == g_r.number_of_nodes(ntype)\n assert len(g_r.nodes['user'].data) == 0\n assert len(g_r.nodes['game'].data) == 0\n\n g_r = dgl.reverse_heterograph(g, copy_ndata=True, copy_edata=True)\n print(g_r)\n for etype_g, etype_gr in zip(g.canonical_etypes, g_r.canonical_etypes):\n assert etype_g[0] == etype_gr[2]\n assert etype_g[1] == etype_gr[1]\n assert etype_g[2] == etype_gr[0]\n assert g.number_of_edges(etype_g) == g_r.number_of_edges(etype_gr)\n assert F.array_equal(g.edges['follows'].data['h'], g_r.edges['follows'].data['h'])\n assert F.array_equal(g.edges['follows'].data['hh'], g_r.edges['follows'].data['hh'])\n\n # add new node feature to g_r\n g_r.nodes['user'].data['hhh'] = F.tensor([0, 1, 2, 3, 4])\n assert ('hhh' in g.nodes['user'].data) is False\n assert ('hhh' in g_r.nodes['user'].data) is True\n\n # add new edge feature to g_r\n g_r.edges['follows'].data['hhh'] = F.tensor([1, 2, 3, 2, 0, 0, 1])\n assert ('hhh' in g.edges['follows'].data) is False\n assert ('hhh' in g_r.edges['follows'].data) is True\n\n\ndef test_reverse_shared_frames():\n g = dgl.DGLGraph()\n g.add_nodes(3)\n g.add_edges([0, 1, 2], [1, 2, 1])\n g.ndata['h'] = F.tensor([[0.], [1.], [2.]])\n g.edata['h'] = F.tensor([[3.], [4.], [5.]])\n\n rg = g.reverse(share_ndata=True, share_edata=True)\n assert F.allclose(g.ndata['h'], rg.ndata['h'])\n assert F.allclose(g.edata['h'], rg.edata['h'])\n assert F.allclose(g.edges[[0, 2], [1, 1]].data['h'],\n rg.edges[[1, 1], [0, 2]].data['h'])\n\n rg.ndata['h'] = rg.ndata['h'] + 1\n assert F.allclose(rg.ndata['h'], g.ndata['h'])\n\n g.edata['h'] = g.edata['h'] - 1\n assert F.allclose(rg.edata['h'], g.edata['h'])\n\n src_msg = fn.copy_src(src='h', out='m')\n sum_reduce = fn.sum(msg='m', out='h')\n\n rg.update_all(src_msg, sum_reduce)\n assert F.allclose(g.ndata['h'], rg.ndata['h'])\n\ndef test_to_bidirected():\n # homogeneous graph\n g = dgl.graph((F.tensor([0, 1, 3, 1]), F.tensor([1, 2, 0, 2])))\n g.ndata['h'] = F.tensor([[0.], [1.], [2.], [1.]])\n g.edata['h'] = F.tensor([[3.], [4.], [5.], [6.]])\n bg = dgl.to_bidirected(g, copy_ndata=True, copy_edata=True)\n u, v = g.edges()\n ub, vb = bg.edges()\n assert F.array_equal(F.cat([u, v], dim=0), ub)\n assert F.array_equal(F.cat([v, u], dim=0), vb)\n assert F.array_equal(g.ndata['h'], bg.ndata['h'])\n assert F.array_equal(F.cat([g.edata['h'], g.edata['h']], dim=0), bg.edata['h'])\n bg.ndata['hh'] = F.tensor([[0.], [1.], [2.], [1.]])\n assert ('hh' in g.ndata) is False\n bg.edata['hh'] = F.tensor([[0.], [1.], [2.], [1.], [0.], [1.], [2.], [1.]])\n assert ('hh' in g.edata) is False\n\n # donot share ndata and edata\n bg = dgl.to_bidirected(g, copy_ndata=False, copy_edata=False)\n ub, vb = bg.edges()\n assert F.array_equal(F.cat([u, v], dim=0), ub)\n assert F.array_equal(F.cat([v, u], dim=0), vb)\n assert ('h' in bg.ndata) is False\n assert ('h' in bg.edata) is False\n\n # zero edge graph\n g = dgl.graph([])\n bg = dgl.to_bidirected(g, copy_ndata=True, copy_edata=True)\n\n # heterogeneous graph\n g = dgl.heterograph({\n ('user', 'wins', 'user'): (F.tensor([0, 2, 0, 2, 2]), 
F.tensor([1, 1, 2, 1, 0])),\n ('user', 'plays', 'game'): (F.tensor([1, 2, 1]), F.tensor([2, 1, 1])),\n ('user', 'follows', 'user'): (F.tensor([1, 2, 1]), F.tensor([0, 0, 0]))\n })\n g.nodes['game'].data['hv'] = F.ones((3, 1))\n g.nodes['user'].data['hv'] = F.ones((3, 1))\n g.edges['wins'].data['h'] = F.tensor([0, 1, 2, 3, 4])\n bg = dgl.to_bidirected(g, copy_ndata=True, copy_edata=True, ignore_bipartite=True)\n assert F.array_equal(g.nodes['game'].data['hv'], bg.nodes['game'].data['hv'])\n assert F.array_equal(g.nodes['user'].data['hv'], bg.nodes['user'].data['hv'])\n u, v = g.all_edges(order='eid', etype=('user', 'wins', 'user'))\n ub, vb = bg.all_edges(order='eid', etype=('user', 'wins', 'user'))\n assert F.array_equal(F.cat([u, v], dim=0), ub)\n assert F.array_equal(F.cat([v, u], dim=0), vb)\n assert F.array_equal(F.cat([g.edges['wins'].data['h'], g.edges['wins'].data['h']], dim=0),\n bg.edges['wins'].data['h'])\n u, v = g.all_edges(order='eid', etype=('user', 'follows', 'user'))\n ub, vb = bg.all_edges(order='eid', etype=('user', 'follows', 'user'))\n assert F.array_equal(F.cat([u, v], dim=0), ub)\n assert F.array_equal(F.cat([v, u], dim=0), vb)\n u, v = g.all_edges(order='eid', etype=('user', 'plays', 'game'))\n ub, vb = bg.all_edges(order='eid', etype=('user', 'plays', 'game'))\n assert F.array_equal(u, ub)\n assert F.array_equal(v, vb)\n assert len(bg.edges['plays'].data) == 0\n assert len(bg.edges['follows'].data) == 0\n\n # donot share ndata and edata\n bg = dgl.to_bidirected(g, copy_ndata=False, copy_edata=False, ignore_bipartite=True)\n assert len(bg.edges['wins'].data) == 0\n assert len(bg.edges['plays'].data) == 0\n assert len(bg.edges['follows'].data) == 0\n assert len(bg.nodes['game'].data) == 0\n assert len(bg.nodes['user'].data) == 0\n u, v = g.all_edges(order='eid', etype=('user', 'wins', 'user'))\n ub, vb = bg.all_edges(order='eid', etype=('user', 'wins', 'user'))\n assert F.array_equal(F.cat([u, v], dim=0), ub)\n assert F.array_equal(F.cat([v, u], dim=0), vb)\n u, v = g.all_edges(order='eid', etype=('user', 'follows', 'user'))\n ub, vb = bg.all_edges(order='eid', etype=('user', 'follows', 'user'))\n assert F.array_equal(F.cat([u, v], dim=0), ub)\n assert F.array_equal(F.cat([v, u], dim=0), vb)\n u, v = g.all_edges(order='eid', etype=('user', 'plays', 'game'))\n ub, vb = bg.all_edges(order='eid', etype=('user', 'plays', 'game'))\n assert F.array_equal(u, ub)\n assert F.array_equal(v, vb)\n\n\ndef test_simple_graph():\n elist = [(0, 1), (0, 2), (1, 2), (0, 1)]\n g = dgl.DGLGraph(elist, readonly=True)\n assert g.is_multigraph\n sg = dgl.to_simple_graph(g)\n assert not sg.is_multigraph\n assert sg.number_of_edges() == 3\n src, dst = sg.edges()\n eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))\n assert eset == set(elist)\n\n\ndef test_bidirected_graph():\n def _test(in_readonly, out_readonly):\n elist = [(0, 0), (0, 1), (1, 0),\n (1, 1), (2, 1), (2, 2)]\n num_edges = 7\n g = dgl.DGLGraph(elist, readonly=in_readonly)\n elist.append((1, 2))\n elist = set(elist)\n big = dgl.to_bidirected_stale(g, out_readonly)\n assert big.number_of_edges() == num_edges\n src, dst = big.edges()\n eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))\n assert eset == set(elist)\n\n _test(True, True)\n _test(True, False)\n _test(False, True)\n _test(False, False)\n\n\ndef test_khop_graph():\n N = 20\n feat = F.randn((N, 5))\n\n def _test(g):\n for k in range(4):\n g_k = dgl.khop_graph(g, k)\n # use original graph to do message passing for k times.\n g.ndata['h'] = feat\n for _ 
in range(k):\n g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))\n h_0 = g.ndata.pop('h')\n # use k-hop graph to do message passing for one time.\n g_k.ndata['h'] = feat\n g_k.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))\n h_1 = g_k.ndata.pop('h')\n assert F.allclose(h_0, h_1, rtol=1e-3, atol=1e-3)\n\n # Test for random undirected graphs\n g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))\n _test(g)\n # Test for random directed graphs\n g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3, directed=True))\n _test(g)\n\ndef test_khop_adj():\n N = 20\n feat = F.randn((N, 5))\n g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))\n for k in range(3):\n adj = F.tensor(dgl.khop_adj(g, k))\n # use original graph to do message passing for k times.\n g.ndata['h'] = feat\n for _ in range(k):\n g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))\n h_0 = g.ndata.pop('h')\n # use k-hop adj to do message passing for one time.\n h_1 = F.matmul(adj, feat)\n assert F.allclose(h_0, h_1, rtol=1e-3, atol=1e-3)\n\n\ndef test_laplacian_lambda_max():\n N = 20\n eps = 1e-6\n # test DGLGraph\n g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))\n l_max = dgl.laplacian_lambda_max(g)\n assert (l_max[0] < 2 + eps)\n # test batched DGLGraph\n N_arr = [20, 30, 10, 12]\n bg = dgl.batch([\n dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))\n for N in N_arr\n ])\n l_max_arr = dgl.laplacian_lambda_max(bg)\n assert len(l_max_arr) == len(N_arr)\n for l_max in l_max_arr:\n assert l_max < 2 + eps\n\n\ndef test_add_self_loop():\n g = dgl.DGLGraph()\n g.add_nodes(5)\n g.add_edges([0, 1, 2], [1, 1, 2])\n # Nodes 0, 3, 4 don't have self-loop\n new_g = dgl.transform.add_self_loop(g)\n assert F.allclose(new_g.edges()[0], F.tensor([0, 0, 1, 2, 3, 4]))\n assert F.allclose(new_g.edges()[1], F.tensor([1, 0, 1, 2, 3, 4]))\n\n\ndef test_remove_self_loop():\n g = dgl.DGLGraph()\n g.add_nodes(5)\n g.add_edges([0, 1, 2], [1, 1, 2])\n new_g = dgl.transform.remove_self_loop(g)\n assert F.allclose(new_g.edges()[0], F.tensor([0]))\n assert F.allclose(new_g.edges()[1], F.tensor([1]))\n\ndef create_large_graph_index(num_nodes):\n row = np.random.choice(num_nodes, num_nodes * 10)\n col = np.random.choice(num_nodes, num_nodes * 10)\n spm = spsp.coo_matrix((np.ones(len(row)), (row, col)))\n\n return from_scipy_sparse_matrix(spm, True)\n\ndef get_nodeflow(g, node_ids, num_layers):\n batch_size = len(node_ids)\n expand_factor = g.number_of_nodes()\n sampler = dgl.contrib.sampling.NeighborSampler(g, batch_size,\n expand_factor=expand_factor, num_hops=num_layers,\n seed_nodes=node_ids)\n return next(iter(sampler))\n\ndef test_partition_with_halo():\n g = dgl.DGLGraph(create_large_graph_index(1000), readonly=True)\n node_part = np.random.choice(4, g.number_of_nodes())\n subgs = dgl.transform.partition_graph_with_halo(g, node_part, 2)\n for part_id, subg in subgs.items():\n node_ids = np.nonzero(node_part == part_id)[0]\n lnode_ids = np.nonzero(F.asnumpy(subg.ndata['inner_node']))[0]\n nf = get_nodeflow(g, node_ids, 2)\n lnf = get_nodeflow(subg, lnode_ids, 2)\n for i in range(nf.num_layers):\n layer_nids1 = F.asnumpy(nf.layer_parent_nid(i))\n layer_nids2 = lnf.layer_parent_nid(i)\n layer_nids2 = F.asnumpy(F.gather_row(subg.ndata[dgl.NID], layer_nids2))\n assert np.all(np.sort(layer_nids1) == np.sort(layer_nids2))\n\n for i in range(nf.num_blocks):\n block_eids1 = F.asnumpy(nf.block_parent_eid(i))\n block_eids2 = lnf.block_parent_eid(i)\n block_eids2 = F.asnumpy(F.gather_row(subg.edata[dgl.EID], block_eids2))\n assert np.all(np.sort(block_eids1) == np.sort(block_eids2))\n\n 
subgs = dgl.transform.partition_graph_with_halo(g, node_part, 2, reshuffle=True)\n for part_id, subg in subgs.items():\n node_ids = np.nonzero(node_part == part_id)[0]\n lnode_ids = np.nonzero(F.asnumpy(subg.ndata['inner_node']))[0]\n assert np.all(np.sort(F.asnumpy(subg.ndata['orig_id'])[lnode_ids]) == node_ids)\n\[email protected](F._default_context_str == 'gpu', reason=\"METIS doesn't support GPU\")\ndef test_metis_partition():\n # TODO(zhengda) Metis fails to partition a small graph.\n g = dgl.DGLGraph(create_large_graph_index(1000), readonly=True)\n check_metis_partition(g, 0)\n check_metis_partition(g, 1)\n check_metis_partition(g, 2)\n check_metis_partition_with_constraint(g)\n\[email protected](F._default_context_str == 'gpu', reason=\"METIS doesn't support GPU\")\ndef test_hetero_metis_partition():\n # TODO(zhengda) Metis fails to partition a small graph.\n g = dgl.DGLGraph(create_large_graph_index(1000), readonly=True)\n g = dgl.as_heterograph(g)\n check_metis_partition(g, 0)\n check_metis_partition(g, 1)\n check_metis_partition(g, 2)\n check_metis_partition_with_constraint(g)\n\n\ndef check_metis_partition_with_constraint(g):\n ntypes = np.zeros((g.number_of_nodes(),), dtype=np.int32)\n ntypes[0:int(g.number_of_nodes()/4)] = 1\n ntypes[int(g.number_of_nodes()*3/4):] = 2\n subgs = dgl.transform.metis_partition(g, 4, extra_cached_hops=1, balance_ntypes=ntypes)\n if subgs is not None:\n for i in subgs:\n subg = subgs[i]\n parent_nids = F.asnumpy(subg.ndata[dgl.NID])\n sub_ntypes = ntypes[parent_nids]\n print('type0:', np.sum(sub_ntypes == 0))\n print('type1:', np.sum(sub_ntypes == 1))\n print('type2:', np.sum(sub_ntypes == 2))\n subgs = dgl.transform.metis_partition(g, 4, extra_cached_hops=1,\n balance_ntypes=ntypes, balance_edges=True)\n if subgs is not None:\n for i in subgs:\n subg = subgs[i]\n parent_nids = F.asnumpy(subg.ndata[dgl.NID])\n sub_ntypes = ntypes[parent_nids]\n print('type0:', np.sum(sub_ntypes == 0))\n print('type1:', np.sum(sub_ntypes == 1))\n print('type2:', np.sum(sub_ntypes == 2))\n\ndef check_metis_partition(g, extra_hops):\n subgs = dgl.transform.metis_partition(g, 4, extra_cached_hops=extra_hops)\n num_inner_nodes = 0\n num_inner_edges = 0\n if subgs is not None:\n for part_id, subg in subgs.items():\n lnode_ids = np.nonzero(F.asnumpy(subg.ndata['inner_node']))[0]\n ledge_ids = np.nonzero(F.asnumpy(subg.edata['inner_edge']))[0]\n num_inner_nodes += len(lnode_ids)\n num_inner_edges += len(ledge_ids)\n assert np.sum(F.asnumpy(subg.ndata['part_id']) == part_id) == len(lnode_ids)\n assert num_inner_nodes == g.number_of_nodes()\n print(g.number_of_edges() - num_inner_edges)\n\n if extra_hops == 0:\n return\n\n # partitions with node reshuffling\n subgs = dgl.transform.metis_partition(g, 4, extra_cached_hops=extra_hops, reshuffle=True)\n num_inner_nodes = 0\n num_inner_edges = 0\n edge_cnts = np.zeros((g.number_of_edges(),))\n if subgs is not None:\n for part_id, subg in subgs.items():\n lnode_ids = np.nonzero(F.asnumpy(subg.ndata['inner_node']))[0]\n ledge_ids = np.nonzero(F.asnumpy(subg.edata['inner_edge']))[0]\n num_inner_nodes += len(lnode_ids)\n num_inner_edges += len(ledge_ids)\n assert np.sum(F.asnumpy(subg.ndata['part_id']) == part_id) == len(lnode_ids)\n nids = F.asnumpy(subg.ndata[dgl.NID])\n\n # ensure the local node Ids are contiguous.\n parent_ids = F.asnumpy(subg.ndata[dgl.NID])\n parent_ids = parent_ids[:len(lnode_ids)]\n assert np.all(parent_ids == np.arange(parent_ids[0], parent_ids[-1] + 1))\n\n # count the local edges.\n parent_ids = 
F.asnumpy(subg.edata[dgl.EID])[ledge_ids]\n edge_cnts[parent_ids] += 1\n\n orig_ids = subg.ndata['orig_id']\n inner_node = F.asnumpy(subg.ndata['inner_node'])\n for nid in range(subg.number_of_nodes()):\n neighs = subg.predecessors(nid)\n old_neighs1 = F.gather_row(orig_ids, neighs)\n old_nid = F.asnumpy(orig_ids[nid])\n old_neighs2 = g.predecessors(old_nid)\n # If this is an inner node, it should have the full neighborhood.\n if inner_node[nid]:\n assert np.all(np.sort(F.asnumpy(old_neighs1)) == np.sort(F.asnumpy(old_neighs2)))\n # Normally, local edges are only counted once.\n assert np.all(edge_cnts == 1)\n\n assert num_inner_nodes == g.number_of_nodes()\n print(g.number_of_edges() - num_inner_edges)\n\[email protected](F._default_context_str == 'gpu', reason=\"It doesn't support GPU\")\ndef test_reorder_nodes():\n g = dgl.DGLGraph(create_large_graph_index(1000), readonly=True)\n new_nids = np.random.permutation(g.number_of_nodes())\n # TODO(zhengda) we need to test both CSR and COO.\n new_g = dgl.transform.reorder_nodes(g, new_nids)\n new_in_deg = new_g.in_degrees()\n new_out_deg = new_g.out_degrees()\n in_deg = g.in_degrees()\n out_deg = g.out_degrees()\n new_in_deg1 = F.scatter_row(in_deg, F.tensor(new_nids), in_deg)\n new_out_deg1 = F.scatter_row(out_deg, F.tensor(new_nids), out_deg)\n assert np.all(F.asnumpy(new_in_deg == new_in_deg1))\n assert np.all(F.asnumpy(new_out_deg == new_out_deg1))\n orig_ids = F.asnumpy(new_g.ndata['orig_id'])\n for nid in range(g.number_of_nodes()):\n neighs = F.asnumpy(g.successors(nid))\n new_neighs1 = new_nids[neighs]\n new_nid = new_nids[nid]\n new_neighs2 = new_g.successors(new_nid)\n assert np.all(np.sort(new_neighs1) == np.sort(F.asnumpy(new_neighs2)))\n\n for nid in range(new_g.number_of_nodes()):\n neighs = F.asnumpy(new_g.successors(nid))\n old_neighs1 = orig_ids[neighs]\n old_nid = orig_ids[nid]\n old_neighs2 = g.successors(old_nid)\n assert np.all(np.sort(old_neighs1) == np.sort(F.asnumpy(old_neighs2)))\n\n neighs = F.asnumpy(new_g.predecessors(nid))\n old_neighs1 = orig_ids[neighs]\n old_nid = orig_ids[nid]\n old_neighs2 = g.predecessors(old_nid)\n assert np.all(np.sort(old_neighs1) == np.sort(F.asnumpy(old_neighs2)))\n\[email protected](F._default_context_str == 'gpu', reason=\"GPU not implemented\")\n@parametrize_dtype\ndef test_in_subgraph(index_dtype):\n g1 = dgl.graph([(1,0),(2,0),(3,0),(0,1),(2,1),(3,1),(0,2)], 'user', 'follow', index_dtype=index_dtype)\n g2 = dgl.bipartite([(0,0),(0,1),(1,2),(3,2)], 'user', 'play', 'game', index_dtype=index_dtype)\n g3 = dgl.bipartite([(2,0),(2,1),(2,2),(1,0),(1,3),(0,0)], 'game', 'liked-by', 'user', index_dtype=index_dtype)\n g4 = dgl.bipartite([(0,0),(1,0),(2,0),(3,0)], 'user', 'flips', 'coin', index_dtype=index_dtype)\n hg = dgl.hetero_from_relations([g1, g2, g3, g4])\n subg = dgl.in_subgraph(hg, {'user' : [0,1], 'game' : 0})\n assert subg._idtype_str == index_dtype\n assert len(subg.ntypes) == 3\n assert len(subg.etypes) == 4\n u, v = subg['follow'].edges()\n edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))\n assert F.array_equal(hg['follow'].edge_ids(u, v), subg['follow'].edata[dgl.EID])\n assert edge_set == {(1,0),(2,0),(3,0),(0,1),(2,1),(3,1)}\n u, v = subg['play'].edges()\n edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))\n assert F.array_equal(hg['play'].edge_ids(u, v), subg['play'].edata[dgl.EID])\n assert edge_set == {(0,0)}\n u, v = subg['liked-by'].edges()\n edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))\n assert F.array_equal(hg['liked-by'].edge_ids(u, 
v), subg['liked-by'].edata[dgl.EID])\n assert edge_set == {(2,0),(2,1),(1,0),(0,0)}\n assert subg['flips'].number_of_edges() == 0\n\[email protected](F._default_context_str == 'gpu', reason=\"GPU not implemented\")\n@parametrize_dtype\ndef test_out_subgraph(index_dtype):\n g1 = dgl.graph([(1,0),(2,0),(3,0),(0,1),(2,1),(3,1),(0,2)], 'user', 'follow', index_dtype=index_dtype)\n g2 = dgl.bipartite([(0,0),(0,1),(1,2),(3,2)], 'user', 'play', 'game', index_dtype=index_dtype)\n g3 = dgl.bipartite([(2,0),(2,1),(2,2),(1,0),(1,3),(0,0)], 'game', 'liked-by', 'user', index_dtype=index_dtype)\n g4 = dgl.bipartite([(0,0),(1,0),(2,0),(3,0)], 'user', 'flips', 'coin', index_dtype=index_dtype)\n hg = dgl.hetero_from_relations([g1, g2, g3, g4])\n subg = dgl.out_subgraph(hg, {'user' : [0,1], 'game' : 0})\n assert subg._idtype_str == index_dtype\n assert len(subg.ntypes) == 3\n assert len(subg.etypes) == 4\n u, v = subg['follow'].edges()\n edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))\n assert edge_set == {(1,0),(0,1),(0,2)}\n assert F.array_equal(hg['follow'].edge_ids(u, v), subg['follow'].edata[dgl.EID])\n u, v = subg['play'].edges()\n edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))\n assert edge_set == {(0,0),(0,1),(1,2)}\n assert F.array_equal(hg['play'].edge_ids(u, v), subg['play'].edata[dgl.EID])\n u, v = subg['liked-by'].edges()\n edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))\n assert edge_set == {(0,0)}\n assert F.array_equal(hg['liked-by'].edge_ids(u, v), subg['liked-by'].edata[dgl.EID])\n u, v = subg['flips'].edges()\n edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))\n assert edge_set == {(0,0),(1,0)}\n assert F.array_equal(hg['flips'].edge_ids(u, v), subg['flips'].edata[dgl.EID])\n\[email protected](F._default_context_str == 'gpu', reason=\"GPU compaction not implemented\")\n@parametrize_dtype\ndef test_compact(index_dtype):\n g1 = dgl.heterograph({\n ('user', 'follow', 'user'): [(1, 3), (3, 5)],\n ('user', 'plays', 'game'): [(2, 4), (3, 4), (2, 5)],\n ('game', 'wished-by', 'user'): [(6, 7), (5, 7)]},\n {'user': 20, 'game': 10}, index_dtype=index_dtype)\n\n g2 = dgl.heterograph({\n ('game', 'clicked-by', 'user'): [(3, 1)],\n ('user', 'likes', 'user'): [(1, 8), (8, 9)]},\n {'user': 20, 'game': 10}, index_dtype=index_dtype)\n\n g3 = dgl.graph([(0, 1), (1, 2)], num_nodes=10, ntype='user', index_dtype=index_dtype)\n g4 = dgl.graph([(1, 3), (3, 5)], num_nodes=10, ntype='user', index_dtype=index_dtype)\n\n def _check(g, new_g, induced_nodes):\n assert g.ntypes == new_g.ntypes\n assert g.canonical_etypes == new_g.canonical_etypes\n\n for ntype in g.ntypes:\n assert -1 not in induced_nodes[ntype]\n\n for etype in g.canonical_etypes:\n g_src, g_dst = g.all_edges(order='eid', etype=etype)\n g_src = F.asnumpy(g_src)\n g_dst = F.asnumpy(g_dst)\n new_g_src, new_g_dst = new_g.all_edges(order='eid', etype=etype)\n new_g_src_mapped = induced_nodes[etype[0]][F.asnumpy(new_g_src)]\n new_g_dst_mapped = induced_nodes[etype[2]][F.asnumpy(new_g_dst)]\n assert (g_src == new_g_src_mapped).all()\n assert (g_dst == new_g_dst_mapped).all()\n\n # Test default\n new_g1 = dgl.compact_graphs(g1)\n induced_nodes = {ntype: new_g1.nodes[ntype].data[dgl.NID] for ntype in new_g1.ntypes}\n induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}\n assert new_g1._idtype_str == index_dtype\n assert set(induced_nodes['user']) == set([1, 3, 5, 2, 7])\n assert set(induced_nodes['game']) == set([4, 5, 6])\n _check(g1, new_g1, induced_nodes)\n\n # Test with always_preserve given a 
dict\n new_g1 = dgl.compact_graphs(\n g1, always_preserve={'game': F.tensor([4, 7], dtype=getattr(F, index_dtype))})\n assert new_g1._idtype_str == index_dtype\n induced_nodes = {ntype: new_g1.nodes[ntype].data[dgl.NID] for ntype in new_g1.ntypes}\n induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}\n assert set(induced_nodes['user']) == set([1, 3, 5, 2, 7])\n assert set(induced_nodes['game']) == set([4, 5, 6, 7])\n _check(g1, new_g1, induced_nodes)\n\n # Test with always_preserve given a tensor\n new_g3 = dgl.compact_graphs(\n g3, always_preserve=F.tensor([1, 7], dtype=getattr(F, index_dtype)))\n induced_nodes = {ntype: new_g3.nodes[ntype].data[dgl.NID] for ntype in new_g3.ntypes}\n induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}\n\n assert new_g3._idtype_str == index_dtype\n assert set(induced_nodes['user']) == set([0, 1, 2, 7])\n _check(g3, new_g3, induced_nodes)\n\n # Test multiple graphs\n new_g1, new_g2 = dgl.compact_graphs([g1, g2])\n induced_nodes = {ntype: new_g1.nodes[ntype].data[dgl.NID] for ntype in new_g1.ntypes}\n induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}\n assert new_g1._idtype_str == index_dtype\n assert new_g2._idtype_str == index_dtype\n assert set(induced_nodes['user']) == set([1, 3, 5, 2, 7, 8, 9])\n assert set(induced_nodes['game']) == set([3, 4, 5, 6])\n _check(g1, new_g1, induced_nodes)\n _check(g2, new_g2, induced_nodes)\n\n # Test multiple graphs with always_preserve given a dict\n new_g1, new_g2 = dgl.compact_graphs(\n [g1, g2], always_preserve={'game': F.tensor([4, 7], dtype=getattr(F, index_dtype))})\n induced_nodes = {ntype: new_g1.nodes[ntype].data[dgl.NID] for ntype in new_g1.ntypes}\n induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}\n assert new_g1._idtype_str == index_dtype\n assert new_g2._idtype_str == index_dtype\n assert set(induced_nodes['user']) == set([1, 3, 5, 2, 7, 8, 9])\n assert set(induced_nodes['game']) == set([3, 4, 5, 6, 7])\n _check(g1, new_g1, induced_nodes)\n _check(g2, new_g2, induced_nodes)\n\n # Test multiple graphs with always_preserve given a tensor\n new_g3, new_g4 = dgl.compact_graphs(\n [g3, g4], always_preserve=F.tensor([1, 7], dtype=getattr(F, index_dtype)))\n induced_nodes = {ntype: new_g3.nodes[ntype].data[dgl.NID] for ntype in new_g3.ntypes}\n induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}\n\n assert new_g3._idtype_str == index_dtype\n assert new_g4._idtype_str == index_dtype\n assert set(induced_nodes['user']) == set([0, 1, 2, 3, 5, 7])\n _check(g3, new_g3, induced_nodes)\n _check(g4, new_g4, induced_nodes)\n\[email protected](F._default_context_str == 'gpu', reason=\"GPU to simple not implemented\")\n@parametrize_dtype\ndef test_to_simple(index_dtype):\n # homogeneous graph\n g = dgl.graph((F.tensor([0, 1, 2, 1]), F.tensor([1, 2, 0, 2])))\n g.ndata['h'] = F.tensor([[0.], [1.], [2.]])\n g.edata['h'] = F.tensor([[3.], [4.], [5.], [6.]])\n sg, wb = dgl.to_simple(g, writeback_mapping=True)\n u, v = g.all_edges(form='uv', order='eid')\n u = F.asnumpy(u).tolist()\n v = F.asnumpy(v).tolist()\n uv = list(zip(u, v))\n eid_map = F.asnumpy(wb)\n\n su, sv = sg.all_edges(form='uv', order='eid')\n su = F.asnumpy(su).tolist()\n sv = F.asnumpy(sv).tolist()\n suv = list(zip(su, sv))\n sc = F.asnumpy(sg.edata['count'])\n assert set(uv) == set(suv)\n for i, e in enumerate(suv):\n assert sc[i] == sum(e == _e for _e in uv)\n for i, e in enumerate(uv):\n assert eid_map[i] == suv.index(e)\n # shared ndata\n assert F.array_equal(sg.ndata['h'], 
g.ndata['h'])\n assert 'h' not in sg.edata\n # new ndata to sg\n sg.ndata['hh'] = F.tensor([[0.], [1.], [2.]])\n assert 'hh' not in g.ndata\n\n sg = dgl.to_simple(g, writeback_mapping=False, copy_ndata=False)\n assert 'h' not in sg.ndata\n assert 'h' not in sg.edata\n\n # heterogeneous graph\n g = dgl.heterograph({\n ('user', 'follow', 'user'): ([0, 1, 2, 1, 1, 1],\n [1, 3, 2, 3, 4, 4]),\n ('user', 'plays', 'game'): ([3, 2, 1, 1, 3, 2, 2], [5, 3, 4, 4, 5, 3, 3])},\n index_dtype=index_dtype)\n g.nodes['user'].data['h'] = F.tensor([0, 1, 2, 3, 4])\n g.nodes['user'].data['hh'] = F.tensor([0, 1, 2, 3, 4])\n g.edges['follow'].data['h'] = F.tensor([0, 1, 2, 3, 4, 5])\n sg, wb = dgl.to_simple(g, return_counts='weights', writeback_mapping=True, copy_edata=True)\n g.nodes['game'].data['h'] = F.tensor([0, 1, 2, 3, 4, 5])\n\n for etype in g.canonical_etypes:\n u, v = g.all_edges(form='uv', order='eid', etype=etype)\n u = F.asnumpy(u).tolist()\n v = F.asnumpy(v).tolist()\n uv = list(zip(u, v))\n eid_map = F.asnumpy(wb[etype])\n\n su, sv = sg.all_edges(form='uv', order='eid', etype=etype)\n su = F.asnumpy(su).tolist()\n sv = F.asnumpy(sv).tolist()\n suv = list(zip(su, sv))\n sw = F.asnumpy(sg.edges[etype].data['weights'])\n\n assert set(uv) == set(suv)\n for i, e in enumerate(suv):\n assert sw[i] == sum(e == _e for _e in uv)\n for i, e in enumerate(uv):\n assert eid_map[i] == suv.index(e)\n # shared ndata\n assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'])\n assert F.array_equal(sg.nodes['user'].data['hh'], g.nodes['user'].data['hh'])\n assert 'h' not in sg.nodes['game'].data\n # new ndata to sg\n sg.nodes['user'].data['hhh'] = F.tensor([0, 1, 2, 3, 4])\n assert 'hhh' not in g.nodes['user'].data\n # share edata\n feat_idx = F.asnumpy(wb[('user', 'follow', 'user')])\n _, indices = np.unique(feat_idx, return_index=True)\n assert np.array_equal(F.asnumpy(sg.edges['follow'].data['h']),\n F.asnumpy(g.edges['follow'].data['h'])[indices])\n\n sg = dgl.to_simple(g, writeback_mapping=False, copy_ndata=False)\n for ntype in g.ntypes:\n assert g.number_of_nodes(ntype) == sg.number_of_nodes(ntype)\n assert 'h' not in sg.nodes['user'].data\n assert 'hh' not in sg.nodes['user'].data\n\[email protected](F._default_context_str == 'gpu', reason=\"GPU compaction not implemented\")\n@parametrize_dtype\ndef test_to_block(index_dtype):\n def check(g, bg, ntype, etype, dst_nodes, include_dst_in_src=True):\n if dst_nodes is not None:\n assert F.array_equal(bg.dstnodes[ntype].data[dgl.NID], dst_nodes)\n n_dst_nodes = bg.number_of_nodes('DST/' + ntype)\n if include_dst_in_src:\n assert F.array_equal(\n bg.srcnodes[ntype].data[dgl.NID][:n_dst_nodes],\n bg.dstnodes[ntype].data[dgl.NID])\n\n g = g[etype]\n bg = bg[etype]\n induced_src = bg.srcdata[dgl.NID]\n induced_dst = bg.dstdata[dgl.NID]\n induced_eid = bg.edata[dgl.EID]\n bg_src, bg_dst = bg.all_edges(order='eid')\n src_ans, dst_ans = g.all_edges(order='eid')\n\n induced_src_bg = F.gather_row(induced_src, bg_src)\n induced_dst_bg = F.gather_row(induced_dst, bg_dst)\n induced_src_ans = F.gather_row(src_ans, induced_eid)\n induced_dst_ans = F.gather_row(dst_ans, induced_eid)\n\n assert F.array_equal(induced_src_bg, induced_src_ans)\n assert F.array_equal(induced_dst_bg, induced_dst_ans)\n\n def checkall(g, bg, dst_nodes, include_dst_in_src=True):\n for etype in g.etypes:\n ntype = g.to_canonical_etype(etype)[2]\n if dst_nodes is not None and ntype in dst_nodes:\n check(g, bg, ntype, etype, dst_nodes[ntype], include_dst_in_src)\n else:\n check(g, bg, 
ntype, etype, None, include_dst_in_src)\n\n g = dgl.heterograph({\n ('A', 'AA', 'A'): [(0, 1), (2, 3), (1, 2), (3, 4)],\n ('A', 'AB', 'B'): [(0, 1), (1, 3), (3, 5), (1, 6)],\n ('B', 'BA', 'A'): [(2, 3), (3, 2)]}, index_dtype=index_dtype)\n g.nodes['A'].data['x'] = F.randn((5, 10))\n g.nodes['B'].data['x'] = F.randn((7, 5))\n g.edges['AA'].data['x'] = F.randn((4, 3))\n g.edges['AB'].data['x'] = F.randn((4, 3))\n g.edges['BA'].data['x'] = F.randn((2, 3))\n g_a = g['AA']\n\n def check_features(g, bg):\n for ntype in bg.srctypes:\n for key in g.nodes[ntype].data:\n assert F.array_equal(\n bg.srcnodes[ntype].data[key],\n F.gather_row(g.nodes[ntype].data[key], bg.srcnodes[ntype].data[dgl.NID]))\n for ntype in bg.dsttypes:\n for key in g.nodes[ntype].data:\n assert F.array_equal(\n bg.dstnodes[ntype].data[key],\n F.gather_row(g.nodes[ntype].data[key], bg.dstnodes[ntype].data[dgl.NID]))\n for etype in bg.canonical_etypes:\n for key in g.edges[etype].data:\n assert F.array_equal(\n bg.edges[etype].data[key],\n F.gather_row(g.edges[etype].data[key], bg.edges[etype].data[dgl.EID]))\n\n bg = dgl.to_block(g_a)\n check(g_a, bg, 'A', 'AA', None)\n check_features(g_a, bg)\n assert bg.number_of_src_nodes() == 5\n assert bg.number_of_dst_nodes() == 4\n\n bg = dgl.to_block(g_a, include_dst_in_src=False)\n check(g_a, bg, 'A', 'AA', None, False)\n check_features(g_a, bg)\n assert bg.number_of_src_nodes() == 4\n assert bg.number_of_dst_nodes() == 4\n\n dst_nodes = F.tensor([4, 3, 2, 1], dtype=getattr(F, index_dtype))\n bg = dgl.to_block(g_a, dst_nodes)\n check(g_a, bg, 'A', 'AA', dst_nodes)\n check_features(g_a, bg)\n\n g_ab = g['AB']\n\n bg = dgl.to_block(g_ab)\n assert bg._idtype_str == index_dtype\n assert bg.number_of_nodes('SRC/B') == 4\n assert F.array_equal(bg.srcnodes['B'].data[dgl.NID], bg.dstnodes['B'].data[dgl.NID])\n assert bg.number_of_nodes('DST/A') == 0\n checkall(g_ab, bg, None)\n check_features(g_ab, bg)\n\n dst_nodes = {'B': F.tensor([5, 6, 3, 1], dtype=getattr(F, index_dtype))}\n bg = dgl.to_block(g, dst_nodes)\n assert bg.number_of_nodes('SRC/B') == 4\n assert F.array_equal(bg.srcnodes['B'].data[dgl.NID], bg.dstnodes['B'].data[dgl.NID])\n assert bg.number_of_nodes('DST/A') == 0\n checkall(g, bg, dst_nodes)\n check_features(g, bg)\n\n dst_nodes = {'A': F.tensor([4, 3, 2, 1], dtype=getattr(F, index_dtype)), 'B': F.tensor([3, 5, 6, 1], dtype=getattr(F, index_dtype))}\n bg = dgl.to_block(g, dst_nodes=dst_nodes)\n checkall(g, bg, dst_nodes)\n check_features(g, bg)\n\[email protected](F._default_context_str == 'gpu', reason=\"GPU not implemented\")\n@parametrize_dtype\ndef test_remove_edges(index_dtype):\n def check(g1, etype, g, edges_removed):\n src, dst, eid = g.edges(etype=etype, form='all')\n src1, dst1 = g1.edges(etype=etype, order='eid')\n if etype is not None:\n eid1 = g1.edges[etype].data[dgl.EID]\n else:\n eid1 = g1.edata[dgl.EID]\n src1 = F.asnumpy(src1)\n dst1 = F.asnumpy(dst1)\n eid1 = F.asnumpy(eid1)\n src = F.asnumpy(src)\n dst = F.asnumpy(dst)\n eid = F.asnumpy(eid)\n sde_set = set(zip(src, dst, eid))\n\n for s, d, e in zip(src1, dst1, eid1):\n assert (s, d, e) in sde_set\n assert not np.isin(edges_removed, eid1).any()\n assert g1.idtype == g.idtype\n\n for fmt in ['coo', 'csr', 'csc']:\n for edges_to_remove in [[2], [2, 2], [3, 2], [1, 3, 1, 2]]:\n g = dgl.graph([(0, 1), (2, 3), (1, 2), (3, 4)], restrict_format=fmt, index_dtype=index_dtype)\n g1 = dgl.remove_edges(g, F.tensor(edges_to_remove, getattr(F, index_dtype)))\n check(g1, None, g, edges_to_remove)\n\n g = dgl.graph(\n 
spsp.csr_matrix(([1, 1, 1, 1], ([0, 2, 1, 3], [1, 3, 2, 4])), shape=(5, 5)),\n restrict_format=fmt, index_dtype=index_dtype)\n g1 = dgl.remove_edges(g, F.tensor(edges_to_remove, getattr(F, index_dtype)))\n check(g1, None, g, edges_to_remove)\n\n g = dgl.heterograph({\n ('A', 'AA', 'A'): [(0, 1), (2, 3), (1, 2), (3, 4)],\n ('A', 'AB', 'B'): [(0, 1), (1, 3), (3, 5), (1, 6)],\n ('B', 'BA', 'A'): [(2, 3), (3, 2)]}, index_dtype=index_dtype)\n g2 = dgl.remove_edges(g, {'AA': F.tensor([2], getattr(F, index_dtype)), 'AB': F.tensor([3], getattr(F, index_dtype)), 'BA': F.tensor([1], getattr(F, index_dtype))})\n check(g2, 'AA', g, [2])\n check(g2, 'AB', g, [3])\n check(g2, 'BA', g, [1])\n\n g3 = dgl.remove_edges(g, {'AA': F.tensor([], getattr(F, index_dtype)), 'AB': F.tensor([3], getattr(F, index_dtype)), 'BA': F.tensor([1], getattr(F, index_dtype))})\n check(g3, 'AA', g, [])\n check(g3, 'AB', g, [3])\n check(g3, 'BA', g, [1])\n\n g4 = dgl.remove_edges(g, {'AB': F.tensor([3, 1, 2, 0], getattr(F, index_dtype))})\n check(g4, 'AA', g, [])\n check(g4, 'AB', g, [3, 1, 2, 0])\n check(g4, 'BA', g, [])\n\ndef test_cast():\n m = spsp.coo_matrix(([1, 1], ([0, 1], [1, 2])), (4, 4))\n g = dgl.DGLGraph(m, readonly=True)\n gsrc, gdst = g.edges(order='eid')\n ndata = F.randn((4, 5))\n edata = F.randn((2, 4))\n g.ndata['x'] = ndata\n g.edata['y'] = edata\n\n hg = dgl.as_heterograph(g, 'A', 'AA')\n assert hg.ntypes == ['A']\n assert hg.etypes == ['AA']\n assert hg.canonical_etypes == [('A', 'AA', 'A')]\n assert hg.number_of_nodes() == 4\n assert hg.number_of_edges() == 2\n hgsrc, hgdst = hg.edges(order='eid')\n assert F.array_equal(gsrc, hgsrc)\n assert F.array_equal(gdst, hgdst)\n\n g2 = dgl.as_immutable_graph(hg)\n assert g2.number_of_nodes() == 4\n assert g2.number_of_edges() == 2\n g2src, g2dst = hg.edges(order='eid')\n assert F.array_equal(g2src, gsrc)\n assert F.array_equal(g2dst, gdst)\n\nif __name__ == '__main__':\n # test_reorder_nodes()\n # test_line_graph()\n # test_no_backtracking()\n # test_reverse()\n # test_reverse_shared_frames()\n # test_to_bidirected()\n # test_simple_graph()\n # test_bidirected_graph()\n # test_khop_adj()\n # test_khop_graph()\n # test_laplacian_lambda_max()\n # test_remove_self_loop()\n # test_add_self_loop()\n # test_partition_with_halo()\n test_metis_partition()\n test_hetero_metis_partition()\n # test_hetero_linegraph('int32')\n # test_compact()\n # test_to_simple(\"int32\")\n # test_in_subgraph(\"int32\")\n # test_out_subgraph()\n # test_to_block(\"int32\")\n # test_remove_edges()\n" ]
[ [ "scipy.sparse.coo_matrix", "numpy.nonzero", "numpy.unique", "numpy.random.choice", "numpy.arange", "numpy.sort", "scipy.sparse.csr_matrix", "numpy.all", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.isin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
iLuSIAnn/test
[ "10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e", "10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e", "10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e", "10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e", "10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e", "10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e" ]
[ "tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/base.py", "tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/preprocessing/tests/test_data.py", "tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/manifold/_mds.py", "tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/tests/test_multiclass.py", "tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/linear_model/_ransac.py", "tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/scipy/interpolate/tests/test_interpnd.py" ]
[ "\"\"\"\nBase classes for all estimators.\n\nUsed for VotingClassifier\n\"\"\"\n\n# Author: Gael Varoquaux <[email protected]>\n# License: BSD 3 clause\n\nimport copy\nimport warnings\nfrom collections import defaultdict\nimport platform\nimport inspect\nimport re\n\nimport numpy as np\n\nfrom . import __version__\nfrom ._config import get_config\nfrom .utils import _IS_32BIT\nfrom .utils.validation import check_X_y\nfrom .utils.validation import check_array\nfrom .utils._estimator_html_repr import estimator_html_repr\nfrom .utils.validation import _deprecate_positional_args\n\n_DEFAULT_TAGS = {\n 'non_deterministic': False,\n 'requires_positive_X': False,\n 'requires_positive_y': False,\n 'X_types': ['2darray'],\n 'poor_score': False,\n 'no_validation': False,\n 'multioutput': False,\n \"allow_nan\": False,\n 'stateless': False,\n 'multilabel': False,\n '_skip_test': False,\n '_xfail_checks': False,\n 'multioutput_only': False,\n 'binary_only': False,\n 'requires_fit': True,\n 'requires_y': False,\n }\n\n\n@_deprecate_positional_args\ndef clone(estimator, *, safe=True):\n \"\"\"Constructs a new estimator with the same parameters.\n\n Clone does a deep copy of the model in an estimator\n without actually copying attached data. It yields a new estimator\n with the same parameters that has not been fit on any data.\n\n Parameters\n ----------\n estimator : {list, tuple, set} of estimator objects or estimator object\n The estimator or group of estimators to be cloned.\n\n safe : bool, default=True\n If safe is false, clone will fall back to a deep copy on objects\n that are not estimators.\n\n \"\"\"\n estimator_type = type(estimator)\n # XXX: not handling dictionaries\n if estimator_type in (list, tuple, set, frozenset):\n return estimator_type([clone(e, safe=safe) for e in estimator])\n elif not hasattr(estimator, 'get_params') or isinstance(estimator, type):\n if not safe:\n return copy.deepcopy(estimator)\n else:\n if isinstance(estimator, type):\n raise TypeError(\"Cannot clone object. 
\" +\n \"You should provide an instance of \" +\n \"scikit-learn estimator instead of a class.\")\n else:\n raise TypeError(\"Cannot clone object '%s' (type %s): \"\n \"it does not seem to be a scikit-learn \"\n \"estimator as it does not implement a \"\n \"'get_params' method.\"\n % (repr(estimator), type(estimator)))\n\n klass = estimator.__class__\n new_object_params = estimator.get_params(deep=False)\n for name, param in new_object_params.items():\n new_object_params[name] = clone(param, safe=False)\n new_object = klass(**new_object_params)\n params_set = new_object.get_params(deep=False)\n\n # quick sanity check of the parameters of the clone\n for name in new_object_params:\n param1 = new_object_params[name]\n param2 = params_set[name]\n if param1 is not param2:\n raise RuntimeError('Cannot clone object %s, as the constructor '\n 'either does not set or modifies parameter %s' %\n (estimator, name))\n return new_object\n\n\ndef _pprint(params, offset=0, printer=repr):\n \"\"\"Pretty print the dictionary 'params'\n\n Parameters\n ----------\n params : dict\n The dictionary to pretty print\n\n offset : int, default=0\n The offset in characters to add at the begin of each line.\n\n printer : callable, default=repr\n The function to convert entries to strings, typically\n the builtin str or repr\n\n \"\"\"\n # Do a multi-line justified repr:\n options = np.get_printoptions()\n np.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(params.items())):\n if type(v) is float:\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' 
+ this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines\n\n\nclass BaseEstimator:\n \"\"\"Base class for all estimators in scikit-learn\n\n Notes\n -----\n All estimators should specify all the parameters that can be set\n at the class level in their ``__init__`` as explicit keyword\n arguments (no ``*args`` or ``**kwargs``).\n \"\"\"\n\n @classmethod\n def _get_param_names(cls):\n \"\"\"Get parameter names for the estimator\"\"\"\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = inspect.signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [p for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\"\n % (cls, init_signature))\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])\n\n def get_params(self, deep=True):\n \"\"\"\n Get parameters for this estimator.\n\n Parameters\n ----------\n deep : bool, default=True\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n try:\n value = getattr(self, key)\n except AttributeError:\n warnings.warn('From version 0.24, get_params will raise an '\n 'AttributeError if a parameter cannot be '\n 'retrieved as an instance attribute. Previously '\n 'it would return None.',\n FutureWarning)\n value = None\n if deep and hasattr(value, 'get_params'):\n deep_items = value.get_params().items()\n out.update((key + '__' + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n \"\"\"\n Set the parameters of this estimator.\n\n The method works on simple estimators as well as on nested objects\n (such as pipelines). The latter have parameters of the form\n ``<component>__<parameter>`` so that it's possible to update each\n component of a nested object.\n\n Parameters\n ----------\n **params : dict\n Estimator parameters.\n\n Returns\n -------\n self : object\n Estimator instance.\n \"\"\"\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition('__')\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s. 
'\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' %\n (key, self))\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self\n\n def __repr__(self, N_CHAR_MAX=700):\n # N_CHAR_MAX is the (approximate) maximum number of non-blank\n # characters to render. We pass it as an optional parameter to ease\n # the tests.\n\n from .utils._pprint import _EstimatorPrettyPrinter\n\n N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences\n\n # use ellipsis for sequences with a lot of elements\n pp = _EstimatorPrettyPrinter(\n compact=True, indent=1, indent_at_name=True,\n n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW)\n\n repr_ = pp.pformat(self)\n\n # Use bruteforce ellipsis when there are a lot of non-blank characters\n n_nonblank = len(''.join(repr_.split()))\n if n_nonblank > N_CHAR_MAX:\n lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends\n regex = r'^(\\s*\\S){%d}' % lim\n # The regex '^(\\s*\\S){%d}' % n\n # matches from the start of the string until the nth non-blank\n # character:\n # - ^ matches the start of string\n # - (pattern){n} matches n repetitions of pattern\n # - \\s*\\S matches a non-blank char following zero or more blanks\n left_lim = re.match(regex, repr_).end()\n right_lim = re.match(regex, repr_[::-1]).end()\n\n if '\\n' in repr_[left_lim:-right_lim]:\n # The left side and right side aren't on the same line.\n # To avoid weird cuts, e.g.:\n # categoric...ore',\n # we need to start the right side with an appropriate newline\n # character so that it renders properly as:\n # categoric...\n # handle_unknown='ignore',\n # so we add [^\\n]*\\n which matches until the next \\n\n regex += r'[^\\n]*\\n'\n right_lim = re.match(regex, repr_[::-1]).end()\n\n ellipsis = '...'\n if left_lim + len(ellipsis) < len(repr_) - right_lim:\n # Only add ellipsis if it results in a shorter repr\n repr_ = repr_[:left_lim] + '...' + repr_[-right_lim:]\n\n return repr_\n\n def __getstate__(self):\n try:\n state = super().__getstate__()\n except AttributeError:\n state = self.__dict__.copy()\n\n if type(self).__module__.startswith('sklearn.'):\n return dict(state.items(), _sklearn_version=__version__)\n else:\n return state\n\n def __setstate__(self, state):\n if type(self).__module__.startswith('sklearn.'):\n pickle_version = state.pop(\"_sklearn_version\", \"pre-0.18\")\n if pickle_version != __version__:\n warnings.warn(\n \"Trying to unpickle estimator {0} from version {1} when \"\n \"using version {2}. This might lead to breaking code or \"\n \"invalid results. Use at your own risk.\".format(\n self.__class__.__name__, pickle_version, __version__),\n UserWarning)\n try:\n super().__setstate__(state)\n except AttributeError:\n self.__dict__.update(state)\n\n def _more_tags(self):\n return _DEFAULT_TAGS\n\n def _get_tags(self):\n collected_tags = {}\n for base_class in reversed(inspect.getmro(self.__class__)):\n if hasattr(base_class, '_more_tags'):\n # need the if because mixins might not have _more_tags\n # but might do redundant work in estimators\n # (i.e. 
calling more tags on BaseEstimator multiple times)\n more_tags = base_class._more_tags(self)\n collected_tags.update(more_tags)\n return collected_tags\n\n def _check_n_features(self, X, reset):\n \"\"\"Set the `n_features_in_` attribute, or check against it.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n reset : bool\n If True, the `n_features_in_` attribute is set to `X.shape[1]`.\n Else, the attribute must already exist and the function checks\n that it is equal to `X.shape[1]`.\n \"\"\"\n n_features = X.shape[1]\n\n if reset:\n self.n_features_in_ = n_features\n else:\n if not hasattr(self, 'n_features_in_'):\n raise RuntimeError(\n \"The reset parameter is False but there is no \"\n \"n_features_in_ attribute. Is this estimator fitted?\"\n )\n if n_features != self.n_features_in_:\n raise ValueError(\n 'X has {} features, but this {} is expecting {} features '\n 'as input.'.format(n_features, self.__class__.__name__,\n self.n_features_in_)\n )\n\n def _validate_data(self, X, y=None, reset=True,\n validate_separately=False, **check_params):\n \"\"\"Validate input data and set or check the `n_features_in_` attribute.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features)\n The input samples.\n y : array-like of shape (n_samples,), default=None\n The targets. If None, `check_array` is called on `X` and\n `check_X_y` is called otherwise.\n reset : bool, default=True\n Whether to reset the `n_features_in_` attribute.\n If False, the input will be checked for consistency with data\n provided when reset was last True.\n validate_separately : False or tuple of dicts, default=False\n Only used if y is not None.\n If False, call validate_X_y(). Else, it must be a tuple of kwargs\n to be used for calling check_array() on X and y respectively.\n **check_params : kwargs\n Parameters passed to :func:`sklearn.utils.check_array` or\n :func:`sklearn.utils.check_X_y`. Ignored if validate_separately\n is not False.\n\n Returns\n -------\n out : {ndarray, sparse matrix} or tuple of these\n The validated input. A tuple is returned if `y` is not None.\n \"\"\"\n\n if y is None:\n if self._get_tags()['requires_y']:\n raise ValueError(\n f\"This {self.__class__.__name__} estimator \"\n f\"requires y to be passed, but the target y is None.\"\n )\n X = check_array(X, **check_params)\n out = X\n else:\n if validate_separately:\n # We need this because some estimators validate X and y\n # separately, and in general, separately calling check_array()\n # on X and y isn't equivalent to just calling check_X_y()\n # :(\n check_X_params, check_y_params = validate_separately\n X = check_array(X, **check_X_params)\n y = check_array(y, **check_y_params)\n else:\n X, y = check_X_y(X, y, **check_params)\n out = X, y\n\n if check_params.get('ensure_2d', True):\n self._check_n_features(X, reset=reset)\n\n return out\n\n @property\n def _repr_html_(self):\n \"\"\"HTML representation of estimator.\n\n This is redundant with the logic of `_repr_mimebundle_`. 
The latter\n should be favorted in the long term, `_repr_html_` is only\n implemented for consumers who do not interpret `_repr_mimbundle_`.\n \"\"\"\n if get_config()[\"display\"] != 'diagram':\n raise AttributeError(\"_repr_html_ is only defined when the \"\n \"'display' configuration option is set to \"\n \"'diagram'\")\n return self._repr_html_inner\n\n def _repr_html_inner(self):\n \"\"\"This function is returned by the @property `_repr_html_` to make\n `hasattr(estimator, \"_repr_html_\") return `True` or `False` depending\n on `get_config()[\"display\"]`.\n \"\"\"\n return estimator_html_repr(self)\n\n def _repr_mimebundle_(self, **kwargs):\n \"\"\"Mime bundle used by jupyter kernels to display estimator\"\"\"\n output = {\"text/plain\": repr(self)}\n if get_config()[\"display\"] == 'diagram':\n output[\"text/html\"] = estimator_html_repr(self)\n return output\n\n\nclass ClassifierMixin:\n \"\"\"Mixin class for all classifiers in scikit-learn.\"\"\"\n\n _estimator_type = \"classifier\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"\n Return the mean accuracy on the given test data and labels.\n\n In multi-label classification, this is the subset accuracy\n which is a harsh metric since you require for each sample that\n each label set be correctly predicted.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True labels for X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n Mean accuracy of self.predict(X) wrt. y.\n \"\"\"\n from .metrics import accuracy_score\n return accuracy_score(y, self.predict(X), sample_weight=sample_weight)\n\n def _more_tags(self):\n return {'requires_y': True}\n\n\nclass RegressorMixin:\n \"\"\"Mixin class for all regression estimators in scikit-learn.\"\"\"\n _estimator_type = \"regressor\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Return the coefficient of determination R^2 of the prediction.\n\n The coefficient R^2 is defined as (1 - u/v), where u is the residual\n sum of squares ((y_true - y_pred) ** 2).sum() and v is the total\n sum of squares ((y_true - y_true.mean()) ** 2).sum().\n The best possible score is 1.0 and it can be negative (because the\n model can be arbitrarily worse). A constant model that always\n predicts the expected value of y, disregarding the input features,\n would get a R^2 score of 0.0.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples. For some estimators this may be a\n precomputed kernel matrix or a list of generic objects instead,\n shape = (n_samples, n_samples_fitted),\n where n_samples_fitted is the number of\n samples used in the fitting for the estimator.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True values for X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n R^2 of self.predict(X) wrt. 
y.\n\n Notes\n -----\n The R2 score used when calling ``score`` on a regressor uses\n ``multioutput='uniform_average'`` from version 0.23 to keep consistent\n with default value of :func:`~sklearn.metrics.r2_score`.\n This influences the ``score`` method of all the multioutput\n regressors (except for\n :class:`~sklearn.multioutput.MultiOutputRegressor`).\n \"\"\"\n\n from .metrics import r2_score\n y_pred = self.predict(X)\n return r2_score(y, y_pred, sample_weight=sample_weight)\n\n def _more_tags(self):\n return {'requires_y': True}\n\n\nclass ClusterMixin:\n \"\"\"Mixin class for all cluster estimators in scikit-learn.\"\"\"\n _estimator_type = \"clusterer\"\n\n def fit_predict(self, X, y=None):\n \"\"\"\n Perform clustering on X and returns cluster labels.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels.\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n self.fit(X)\n return self.labels_\n\n\nclass BiclusterMixin:\n \"\"\"Mixin class for all bicluster estimators in scikit-learn\"\"\"\n\n @property\n def biclusters_(self):\n \"\"\"Convenient way to get row and column indicators together.\n\n Returns the ``rows_`` and ``columns_`` members.\n \"\"\"\n return self.rows_, self.columns_\n\n def get_indices(self, i):\n \"\"\"Row and column indices of the i'th bicluster.\n\n Only works if ``rows_`` and ``columns_`` attributes exist.\n\n Parameters\n ----------\n i : int\n The index of the cluster.\n\n Returns\n -------\n row_ind : ndarray, dtype=np.intp\n Indices of rows in the dataset that belong to the bicluster.\n col_ind : ndarray, dtype=np.intp\n Indices of columns in the dataset that belong to the bicluster.\n\n \"\"\"\n rows = self.rows_[i]\n columns = self.columns_[i]\n return np.nonzero(rows)[0], np.nonzero(columns)[0]\n\n def get_shape(self, i):\n \"\"\"Shape of the i'th bicluster.\n\n Parameters\n ----------\n i : int\n The index of the cluster.\n\n Returns\n -------\n shape : tuple (int, int)\n Number of rows and columns (resp.) in the bicluster.\n \"\"\"\n indices = self.get_indices(i)\n return tuple(len(i) for i in indices)\n\n def get_submatrix(self, i, data):\n \"\"\"Return the submatrix corresponding to bicluster `i`.\n\n Parameters\n ----------\n i : int\n The index of the cluster.\n data : array-like\n The data.\n\n Returns\n -------\n submatrix : ndarray\n The submatrix corresponding to bicluster i.\n\n Notes\n -----\n Works with sparse matrices. 
Only works if ``rows_`` and\n ``columns_`` attributes exist.\n \"\"\"\n from .utils.validation import check_array\n data = check_array(data, accept_sparse='csr')\n row_ind, col_ind = self.get_indices(i)\n return data[row_ind[:, np.newaxis], col_ind]\n\n\nclass TransformerMixin:\n \"\"\"Mixin class for all transformers in scikit-learn.\"\"\"\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"\n Fit to data, then transform it.\n\n Fits transformer to X and y with optional parameters fit_params\n and returns a transformed version of X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features)\n\n y : ndarray of shape (n_samples,), default=None\n Target values.\n\n **fit_params : dict\n Additional fit parameters.\n\n Returns\n -------\n X_new : ndarray array of shape (n_samples, n_features_new)\n Transformed array.\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n if y is None:\n # fit method of arity 1 (unsupervised transformation)\n return self.fit(X, **fit_params).transform(X)\n else:\n # fit method of arity 2 (supervised transformation)\n return self.fit(X, y, **fit_params).transform(X)\n\n\nclass DensityMixin:\n \"\"\"Mixin class for all density estimators in scikit-learn.\"\"\"\n _estimator_type = \"DensityEstimator\"\n\n def score(self, X, y=None):\n \"\"\"Return the score of the model on the data X\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n score : float\n \"\"\"\n pass\n\n\nclass OutlierMixin:\n \"\"\"Mixin class for all outlier detection estimators in scikit-learn.\"\"\"\n _estimator_type = \"outlier_detector\"\n\n def fit_predict(self, X, y=None):\n \"\"\"Perform fit on X and returns labels for X.\n\n Returns -1 for outliers and 1 for inliers.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features)\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n 1 for inliers, -1 for outliers.\n \"\"\"\n # override for transductive outlier detectors like LocalOulierFactor\n return self.fit(X).predict(X)\n\n\nclass MetaEstimatorMixin:\n _required_parameters = [\"estimator\"]\n \"\"\"Mixin class for all meta estimators in scikit-learn.\"\"\"\n\n\nclass MultiOutputMixin:\n \"\"\"Mixin to mark estimators that support multioutput.\"\"\"\n def _more_tags(self):\n return {'multioutput': True}\n\n\nclass _UnstableArchMixin:\n \"\"\"Mark estimators that are non-determinstic on 32bit or PowerPC\"\"\"\n def _more_tags(self):\n return {'non_deterministic': (\n _IS_32BIT or platform.machine().startswith(('ppc', 'powerpc')))}\n\n\ndef is_classifier(estimator):\n \"\"\"Return True if the given estimator is (probably) a classifier.\n\n Parameters\n ----------\n estimator : object\n Estimator object to test.\n\n Returns\n -------\n out : bool\n True if estimator is a classifier and False otherwise.\n \"\"\"\n return getattr(estimator, \"_estimator_type\", None) == \"classifier\"\n\n\ndef is_regressor(estimator):\n \"\"\"Return True if the given estimator is (probably) a regressor.\n\n Parameters\n ----------\n estimator : object\n Estimator object to test.\n\n Returns\n -------\n out : bool\n True if estimator is a regressor and False otherwise.\n \"\"\"\n return getattr(estimator, \"_estimator_type\", None) 
== \"regressor\"\n\n\ndef is_outlier_detector(estimator):\n \"\"\"Return True if the given estimator is (probably) an outlier detector.\n\n Parameters\n ----------\n estimator : object\n Estimator object to test.\n\n Returns\n -------\n out : bool\n True if estimator is an outlier detector and False otherwise.\n \"\"\"\n return getattr(estimator, \"_estimator_type\", None) == \"outlier_detector\"\n", "# Authors:\n#\n# Giorgio Patrini\n#\n# License: BSD 3 clause\n\nimport warnings\nimport itertools\n\nimport numpy as np\nimport numpy.linalg as la\nfrom scipy import sparse, stats\nfrom scipy.sparse import random as sparse_random\n\nimport pytest\n\nfrom sklearn.utils import gen_batches\n\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_array_less\nfrom sklearn.utils._testing import assert_warns_message\nfrom sklearn.utils._testing import assert_no_warnings\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_allclose_dense_sparse\nfrom sklearn.utils._testing import skip_if_32bit\nfrom sklearn.utils._testing import _convert_container\n\nfrom sklearn.utils.sparsefuncs import mean_variance_axis\nfrom sklearn.preprocessing._data import _handle_zeros_in_scale\nfrom sklearn.preprocessing._data import Binarizer\nfrom sklearn.preprocessing._data import KernelCenterer\nfrom sklearn.preprocessing._data import Normalizer\nfrom sklearn.preprocessing._data import normalize\nfrom sklearn.preprocessing._data import StandardScaler\nfrom sklearn.preprocessing._data import scale\nfrom sklearn.preprocessing._data import MinMaxScaler\nfrom sklearn.preprocessing._data import minmax_scale\nfrom sklearn.preprocessing._data import QuantileTransformer\nfrom sklearn.preprocessing._data import quantile_transform\nfrom sklearn.preprocessing._data import MaxAbsScaler\nfrom sklearn.preprocessing._data import maxabs_scale\nfrom sklearn.preprocessing._data import RobustScaler\nfrom sklearn.preprocessing._data import robust_scale\nfrom sklearn.preprocessing._data import add_dummy_feature\nfrom sklearn.preprocessing._data import PolynomialFeatures\nfrom sklearn.preprocessing._data import PowerTransformer\nfrom sklearn.preprocessing._data import power_transform\nfrom sklearn.preprocessing._data import BOUNDS_THRESHOLD\nfrom sklearn.exceptions import NotFittedError\n\nfrom sklearn.base import clone\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.svm import SVR\nfrom sklearn.utils import shuffle\n\nfrom sklearn import datasets\n\niris = datasets.load_iris()\n\n# Make some data to be used many times\nrng = np.random.RandomState(0)\nn_features = 30\nn_samples = 1000\noffsets = rng.uniform(-1, 1, size=n_features)\nscales = rng.uniform(1, 10, size=n_features)\nX_2d = rng.randn(n_samples, n_features) * scales + offsets\nX_1row = X_2d[0, :].reshape(1, n_features)\nX_1col = X_2d[:, 0].reshape(n_samples, 1)\nX_list_1row = X_1row.tolist()\nX_list_1col = X_1col.tolist()\n\n\ndef toarray(a):\n if hasattr(a, \"toarray\"):\n a = a.toarray()\n return a\n\n\ndef _check_dim_1axis(a):\n return np.asarray(a).shape[0]\n\n\ndef assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,\n n_samples_seen):\n if batch_stop != n:\n assert (i + 1) * chunk_size == n_samples_seen\n else:\n assert (i * chunk_size + (batch_stop - batch_start) ==\n n_samples_seen)\n\n\ndef 
test_polynomial_features():\n # Test Polynomial Features\n X1 = np.arange(6)[:, np.newaxis]\n P1 = np.hstack([np.ones_like(X1),\n X1, X1 ** 2, X1 ** 3])\n deg1 = 3\n\n X2 = np.arange(6).reshape((3, 2))\n x1 = X2[:, :1]\n x2 = X2[:, 1:]\n P2 = np.hstack([x1 ** 0 * x2 ** 0,\n x1 ** 1 * x2 ** 0,\n x1 ** 0 * x2 ** 1,\n x1 ** 2 * x2 ** 0,\n x1 ** 1 * x2 ** 1,\n x1 ** 0 * x2 ** 2])\n deg2 = 2\n\n for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:\n P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)\n assert_array_almost_equal(P_test, P)\n\n P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)\n assert_array_almost_equal(P_test, P[:, 1:])\n\n interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)\n X_poly = interact.fit_transform(X)\n assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])\n\n assert interact.powers_.shape == (interact.n_output_features_,\n interact.n_input_features_)\n\n\ndef test_polynomial_feature_names():\n X = np.arange(30).reshape(10, 3)\n poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)\n feature_names = poly.get_feature_names()\n assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',\n 'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],\n feature_names)\n\n poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)\n feature_names = poly.get_feature_names([\"a\", \"b\", \"c\"])\n assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',\n 'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',\n 'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',\n 'b c^2', 'c^3'], feature_names)\n # test some unicode\n poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)\n feature_names = poly.get_feature_names(\n [\"\\u0001F40D\", \"\\u262E\", \"\\u05D0\"])\n assert_array_equal([\"1\", \"\\u0001F40D\", \"\\u262E\", \"\\u05D0\"],\n feature_names)\n\n\ndef test_polynomial_feature_array_order():\n X = np.arange(10).reshape(5, 2)\n\n def is_c_contiguous(a):\n return np.isfortran(a.T)\n\n assert is_c_contiguous(PolynomialFeatures().fit_transform(X))\n assert is_c_contiguous(PolynomialFeatures(order='C').fit_transform(X))\n assert np.isfortran(PolynomialFeatures(order='F').fit_transform(X))\n\n\[email protected](['deg', 'include_bias', 'interaction_only', 'dtype'],\n [(1, True, False, int),\n (2, True, False, int),\n (2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64),\n (4, False, False, np.float64),\n (4, False, True, np.float64)])\ndef test_polynomial_features_csc_X(deg, include_bias, interaction_only, dtype):\n rng = np.random.RandomState(0)\n X = rng.randint(0, 2, (100, 2))\n X_csc = sparse.csc_matrix(X)\n\n est = PolynomialFeatures(deg, include_bias=include_bias,\n interaction_only=interaction_only)\n Xt_csc = est.fit_transform(X_csc.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype))\n\n assert isinstance(Xt_csc, sparse.csc_matrix)\n assert Xt_csc.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csc.A, Xt_dense)\n\n\[email protected](['deg', 'include_bias', 'interaction_only', 'dtype'],\n [(1, True, False, int),\n (2, True, False, int),\n (2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64)])\ndef test_polynomial_features_csr_X(deg, include_bias, interaction_only, dtype):\n rng = np.random.RandomState(0)\n X = rng.randint(0, 2, (100, 2))\n X_csr = sparse.csr_matrix(X)\n\n est = PolynomialFeatures(deg, include_bias=include_bias,\n interaction_only=interaction_only)\n Xt_csr = 
est.fit_transform(X_csr.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype, copy=False))\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](['deg', 'include_bias', 'interaction_only', 'dtype'],\n [(2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64)])\ndef test_polynomial_features_csr_X_floats(deg, include_bias,\n interaction_only, dtype):\n X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(deg, include_bias=include_bias,\n interaction_only=interaction_only)\n Xt_csr = est.fit_transform(X_csr.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype))\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](['zero_row_index', 'deg', 'interaction_only'],\n [(0, 2, True), (1, 2, True), (2, 2, True),\n (0, 3, True), (1, 3, True), (2, 3, True),\n (0, 2, False), (1, 2, False), (2, 2, False),\n (0, 3, False), (1, 3, False), (2, 3, False)])\ndef test_polynomial_features_csr_X_zero_row(zero_row_index, deg,\n interaction_only):\n X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()\n X_csr[zero_row_index, :] = 0.0\n X = X_csr.toarray()\n\n est = PolynomialFeatures(deg, include_bias=False,\n interaction_only=interaction_only)\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\n# This degree should always be one more than the highest degree supported by\n# _csr_expansion.\[email protected](['include_bias', 'interaction_only'],\n [(True, True), (True, False),\n (False, True), (False, False)])\ndef test_polynomial_features_csr_X_degree_4(include_bias, interaction_only):\n X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(4, include_bias=include_bias,\n interaction_only=interaction_only)\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](['deg', 'dim', 'interaction_only'],\n [(2, 1, True),\n (2, 2, True),\n (3, 1, True),\n (3, 2, True),\n (3, 3, True),\n (2, 1, False),\n (2, 2, False),\n (3, 1, False),\n (3, 2, False),\n (3, 3, False)])\ndef test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only):\n X_csr = sparse_random(1000, dim, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(deg, interaction_only=interaction_only)\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\ndef test_standard_scaler_1d():\n # Test scaling of dataset along single axis\n for X in [X_1row, X_1col, X_list_1row, X_list_1row]:\n\n scaler = StandardScaler()\n X_scaled = scaler.fit(X).transform(X, copy=True)\n\n if isinstance(X, list):\n X = np.array(X) # cast only after scaling done\n\n if _check_dim_1axis(X) == 1:\n assert_almost_equal(scaler.mean_, X.ravel())\n assert_almost_equal(scaler.scale_, np.ones(n_features))\n assert_array_almost_equal(X_scaled.mean(axis=0),\n 
np.zeros_like(n_features))\n assert_array_almost_equal(X_scaled.std(axis=0),\n np.zeros_like(n_features))\n else:\n assert_almost_equal(scaler.mean_, X.mean())\n assert_almost_equal(scaler.scale_, X.std())\n assert_array_almost_equal(X_scaled.mean(axis=0),\n np.zeros_like(n_features))\n assert_array_almost_equal(X_scaled.mean(axis=0), .0)\n assert_array_almost_equal(X_scaled.std(axis=0), 1.)\n assert scaler.n_samples_seen_ == X.shape[0]\n\n # check inverse transform\n X_scaled_back = scaler.inverse_transform(X_scaled)\n assert_array_almost_equal(X_scaled_back, X)\n\n # Constant feature\n X = np.ones((5, 1))\n scaler = StandardScaler()\n X_scaled = scaler.fit(X).transform(X, copy=True)\n assert_almost_equal(scaler.mean_, 1.)\n assert_almost_equal(scaler.scale_, 1.)\n assert_array_almost_equal(X_scaled.mean(axis=0), .0)\n assert_array_almost_equal(X_scaled.std(axis=0), .0)\n assert scaler.n_samples_seen_ == X.shape[0]\n\n\ndef test_standard_scaler_dtype():\n # Ensure scaling does not affect dtype\n rng = np.random.RandomState(0)\n n_samples = 10\n n_features = 3\n for dtype in [np.float16, np.float32, np.float64]:\n X = rng.randn(n_samples, n_features).astype(dtype)\n scaler = StandardScaler()\n X_scaled = scaler.fit(X).transform(X)\n assert X.dtype == X_scaled.dtype\n assert scaler.mean_.dtype == np.float64\n assert scaler.scale_.dtype == np.float64\n\n\ndef test_scale_1d():\n # 1-d inputs\n X_list = [1., 3., 5., 0.]\n X_arr = np.array(X_list)\n\n for X in [X_list, X_arr]:\n X_scaled = scale(X)\n assert_array_almost_equal(X_scaled.mean(), 0.0)\n assert_array_almost_equal(X_scaled.std(), 1.0)\n assert_array_equal(scale(X, with_mean=False, with_std=False), X)\n\n\n@skip_if_32bit\ndef test_standard_scaler_numerical_stability():\n # Test numerical stability of scaling\n # np.log(1e-5) is taken because of its floating point representation\n # was empirically found to cause numerical problems with np.mean & np.std.\n\n x = np.full(8, np.log(1e-5), dtype=np.float64)\n # This does not raise a warning as the number of samples is too low\n # to trigger the problem in recent numpy\n x_scaled = assert_no_warnings(scale, x)\n assert_array_almost_equal(scale(x), np.zeros(8))\n\n # with 2 more samples, the std computation run into numerical issues:\n x = np.full(10, np.log(1e-5), dtype=np.float64)\n w = \"standard deviation of the data is probably very close to 0\"\n x_scaled = assert_warns_message(UserWarning, w, scale, x)\n assert_array_almost_equal(x_scaled, np.zeros(10))\n\n x = np.full(10, 1e-100, dtype=np.float64)\n x_small_scaled = assert_no_warnings(scale, x)\n assert_array_almost_equal(x_small_scaled, np.zeros(10))\n\n # Large values can cause (often recoverable) numerical stability issues:\n x_big = np.full(10, 1e100, dtype=np.float64)\n w = \"Dataset may contain too large values\"\n x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)\n assert_array_almost_equal(x_big_scaled, np.zeros(10))\n assert_array_almost_equal(x_big_scaled, x_small_scaled)\n\n x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,\n with_std=False)\n assert_array_almost_equal(x_big_centered, np.zeros(10))\n assert_array_almost_equal(x_big_centered, x_small_scaled)\n\n\ndef test_scaler_2d_arrays():\n # Test scaling of 2d array along first axis\n rng = np.random.RandomState(0)\n n_features = 5\n n_samples = 4\n X = rng.randn(n_samples, n_features)\n X[:, 0] = 0.0 # first feature is always of zero\n\n scaler = StandardScaler()\n X_scaled = scaler.fit(X).transform(X, copy=True)\n assert not 
np.any(np.isnan(X_scaled))\n assert scaler.n_samples_seen_ == n_samples\n\n assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n # Check that X has been copied\n assert X_scaled is not X\n\n # check inverse transform\n X_scaled_back = scaler.inverse_transform(X_scaled)\n assert X_scaled_back is not X\n assert X_scaled_back is not X_scaled\n assert_array_almost_equal(X_scaled_back, X)\n\n X_scaled = scale(X, axis=1, with_std=False)\n assert not np.any(np.isnan(X_scaled))\n assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])\n X_scaled = scale(X, axis=1, with_std=True)\n assert not np.any(np.isnan(X_scaled))\n assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])\n # Check that the data hasn't been modified\n assert X_scaled is not X\n\n X_scaled = scaler.fit(X).transform(X, copy=False)\n assert not np.any(np.isnan(X_scaled))\n assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n # Check that X has not been copied\n assert X_scaled is X\n\n X = rng.randn(4, 5)\n X[:, 0] = 1.0 # first feature is a constant, non zero feature\n scaler = StandardScaler()\n X_scaled = scaler.fit(X).transform(X, copy=True)\n assert not np.any(np.isnan(X_scaled))\n assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n # Check that X has not been copied\n assert X_scaled is not X\n\n\ndef test_scaler_float16_overflow():\n # Test if the scaler will not overflow on float16 numpy arrays\n rng = np.random.RandomState(0)\n # float16 has a maximum of 65500.0. On the worst case 5 * 200000 is 100000\n # which is enough to overflow the data type\n X = rng.uniform(5, 10, [200000, 1]).astype(np.float16)\n\n with np.errstate(over='raise'):\n scaler = StandardScaler().fit(X)\n X_scaled = scaler.transform(X)\n\n # Calculate the float64 equivalent to verify result\n X_scaled_f64 = StandardScaler().fit_transform(X.astype(np.float64))\n\n # Overflow calculations may cause -inf, inf, or nan. Since there is no nan\n # input, all of the outputs should be finite. This may be redundant since a\n # FloatingPointError exception will be thrown on overflow above.\n assert np.all(np.isfinite(X_scaled))\n\n # The normal distribution is very unlikely to go above 4. At 4.0-8.0 the\n # float16 precision is 2^-8 which is around 0.004. 
Thus only 2 decimals are\n # checked to account for precision differences.\n assert_array_almost_equal(X_scaled, X_scaled_f64, decimal=2)\n\n\ndef test_handle_zeros_in_scale():\n s1 = np.array([0, 1, 2, 3])\n s2 = _handle_zeros_in_scale(s1, copy=True)\n\n assert not s1[0] == s2[0]\n assert_array_equal(s1, np.array([0, 1, 2, 3]))\n assert_array_equal(s2, np.array([1, 1, 2, 3]))\n\n\ndef test_minmax_scaler_partial_fit():\n # Test if partial_fit run over many batches of size 1 and 50\n # gives the same results as fit\n X = X_2d\n n = X.shape[0]\n\n for chunk_size in [1, 2, 50, n, n + 42]:\n # Test mean at the end of the process\n scaler_batch = MinMaxScaler().fit(X)\n\n scaler_incr = MinMaxScaler()\n for batch in gen_batches(n_samples, chunk_size):\n scaler_incr = scaler_incr.partial_fit(X[batch])\n\n assert_array_almost_equal(scaler_batch.data_min_,\n scaler_incr.data_min_)\n assert_array_almost_equal(scaler_batch.data_max_,\n scaler_incr.data_max_)\n assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_\n assert_array_almost_equal(scaler_batch.data_range_,\n scaler_incr.data_range_)\n assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)\n assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)\n\n # Test std after 1 step\n batch0 = slice(0, chunk_size)\n scaler_batch = MinMaxScaler().fit(X[batch0])\n scaler_incr = MinMaxScaler().partial_fit(X[batch0])\n\n assert_array_almost_equal(scaler_batch.data_min_,\n scaler_incr.data_min_)\n assert_array_almost_equal(scaler_batch.data_max_,\n scaler_incr.data_max_)\n assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_\n assert_array_almost_equal(scaler_batch.data_range_,\n scaler_incr.data_range_)\n assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)\n assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)\n\n # Test std until the end of partial fits, and\n scaler_batch = MinMaxScaler().fit(X)\n scaler_incr = MinMaxScaler() # Clean estimator\n for i, batch in enumerate(gen_batches(n_samples, chunk_size)):\n scaler_incr = scaler_incr.partial_fit(X[batch])\n assert_correct_incr(i, batch_start=batch.start,\n batch_stop=batch.stop, n=n,\n chunk_size=chunk_size,\n n_samples_seen=scaler_incr.n_samples_seen_)\n\n\ndef test_standard_scaler_partial_fit():\n # Test if partial_fit run over many batches of size 1 and 50\n # gives the same results as fit\n X = X_2d\n n = X.shape[0]\n\n for chunk_size in [1, 2, 50, n, n + 42]:\n # Test mean at the end of the process\n scaler_batch = StandardScaler(with_std=False).fit(X)\n\n scaler_incr = StandardScaler(with_std=False)\n for batch in gen_batches(n_samples, chunk_size):\n scaler_incr = scaler_incr.partial_fit(X[batch])\n\n assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)\n assert scaler_batch.var_ == scaler_incr.var_ # Nones\n assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_\n\n # Test std after 1 step\n batch0 = slice(0, chunk_size)\n scaler_incr = StandardScaler().partial_fit(X[batch0])\n if chunk_size == 1:\n assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),\n scaler_incr.var_)\n assert_array_almost_equal(np.ones(n_features, dtype=np.float64),\n scaler_incr.scale_)\n else:\n assert_array_almost_equal(np.var(X[batch0], axis=0),\n scaler_incr.var_)\n assert_array_almost_equal(np.std(X[batch0], axis=0),\n scaler_incr.scale_) # no constants\n\n # Test std until the end of partial fits, and\n scaler_batch = StandardScaler().fit(X)\n scaler_incr = StandardScaler() # Clean estimator\n for i, batch in 
enumerate(gen_batches(n_samples, chunk_size)):\n scaler_incr = scaler_incr.partial_fit(X[batch])\n assert_correct_incr(i, batch_start=batch.start,\n batch_stop=batch.stop, n=n,\n chunk_size=chunk_size,\n n_samples_seen=scaler_incr.n_samples_seen_)\n\n assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)\n assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_\n\n\ndef test_standard_scaler_partial_fit_numerical_stability():\n # Test if the incremental computation introduces significative errors\n # for large datasets with values of large magniture\n rng = np.random.RandomState(0)\n n_features = 2\n n_samples = 100\n offsets = rng.uniform(-1e15, 1e15, size=n_features)\n scales = rng.uniform(1e3, 1e6, size=n_features)\n X = rng.randn(n_samples, n_features) * scales + offsets\n\n scaler_batch = StandardScaler().fit(X)\n scaler_incr = StandardScaler()\n for chunk in X:\n scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))\n\n # Regardless of abs values, they must not be more diff 6 significant digits\n tol = 10 ** (-6)\n assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)\n assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)\n assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)\n # NOTE Be aware that for much larger offsets std is very unstable (last\n # assert) while mean is OK.\n\n # Sparse input\n size = (100, 3)\n scale = 1e20\n X = rng.randint(0, 2, size).astype(np.float64) * scale\n X_csr = sparse.csr_matrix(X)\n X_csc = sparse.csc_matrix(X)\n\n for X in [X_csr, X_csc]:\n # with_mean=False is required with sparse input\n scaler = StandardScaler(with_mean=False).fit(X)\n scaler_incr = StandardScaler(with_mean=False)\n\n for chunk in X:\n # chunk = sparse.csr_matrix(data_chunks)\n scaler_incr = scaler_incr.partial_fit(chunk)\n\n # Regardless of magnitude, they must not differ more than of 6 digits\n tol = 10 ** (-6)\n assert scaler.mean_ is not None\n assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)\n assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)\n\n\ndef test_partial_fit_sparse_input():\n # Check that sparsity is not destroyed\n X = np.array([[1.], [0.], [0.], [5.]])\n X_csr = sparse.csr_matrix(X)\n X_csc = sparse.csc_matrix(X)\n\n null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)\n for X in [X_csr, X_csc]:\n\n X_null = null_transform.partial_fit(X).transform(X)\n assert_array_equal(X_null.data, X.data)\n X_orig = null_transform.inverse_transform(X_null)\n assert_array_equal(X_orig.data, X_null.data)\n assert_array_equal(X_orig.data, X.data)\n\n\ndef test_standard_scaler_trasform_with_partial_fit():\n # Check some postconditions after applying partial_fit and transform\n X = X_2d[:100, :]\n\n scaler_incr = StandardScaler()\n for i, batch in enumerate(gen_batches(X.shape[0], 1)):\n\n X_sofar = X[:(i + 1), :]\n chunks_copy = X_sofar.copy()\n scaled_batch = StandardScaler().fit_transform(X_sofar)\n\n scaler_incr = scaler_incr.partial_fit(X[batch])\n scaled_incr = scaler_incr.transform(X_sofar)\n\n assert_array_almost_equal(scaled_batch, scaled_incr)\n assert_array_almost_equal(X_sofar, chunks_copy) # No change\n right_input = scaler_incr.inverse_transform(scaled_incr)\n assert_array_almost_equal(X_sofar, right_input)\n\n zero = np.zeros(X.shape[1])\n epsilon = np.finfo(float).eps\n assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal\n assert_array_less(zero, scaler_incr.scale_ + epsilon)\n # (i+1) because the Scaler has been already fitted\n assert 
(i + 1) == scaler_incr.n_samples_seen_\n\n\ndef test_min_max_scaler_iris():\n X = iris.data\n scaler = MinMaxScaler()\n # default params\n X_trans = scaler.fit_transform(X)\n assert_array_almost_equal(X_trans.min(axis=0), 0)\n assert_array_almost_equal(X_trans.max(axis=0), 1)\n X_trans_inv = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n\n # not default params: min=1, max=2\n scaler = MinMaxScaler(feature_range=(1, 2))\n X_trans = scaler.fit_transform(X)\n assert_array_almost_equal(X_trans.min(axis=0), 1)\n assert_array_almost_equal(X_trans.max(axis=0), 2)\n X_trans_inv = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n\n # min=-.5, max=.6\n scaler = MinMaxScaler(feature_range=(-.5, .6))\n X_trans = scaler.fit_transform(X)\n assert_array_almost_equal(X_trans.min(axis=0), -.5)\n assert_array_almost_equal(X_trans.max(axis=0), .6)\n X_trans_inv = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n\n # raises on invalid range\n scaler = MinMaxScaler(feature_range=(2, 1))\n with pytest.raises(ValueError):\n scaler.fit(X)\n\n\ndef test_min_max_scaler_zero_variance_features():\n # Check min max scaler on toy data with zero variance features\n X = [[0., 1., +0.5],\n [0., 1., -0.1],\n [0., 1., +1.1]]\n\n X_new = [[+0., 2., 0.5],\n [-1., 1., 0.0],\n [+0., 1., 1.5]]\n\n # default params\n scaler = MinMaxScaler()\n X_trans = scaler.fit_transform(X)\n X_expected_0_1 = [[0., 0., 0.5],\n [0., 0., 0.0],\n [0., 0., 1.0]]\n assert_array_almost_equal(X_trans, X_expected_0_1)\n X_trans_inv = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n\n X_trans_new = scaler.transform(X_new)\n X_expected_0_1_new = [[+0., 1., 0.500],\n [-1., 0., 0.083],\n [+0., 0., 1.333]]\n assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)\n\n # not default params\n scaler = MinMaxScaler(feature_range=(1, 2))\n X_trans = scaler.fit_transform(X)\n X_expected_1_2 = [[1., 1., 1.5],\n [1., 1., 1.0],\n [1., 1., 2.0]]\n assert_array_almost_equal(X_trans, X_expected_1_2)\n\n # function interface\n X_trans = minmax_scale(X)\n assert_array_almost_equal(X_trans, X_expected_0_1)\n X_trans = minmax_scale(X, feature_range=(1, 2))\n assert_array_almost_equal(X_trans, X_expected_1_2)\n\n\ndef test_minmax_scale_axis1():\n X = iris.data\n X_trans = minmax_scale(X, axis=1)\n assert_array_almost_equal(np.min(X_trans, axis=1), 0)\n assert_array_almost_equal(np.max(X_trans, axis=1), 1)\n\n\ndef test_min_max_scaler_1d():\n # Test scaling of dataset along single axis\n for X in [X_1row, X_1col, X_list_1row, X_list_1row]:\n\n scaler = MinMaxScaler(copy=True)\n X_scaled = scaler.fit(X).transform(X)\n\n if isinstance(X, list):\n X = np.array(X) # cast only after scaling done\n\n if _check_dim_1axis(X) == 1:\n assert_array_almost_equal(X_scaled.min(axis=0),\n np.zeros(n_features))\n assert_array_almost_equal(X_scaled.max(axis=0),\n np.zeros(n_features))\n else:\n assert_array_almost_equal(X_scaled.min(axis=0), .0)\n assert_array_almost_equal(X_scaled.max(axis=0), 1.)\n assert scaler.n_samples_seen_ == X.shape[0]\n\n # check inverse transform\n X_scaled_back = scaler.inverse_transform(X_scaled)\n assert_array_almost_equal(X_scaled_back, X)\n\n # Constant feature\n X = np.ones((5, 1))\n scaler = MinMaxScaler()\n X_scaled = scaler.fit(X).transform(X)\n assert X_scaled.min() >= 0.\n assert X_scaled.max() <= 1.\n assert scaler.n_samples_seen_ == X.shape[0]\n\n # Function interface\n X_1d = X_1row.ravel()\n min_ = X_1d.min()\n 
max_ = X_1d.max()\n assert_array_almost_equal((X_1d - min_) / (max_ - min_),\n minmax_scale(X_1d, copy=True))\n\n\ndef test_scaler_without_centering():\n rng = np.random.RandomState(42)\n X = rng.randn(4, 5)\n X[:, 0] = 0.0 # first feature is always of zero\n X_csr = sparse.csr_matrix(X)\n X_csc = sparse.csc_matrix(X)\n\n with pytest.raises(ValueError):\n StandardScaler().fit(X_csr)\n with pytest.raises(ValueError):\n StandardScaler().fit(X_csc)\n\n null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)\n X_null = null_transform.fit_transform(X_csr)\n assert_array_equal(X_null.data, X_csr.data)\n X_orig = null_transform.inverse_transform(X_null)\n assert_array_equal(X_orig.data, X_csr.data)\n\n scaler = StandardScaler(with_mean=False).fit(X)\n X_scaled = scaler.transform(X, copy=True)\n assert not np.any(np.isnan(X_scaled))\n\n scaler_csr = StandardScaler(with_mean=False).fit(X_csr)\n X_csr_scaled = scaler_csr.transform(X_csr, copy=True)\n assert not np.any(np.isnan(X_csr_scaled.data))\n\n scaler_csc = StandardScaler(with_mean=False).fit(X_csc)\n X_csc_scaled = scaler_csc.transform(X_csc, copy=True)\n assert not np.any(np.isnan(X_csc_scaled.data))\n\n assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)\n assert_array_almost_equal(scaler.var_, scaler_csr.var_)\n assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)\n\n assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)\n assert_array_almost_equal(scaler.var_, scaler_csc.var_)\n assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)\n\n assert_array_almost_equal(\n X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n\n X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)\n assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))\n assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))\n\n # Check that X has not been modified (copy)\n assert X_scaled is not X\n assert X_csr_scaled is not X_csr\n\n X_scaled_back = scaler.inverse_transform(X_scaled)\n assert X_scaled_back is not X\n assert X_scaled_back is not X_scaled\n assert_array_almost_equal(X_scaled_back, X)\n\n X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)\n assert X_csr_scaled_back is not X_csr\n assert X_csr_scaled_back is not X_csr_scaled\n assert_array_almost_equal(X_csr_scaled_back.toarray(), X)\n\n X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())\n assert X_csc_scaled_back is not X_csc\n assert X_csc_scaled_back is not X_csc_scaled\n assert_array_almost_equal(X_csc_scaled_back.toarray(), X)\n\n\[email protected](\"with_mean\", [True, False])\[email protected](\"with_std\", [True, False])\[email protected](\"array_constructor\",\n [np.asarray, sparse.csc_matrix, sparse.csr_matrix])\ndef test_scaler_n_samples_seen_with_nan(with_mean, with_std,\n array_constructor):\n X = np.array([[0, 1, 3],\n [np.nan, 6, 10],\n [5, 4, np.nan],\n [8, 0, np.nan]],\n dtype=np.float64)\n X = array_constructor(X)\n\n if sparse.issparse(X) and with_mean:\n pytest.skip(\"'with_mean=True' cannot be used with sparse matrix.\")\n\n transformer = StandardScaler(with_mean=with_mean, with_std=with_std)\n transformer.fit(X)\n\n assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2]))\n\n\ndef _check_identity_scalers_attributes(scaler_1, scaler_2):\n assert scaler_1.mean_ is scaler_2.mean_ is None\n assert scaler_1.var_ is scaler_2.var_ is None\n assert scaler_1.scale_ is scaler_2.scale_ is None\n 
assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_\n\n\ndef test_scaler_return_identity():\n # test that the scaler return identity when with_mean and with_std are\n # False\n X_dense = np.array([[0, 1, 3],\n [5, 6, 0],\n [8, 0, 10]],\n dtype=np.float64)\n X_csr = sparse.csr_matrix(X_dense)\n X_csc = X_csr.tocsc()\n\n transformer_dense = StandardScaler(with_mean=False, with_std=False)\n X_trans_dense = transformer_dense.fit_transform(X_dense)\n\n transformer_csr = clone(transformer_dense)\n X_trans_csr = transformer_csr.fit_transform(X_csr)\n\n transformer_csc = clone(transformer_dense)\n X_trans_csc = transformer_csc.fit_transform(X_csc)\n\n assert_allclose_dense_sparse(X_trans_csr, X_csr)\n assert_allclose_dense_sparse(X_trans_csc, X_csc)\n assert_allclose(X_trans_dense, X_dense)\n\n for trans_1, trans_2 in itertools.combinations([transformer_dense,\n transformer_csr,\n transformer_csc],\n 2):\n _check_identity_scalers_attributes(trans_1, trans_2)\n\n transformer_dense.partial_fit(X_dense)\n transformer_csr.partial_fit(X_csr)\n transformer_csc.partial_fit(X_csc)\n\n for trans_1, trans_2 in itertools.combinations([transformer_dense,\n transformer_csr,\n transformer_csc],\n 2):\n _check_identity_scalers_attributes(trans_1, trans_2)\n\n transformer_dense.fit(X_dense)\n transformer_csr.fit(X_csr)\n transformer_csc.fit(X_csc)\n\n for trans_1, trans_2 in itertools.combinations([transformer_dense,\n transformer_csr,\n transformer_csc],\n 2):\n _check_identity_scalers_attributes(trans_1, trans_2)\n\n\ndef test_scaler_int():\n # test that scaler converts integer input to floating\n # for both sparse and dense matrices\n rng = np.random.RandomState(42)\n X = rng.randint(20, size=(4, 5))\n X[:, 0] = 0 # first feature is always of zero\n X_csr = sparse.csr_matrix(X)\n X_csc = sparse.csc_matrix(X)\n\n null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)\n with warnings.catch_warnings(record=True):\n X_null = null_transform.fit_transform(X_csr)\n assert_array_equal(X_null.data, X_csr.data)\n X_orig = null_transform.inverse_transform(X_null)\n assert_array_equal(X_orig.data, X_csr.data)\n\n with warnings.catch_warnings(record=True):\n scaler = StandardScaler(with_mean=False).fit(X)\n X_scaled = scaler.transform(X, copy=True)\n assert not np.any(np.isnan(X_scaled))\n\n with warnings.catch_warnings(record=True):\n scaler_csr = StandardScaler(with_mean=False).fit(X_csr)\n X_csr_scaled = scaler_csr.transform(X_csr, copy=True)\n assert not np.any(np.isnan(X_csr_scaled.data))\n\n with warnings.catch_warnings(record=True):\n scaler_csc = StandardScaler(with_mean=False).fit(X_csc)\n X_csc_scaled = scaler_csc.transform(X_csc, copy=True)\n assert not np.any(np.isnan(X_csc_scaled.data))\n\n assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)\n assert_array_almost_equal(scaler.var_, scaler_csr.var_)\n assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)\n\n assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)\n assert_array_almost_equal(scaler.var_, scaler_csc.var_)\n assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)\n\n assert_array_almost_equal(\n X_scaled.mean(axis=0),\n [0., 1.109, 1.856, 21., 1.559], 2)\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n\n X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(\n X_csr_scaled.astype(np.float), 0)\n assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))\n assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))\n\n # Check that X has not been modified 
(copy)\n assert X_scaled is not X\n assert X_csr_scaled is not X_csr\n\n X_scaled_back = scaler.inverse_transform(X_scaled)\n assert X_scaled_back is not X\n assert X_scaled_back is not X_scaled\n assert_array_almost_equal(X_scaled_back, X)\n\n X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)\n assert X_csr_scaled_back is not X_csr\n assert X_csr_scaled_back is not X_csr_scaled\n assert_array_almost_equal(X_csr_scaled_back.toarray(), X)\n\n X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())\n assert X_csc_scaled_back is not X_csc\n assert X_csc_scaled_back is not X_csc_scaled\n assert_array_almost_equal(X_csc_scaled_back.toarray(), X)\n\n\ndef test_scaler_without_copy():\n # Check that StandardScaler.fit does not change input\n rng = np.random.RandomState(42)\n X = rng.randn(4, 5)\n X[:, 0] = 0.0 # first feature is always of zero\n X_csr = sparse.csr_matrix(X)\n X_csc = sparse.csc_matrix(X)\n\n X_copy = X.copy()\n StandardScaler(copy=False).fit(X)\n assert_array_equal(X, X_copy)\n\n X_csr_copy = X_csr.copy()\n StandardScaler(with_mean=False, copy=False).fit(X_csr)\n assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())\n\n X_csc_copy = X_csc.copy()\n StandardScaler(with_mean=False, copy=False).fit(X_csc)\n assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())\n\n\ndef test_scale_sparse_with_mean_raise_exception():\n rng = np.random.RandomState(42)\n X = rng.randn(4, 5)\n X_csr = sparse.csr_matrix(X)\n X_csc = sparse.csc_matrix(X)\n\n # check scaling and fit with direct calls on sparse data\n with pytest.raises(ValueError):\n scale(X_csr, with_mean=True)\n with pytest.raises(ValueError):\n StandardScaler(with_mean=True).fit(X_csr)\n\n with pytest.raises(ValueError):\n scale(X_csc, with_mean=True)\n with pytest.raises(ValueError):\n StandardScaler(with_mean=True).fit(X_csc)\n\n # check transform and inverse_transform after a fit on a dense array\n scaler = StandardScaler(with_mean=True).fit(X)\n with pytest.raises(ValueError):\n scaler.transform(X_csr)\n with pytest.raises(ValueError):\n scaler.transform(X_csc)\n\n X_transformed_csr = sparse.csr_matrix(scaler.transform(X))\n with pytest.raises(ValueError):\n scaler.inverse_transform(X_transformed_csr)\n\n X_transformed_csc = sparse.csc_matrix(scaler.transform(X))\n with pytest.raises(ValueError):\n scaler.inverse_transform(X_transformed_csc)\n\n\ndef test_scale_input_finiteness_validation():\n # Check if non finite inputs raise ValueError\n X = [[np.inf, 5, 6, 7, 8]]\n with pytest.raises(ValueError, match=\"Input contains infinity \"\n \"or a value too large\"):\n scale(X)\n\n\ndef test_robust_scaler_error_sparse():\n X_sparse = sparse.rand(1000, 10)\n scaler = RobustScaler(with_centering=True)\n err_msg = \"Cannot center sparse matrices\"\n with pytest.raises(ValueError, match=err_msg):\n scaler.fit(X_sparse)\n\n\[email protected](\"with_centering\", [True, False])\[email protected](\"with_scaling\", [True, False])\[email protected](\"X\", [np.random.randn(10, 3),\n sparse.rand(10, 3, density=0.5)])\ndef test_robust_scaler_attributes(X, with_centering, with_scaling):\n # check consistent type of attributes\n if with_centering and sparse.issparse(X):\n pytest.skip(\"RobustScaler cannot center sparse matrix\")\n\n scaler = RobustScaler(with_centering=with_centering,\n with_scaling=with_scaling)\n scaler.fit(X)\n\n if with_centering:\n assert isinstance(scaler.center_, np.ndarray)\n else:\n assert scaler.center_ is None\n if with_scaling:\n assert isinstance(scaler.scale_, np.ndarray)\n else:\n assert 
scaler.scale_ is None\n\n\ndef test_robust_scaler_col_zero_sparse():\n # check that the scaler is working when there is not data materialized in a\n # column of a sparse matrix\n X = np.random.randn(10, 5)\n X[:, 0] = 0\n X = sparse.csr_matrix(X)\n\n scaler = RobustScaler(with_centering=False)\n scaler.fit(X)\n assert scaler.scale_[0] == pytest.approx(1)\n\n X_trans = scaler.transform(X)\n assert_allclose(X[:, 0].toarray(), X_trans[:, 0].toarray())\n\n\ndef test_robust_scaler_2d_arrays():\n # Test robust scaling of 2d array along first axis\n rng = np.random.RandomState(0)\n X = rng.randn(4, 5)\n X[:, 0] = 0.0 # first feature is always of zero\n\n scaler = RobustScaler()\n X_scaled = scaler.fit(X).transform(X)\n\n assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)\n\n\[email protected](\"density\", [0, 0.05, 0.1, 0.5, 1])\[email protected](\"strictly_signed\",\n ['positive', 'negative', 'zeros', None])\ndef test_robust_scaler_equivalence_dense_sparse(density, strictly_signed):\n # Check the equivalence of the fitting with dense and sparse matrices\n X_sparse = sparse.rand(1000, 5, density=density).tocsc()\n if strictly_signed == 'positive':\n X_sparse.data = np.abs(X_sparse.data)\n elif strictly_signed == 'negative':\n X_sparse.data = - np.abs(X_sparse.data)\n elif strictly_signed == 'zeros':\n X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64)\n X_dense = X_sparse.toarray()\n\n scaler_sparse = RobustScaler(with_centering=False)\n scaler_dense = RobustScaler(with_centering=False)\n\n scaler_sparse.fit(X_sparse)\n scaler_dense.fit(X_dense)\n\n assert_allclose(scaler_sparse.scale_, scaler_dense.scale_)\n\n\ndef test_robust_scaler_transform_one_row_csr():\n # Check RobustScaler on transforming csr matrix with one row\n rng = np.random.RandomState(0)\n X = rng.randn(4, 5)\n single_row = np.array([[0.1, 1., 2., 0., -1.]])\n scaler = RobustScaler(with_centering=False)\n scaler = scaler.fit(X)\n row_trans = scaler.transform(sparse.csr_matrix(single_row))\n row_expected = single_row / scaler.scale_\n assert_array_almost_equal(row_trans.toarray(), row_expected)\n row_scaled_back = scaler.inverse_transform(row_trans)\n assert_array_almost_equal(single_row, row_scaled_back.toarray())\n\n\ndef test_robust_scaler_iris():\n X = iris.data\n scaler = RobustScaler()\n X_trans = scaler.fit_transform(X)\n assert_array_almost_equal(np.median(X_trans, axis=0), 0)\n X_trans_inv = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n q = np.percentile(X_trans, q=(25, 75), axis=0)\n iqr = q[1] - q[0]\n assert_array_almost_equal(iqr, 1)\n\n\ndef test_robust_scaler_iris_quantiles():\n X = iris.data\n scaler = RobustScaler(quantile_range=(10, 90))\n X_trans = scaler.fit_transform(X)\n assert_array_almost_equal(np.median(X_trans, axis=0), 0)\n X_trans_inv = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n q = np.percentile(X_trans, q=(10, 90), axis=0)\n q_range = q[1] - q[0]\n assert_array_almost_equal(q_range, 1)\n\n\ndef test_quantile_transform_iris():\n X = iris.data\n # uniform output distribution\n transformer = QuantileTransformer(n_quantiles=30)\n X_trans = transformer.fit_transform(X)\n X_trans_inv = transformer.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n # normal output distribution\n transformer = QuantileTransformer(n_quantiles=30,\n output_distribution='normal')\n X_trans = transformer.fit_transform(X)\n X_trans_inv = 
transformer.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n # make sure it is possible to take the inverse of a sparse matrix\n # which contain negative value; this is the case in the iris dataset\n X_sparse = sparse.csc_matrix(X)\n X_sparse_tran = transformer.fit_transform(X_sparse)\n X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)\n assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)\n\n\ndef test_quantile_transform_check_error():\n X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],\n [2, 4, 0, 0, 6, 8, 0, 10, 0, 0],\n [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])\n X = sparse.csc_matrix(X)\n X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],\n [-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],\n [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])\n X_neg = sparse.csc_matrix(X_neg)\n\n err_msg = \"Invalid value for 'n_quantiles': 0.\"\n with pytest.raises(ValueError, match=err_msg):\n QuantileTransformer(n_quantiles=0).fit(X)\n err_msg = \"Invalid value for 'subsample': 0.\"\n with pytest.raises(ValueError, match=err_msg):\n QuantileTransformer(subsample=0).fit(X)\n err_msg = (\"The number of quantiles cannot be greater than \"\n \"the number of samples used. Got 1000 quantiles \"\n \"and 10 samples.\")\n with pytest.raises(ValueError, match=err_msg):\n QuantileTransformer(subsample=10).fit(X)\n\n transformer = QuantileTransformer(n_quantiles=10)\n err_msg = \"QuantileTransformer only accepts non-negative sparse matrices.\"\n with pytest.raises(ValueError, match=err_msg):\n transformer.fit(X_neg)\n transformer.fit(X)\n err_msg = \"QuantileTransformer only accepts non-negative sparse matrices.\"\n with pytest.raises(ValueError, match=err_msg):\n transformer.transform(X_neg)\n\n X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],\n [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])\n err_msg = (\"X does not have the same number of features as the previously\"\n \" fitted \" \"data. Got 2 instead of 3.\")\n with pytest.raises(ValueError, match=err_msg):\n transformer.transform(X_bad_feat)\n err_msg = (\"X does not have the same number of features \"\n \"as the previously fitted data. Got 2 instead of 3.\")\n with pytest.raises(ValueError, match=err_msg):\n transformer.inverse_transform(X_bad_feat)\n\n transformer = QuantileTransformer(n_quantiles=10,\n output_distribution='rnd')\n # check that an error is raised at fit time\n err_msg = (\"'output_distribution' has to be either 'normal' or \"\n \"'uniform'. 
Got 'rnd' instead.\")\n with pytest.raises(ValueError, match=err_msg):\n transformer.fit(X)\n # check that an error is raised at transform time\n transformer.output_distribution = 'uniform'\n transformer.fit(X)\n X_tran = transformer.transform(X)\n transformer.output_distribution = 'rnd'\n err_msg = (\"'output_distribution' has to be either 'normal' or 'uniform'.\"\n \" Got 'rnd' instead.\")\n with pytest.raises(ValueError, match=err_msg):\n transformer.transform(X)\n # check that an error is raised at inverse_transform time\n err_msg = (\"'output_distribution' has to be either 'normal' or 'uniform'.\"\n \" Got 'rnd' instead.\")\n with pytest.raises(ValueError, match=err_msg):\n transformer.inverse_transform(X_tran)\n # check that an error is raised if input is scalar\n with pytest.raises(ValueError,\n match='Expected 2D array, got scalar array instead'):\n transformer.transform(10)\n # check that a warning is raised is n_quantiles > n_samples\n transformer = QuantileTransformer(n_quantiles=100)\n warn_msg = \"n_quantiles is set to n_samples\"\n with pytest.warns(UserWarning, match=warn_msg) as record:\n transformer.fit(X)\n assert len(record) == 1\n assert transformer.n_quantiles_ == X.shape[0]\n\n\ndef test_quantile_transform_sparse_ignore_zeros():\n X = np.array([[0, 1],\n [0, 0],\n [0, 2],\n [0, 2],\n [0, 1]])\n X_sparse = sparse.csc_matrix(X)\n transformer = QuantileTransformer(ignore_implicit_zeros=True,\n n_quantiles=5)\n\n # dense case -> warning raise\n assert_warns_message(UserWarning, \"'ignore_implicit_zeros' takes effect\"\n \" only with sparse matrix. This parameter has no\"\n \" effect.\", transformer.fit, X)\n\n X_expected = np.array([[0, 0],\n [0, 0],\n [0, 1],\n [0, 1],\n [0, 0]])\n X_trans = transformer.fit_transform(X_sparse)\n assert_almost_equal(X_expected, X_trans.A)\n\n # consider the case where sparse entries are missing values and user-given\n # zeros are to be considered\n X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])\n X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])\n X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))\n X_trans = transformer.fit_transform(X_sparse)\n X_expected = np.array([[0., 0.5],\n [0., 0.],\n [0., 1.],\n [0., 1.],\n [0., 0.5],\n [0., 0.],\n [0., 0.5],\n [0., 1.],\n [0., 0.]])\n assert_almost_equal(X_expected, X_trans.A)\n\n transformer = QuantileTransformer(ignore_implicit_zeros=True,\n n_quantiles=5)\n X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])\n X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])\n X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])\n X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))\n X_trans = transformer.fit_transform(X_sparse)\n X_expected = np.array([[0, 1],\n [0, 0.375],\n [0, 0.375],\n [0, 0.375],\n [0, 1],\n [0, 0],\n [0, 1]])\n assert_almost_equal(X_expected, X_trans.A)\n assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)\n\n # check in conjunction with subsampling\n transformer = QuantileTransformer(ignore_implicit_zeros=True,\n n_quantiles=5,\n subsample=8,\n random_state=0)\n X_trans = transformer.fit_transform(X_sparse)\n assert_almost_equal(X_expected, X_trans.A)\n assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)\n\n\ndef test_quantile_transform_dense_toy():\n X = np.array([[0, 2, 2.6],\n [25, 4, 4.1],\n [50, 6, 2.3],\n [75, 8, 9.5],\n [100, 10, 0.1]])\n\n transformer = QuantileTransformer(n_quantiles=5)\n transformer.fit(X)\n\n # using the a uniform output, each entry of X should be map 
between 0 and 1\n # and equally spaced\n X_trans = transformer.fit_transform(X)\n X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T\n assert_almost_equal(np.sort(X_trans, axis=0), X_expected)\n\n X_test = np.array([\n [-1, 1, 0],\n [101, 11, 10],\n ])\n X_expected = np.array([\n [0, 0, 0],\n [1, 1, 1],\n ])\n assert_array_almost_equal(transformer.transform(X_test), X_expected)\n\n X_trans_inv = transformer.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n\n\ndef test_quantile_transform_subsampling():\n # Test that subsampling the input yield to a consistent results We check\n # that the computed quantiles are almost mapped to a [0, 1] vector where\n # values are equally spaced. The infinite norm is checked to be smaller\n # than a given threshold. This is repeated 5 times.\n\n # dense support\n n_samples = 1000000\n n_quantiles = 1000\n X = np.sort(np.random.sample((n_samples, 1)), axis=0)\n ROUND = 5\n inf_norm_arr = []\n for random_state in range(ROUND):\n transformer = QuantileTransformer(random_state=random_state,\n n_quantiles=n_quantiles,\n subsample=n_samples // 10)\n transformer.fit(X)\n diff = (np.linspace(0, 1, n_quantiles) -\n np.ravel(transformer.quantiles_))\n inf_norm = np.max(np.abs(diff))\n assert inf_norm < 1e-2\n inf_norm_arr.append(inf_norm)\n # each random subsampling yield a unique approximation to the expected\n # linspace CDF\n assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)\n\n # sparse support\n\n X = sparse.rand(n_samples, 1, density=.99, format='csc', random_state=0)\n inf_norm_arr = []\n for random_state in range(ROUND):\n transformer = QuantileTransformer(random_state=random_state,\n n_quantiles=n_quantiles,\n subsample=n_samples // 10)\n transformer.fit(X)\n diff = (np.linspace(0, 1, n_quantiles) -\n np.ravel(transformer.quantiles_))\n inf_norm = np.max(np.abs(diff))\n assert inf_norm < 1e-1\n inf_norm_arr.append(inf_norm)\n # each random subsampling yield a unique approximation to the expected\n # linspace CDF\n assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)\n\n\ndef test_quantile_transform_sparse_toy():\n X = np.array([[0., 2., 0.],\n [25., 4., 0.],\n [50., 0., 2.6],\n [0., 0., 4.1],\n [0., 6., 0.],\n [0., 8., 0.],\n [75., 0., 2.3],\n [0., 10., 0.],\n [0., 0., 9.5],\n [100., 0., 0.1]])\n\n X = sparse.csc_matrix(X)\n\n transformer = QuantileTransformer(n_quantiles=10)\n transformer.fit(X)\n\n X_trans = transformer.fit_transform(X)\n assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)\n assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)\n\n X_trans_inv = transformer.inverse_transform(X_trans)\n assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())\n\n transformer_dense = QuantileTransformer(n_quantiles=10).fit(\n X.toarray())\n\n X_trans = transformer_dense.transform(X)\n assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)\n assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)\n\n X_trans_inv = transformer_dense.inverse_transform(X_trans)\n assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())\n\n\ndef test_quantile_transform_axis1():\n X = np.array([[0, 25, 50, 75, 100],\n [2, 4, 6, 8, 10],\n [2.6, 4.1, 2.3, 9.5, 0.1]])\n\n X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)\n X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)\n assert_array_almost_equal(X_trans_a0, X_trans_a1.T)\n\n\ndef test_quantile_transform_bounds():\n # Lower and upper bounds are manually mapped. 
We checked that in the case\n # of a constant feature and binary feature, the bounds are properly mapped.\n X_dense = np.array([[0, 0],\n [0, 0],\n [1, 0]])\n X_sparse = sparse.csc_matrix(X_dense)\n\n # check sparse and dense are consistent\n X_trans = QuantileTransformer(n_quantiles=3,\n random_state=0).fit_transform(X_dense)\n assert_array_almost_equal(X_trans, X_dense)\n X_trans_sp = QuantileTransformer(n_quantiles=3,\n random_state=0).fit_transform(X_sparse)\n assert_array_almost_equal(X_trans_sp.A, X_dense)\n assert_array_almost_equal(X_trans, X_trans_sp.A)\n\n # check the consistency of the bounds by learning on 1 matrix\n # and transforming another\n X = np.array([[0, 1],\n [0, 0.5],\n [1, 0]])\n X1 = np.array([[0, 0.1],\n [0, 0.5],\n [1, 0.1]])\n transformer = QuantileTransformer(n_quantiles=3).fit(X)\n X_trans = transformer.transform(X1)\n assert_array_almost_equal(X_trans, X1)\n\n # check that values outside of the range learned will be mapped properly.\n X = np.random.random((1000, 1))\n transformer = QuantileTransformer()\n transformer.fit(X)\n assert (transformer.transform([[-10]]) ==\n transformer.transform([[np.min(X)]]))\n assert (transformer.transform([[10]]) ==\n transformer.transform([[np.max(X)]]))\n assert (transformer.inverse_transform([[-10]]) ==\n transformer.inverse_transform(\n [[np.min(transformer.references_)]]))\n assert (transformer.inverse_transform([[10]]) ==\n transformer.inverse_transform(\n [[np.max(transformer.references_)]]))\n\n\ndef test_quantile_transform_and_inverse():\n X_1 = iris.data\n X_2 = np.array([[0.], [BOUNDS_THRESHOLD / 10], [1.5], [2], [3], [3], [4]])\n for X in [X_1, X_2]:\n transformer = QuantileTransformer(n_quantiles=1000, random_state=0)\n X_trans = transformer.fit_transform(X)\n X_trans_inv = transformer.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv, decimal=9)\n\n\ndef test_quantile_transform_nan():\n X = np.array([[np.nan, 0, 0, 1],\n [np.nan, np.nan, 0, 0.5],\n [np.nan, 1, 1, 0]])\n\n transformer = QuantileTransformer(n_quantiles=10, random_state=42)\n transformer.fit_transform(X)\n\n # check that the quantile of the first column is all NaN\n assert np.isnan(transformer.quantiles_[:, 0]).all()\n # all other column should not contain NaN\n assert not np.isnan(transformer.quantiles_[:, 1:]).any()\n\n\[email protected](\"array_type\", ['array', 'sparse'])\ndef test_quantile_transformer_sorted_quantiles(array_type):\n # Non-regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/15733\n # Taken from upstream bug report:\n # https://github.com/numpy/numpy/issues/14685\n X = np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, 8, 8, 7] * 10)\n X = 0.1 * X.reshape(-1, 1)\n X = _convert_container(X, array_type)\n\n n_quantiles = 100\n qt = QuantileTransformer(n_quantiles=n_quantiles).fit(X)\n\n # Check that the estimated quantile threasholds are monotically\n # increasing:\n quantiles = qt.quantiles_[:, 0]\n assert len(quantiles) == 100\n assert all(np.diff(quantiles) >= 0)\n\n\ndef test_robust_scaler_invalid_range():\n for range_ in [\n (-1, 90),\n (-2, -3),\n (10, 101),\n (100.5, 101),\n (90, 50),\n ]:\n scaler = RobustScaler(quantile_range=range_)\n\n with pytest.raises(ValueError, match=r'Invalid quantile range: \\('):\n scaler.fit(iris.data)\n\n\ndef test_scale_function_without_centering():\n rng = np.random.RandomState(42)\n X = rng.randn(4, 5)\n X[:, 0] = 0.0 # first feature is always of zero\n X_csr = sparse.csr_matrix(X)\n\n X_scaled = scale(X, with_mean=False)\n assert not 
np.any(np.isnan(X_scaled))\n\n X_csr_scaled = scale(X_csr, with_mean=False)\n assert not np.any(np.isnan(X_csr_scaled.data))\n\n # test csc has same outcome\n X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)\n assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())\n\n # raises value error on axis != 0\n with pytest.raises(ValueError):\n scale(X_csr, with_mean=False, axis=1)\n\n assert_array_almost_equal(X_scaled.mean(axis=0),\n [0., -0.01, 2.24, -0.35, -0.78], 2)\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n # Check that X has not been copied\n assert X_scaled is not X\n\n X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)\n assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))\n assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))\n\n # null scale\n X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)\n assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())\n\n\ndef test_robust_scale_axis1():\n X = iris.data\n X_trans = robust_scale(X, axis=1)\n assert_array_almost_equal(np.median(X_trans, axis=1), 0)\n q = np.percentile(X_trans, q=(25, 75), axis=1)\n iqr = q[1] - q[0]\n assert_array_almost_equal(iqr, 1)\n\n\ndef test_robust_scale_1d_array():\n X = iris.data[:, 1]\n X_trans = robust_scale(X)\n assert_array_almost_equal(np.median(X_trans), 0)\n q = np.percentile(X_trans, q=(25, 75))\n iqr = q[1] - q[0]\n assert_array_almost_equal(iqr, 1)\n\n\ndef test_robust_scaler_zero_variance_features():\n # Check RobustScaler on toy data with zero variance features\n X = [[0., 1., +0.5],\n [0., 1., -0.1],\n [0., 1., +1.1]]\n\n scaler = RobustScaler()\n X_trans = scaler.fit_transform(X)\n\n # NOTE: for such a small sample size, what we expect in the third column\n # depends HEAVILY on the method used to calculate quantiles. 
The values\n # here were calculated to fit the quantiles produces by np.percentile\n # using numpy 1.9 Calculating quantiles with\n # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles\n # would yield very different results!\n X_expected = [[0., 0., +0.0],\n [0., 0., -1.0],\n [0., 0., +1.0]]\n assert_array_almost_equal(X_trans, X_expected)\n X_trans_inv = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n\n # make sure new data gets transformed correctly\n X_new = [[+0., 2., 0.5],\n [-1., 1., 0.0],\n [+0., 1., 1.5]]\n X_trans_new = scaler.transform(X_new)\n X_expected_new = [[+0., 1., +0.],\n [-1., 0., -0.83333],\n [+0., 0., +1.66667]]\n assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)\n\n\ndef test_maxabs_scaler_zero_variance_features():\n # Check MaxAbsScaler on toy data with zero variance features\n X = [[0., 1., +0.5],\n [0., 1., -0.3],\n [0., 1., +1.5],\n [0., 0., +0.0]]\n\n scaler = MaxAbsScaler()\n X_trans = scaler.fit_transform(X)\n X_expected = [[0., 1., 1.0 / 3.0],\n [0., 1., -0.2],\n [0., 1., 1.0],\n [0., 0., 0.0]]\n assert_array_almost_equal(X_trans, X_expected)\n X_trans_inv = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X, X_trans_inv)\n\n # make sure new data gets transformed correctly\n X_new = [[+0., 2., 0.5],\n [-1., 1., 0.0],\n [+0., 1., 1.5]]\n X_trans_new = scaler.transform(X_new)\n X_expected_new = [[+0., 2.0, 1.0 / 3.0],\n [-1., 1.0, 0.0],\n [+0., 1.0, 1.0]]\n\n assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)\n\n # function interface\n X_trans = maxabs_scale(X)\n assert_array_almost_equal(X_trans, X_expected)\n\n # sparse data\n X_csr = sparse.csr_matrix(X)\n X_csc = sparse.csc_matrix(X)\n X_trans_csr = scaler.fit_transform(X_csr)\n X_trans_csc = scaler.fit_transform(X_csc)\n X_expected = [[0., 1., 1.0 / 3.0],\n [0., 1., -0.2],\n [0., 1., 1.0],\n [0., 0., 0.0]]\n assert_array_almost_equal(X_trans_csr.A, X_expected)\n assert_array_almost_equal(X_trans_csc.A, X_expected)\n X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)\n X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)\n assert_array_almost_equal(X, X_trans_csr_inv.A)\n assert_array_almost_equal(X, X_trans_csc_inv.A)\n\n\ndef test_maxabs_scaler_large_negative_value():\n # Check MaxAbsScaler on toy data with a large negative value\n X = [[0., 1., +0.5, -1.0],\n [0., 1., -0.3, -0.5],\n [0., 1., -100.0, 0.0],\n [0., 0., +0.0, -2.0]]\n\n scaler = MaxAbsScaler()\n X_trans = scaler.fit_transform(X)\n X_expected = [[0., 1., 0.005, -0.5],\n [0., 1., -0.003, -0.25],\n [0., 1., -1.0, 0.0],\n [0., 0., 0.0, -1.0]]\n assert_array_almost_equal(X_trans, X_expected)\n\n\ndef test_maxabs_scaler_transform_one_row_csr():\n # Check MaxAbsScaler on transforming csr matrix with one row\n X = sparse.csr_matrix([[0.5, 1., 1.]])\n scaler = MaxAbsScaler()\n scaler = scaler.fit(X)\n X_trans = scaler.transform(X)\n X_expected = sparse.csr_matrix([[1., 1., 1.]])\n assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())\n X_scaled_back = scaler.inverse_transform(X_trans)\n assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())\n\n\ndef test_maxabs_scaler_1d():\n # Test scaling of dataset along single axis\n for X in [X_1row, X_1col, X_list_1row, X_list_1row]:\n\n scaler = MaxAbsScaler(copy=True)\n X_scaled = scaler.fit(X).transform(X)\n\n if isinstance(X, list):\n X = np.array(X) # cast only after scaling done\n\n if _check_dim_1axis(X) == 1:\n assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),\n 
np.ones(n_features))\n else:\n assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)\n assert scaler.n_samples_seen_ == X.shape[0]\n\n # check inverse transform\n X_scaled_back = scaler.inverse_transform(X_scaled)\n assert_array_almost_equal(X_scaled_back, X)\n\n # Constant feature\n X = np.ones((5, 1))\n scaler = MaxAbsScaler()\n X_scaled = scaler.fit(X).transform(X)\n assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)\n assert scaler.n_samples_seen_ == X.shape[0]\n\n # function interface\n X_1d = X_1row.ravel()\n max_abs = np.abs(X_1d).max()\n assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))\n\n\ndef test_maxabs_scaler_partial_fit():\n # Test if partial_fit run over many batches of size 1 and 50\n # gives the same results as fit\n X = X_2d[:100, :]\n n = X.shape[0]\n\n for chunk_size in [1, 2, 50, n, n + 42]:\n # Test mean at the end of the process\n scaler_batch = MaxAbsScaler().fit(X)\n\n scaler_incr = MaxAbsScaler()\n scaler_incr_csr = MaxAbsScaler()\n scaler_incr_csc = MaxAbsScaler()\n for batch in gen_batches(n, chunk_size):\n scaler_incr = scaler_incr.partial_fit(X[batch])\n X_csr = sparse.csr_matrix(X[batch])\n scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)\n X_csc = sparse.csc_matrix(X[batch])\n scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)\n\n assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)\n assert_array_almost_equal(scaler_batch.max_abs_,\n scaler_incr_csr.max_abs_)\n assert_array_almost_equal(scaler_batch.max_abs_,\n scaler_incr_csc.max_abs_)\n assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_\n assert (scaler_batch.n_samples_seen_ ==\n scaler_incr_csr.n_samples_seen_)\n assert (scaler_batch.n_samples_seen_ ==\n scaler_incr_csc.n_samples_seen_)\n assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)\n assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)\n assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)\n assert_array_almost_equal(scaler_batch.transform(X),\n scaler_incr.transform(X))\n\n # Test std after 1 step\n batch0 = slice(0, chunk_size)\n scaler_batch = MaxAbsScaler().fit(X[batch0])\n scaler_incr = MaxAbsScaler().partial_fit(X[batch0])\n\n assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)\n assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_\n assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)\n assert_array_almost_equal(scaler_batch.transform(X),\n scaler_incr.transform(X))\n\n # Test std until the end of partial fits, and\n scaler_batch = MaxAbsScaler().fit(X)\n scaler_incr = MaxAbsScaler() # Clean estimator\n for i, batch in enumerate(gen_batches(n, chunk_size)):\n scaler_incr = scaler_incr.partial_fit(X[batch])\n assert_correct_incr(i, batch_start=batch.start,\n batch_stop=batch.stop, n=n,\n chunk_size=chunk_size,\n n_samples_seen=scaler_incr.n_samples_seen_)\n\n\ndef test_normalizer_l1():\n rng = np.random.RandomState(0)\n X_dense = rng.randn(4, 5)\n X_sparse_unpruned = sparse.csr_matrix(X_dense)\n\n # set the row number 3 to zero\n X_dense[3, :] = 0.0\n\n # set the row number 3 to zero without pruning (can happen in real life)\n indptr_3 = X_sparse_unpruned.indptr[3]\n indptr_4 = X_sparse_unpruned.indptr[4]\n X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0\n\n # build the pruned variant using the regular constructor\n X_sparse_pruned = sparse.csr_matrix(X_dense)\n\n # check inputs that support the no-copy optim\n for X in (X_dense, X_sparse_pruned, 
X_sparse_unpruned):\n\n normalizer = Normalizer(norm='l1', copy=True)\n X_norm = normalizer.transform(X)\n assert X_norm is not X\n X_norm1 = toarray(X_norm)\n\n normalizer = Normalizer(norm='l1', copy=False)\n X_norm = normalizer.transform(X)\n assert X_norm is X\n X_norm2 = toarray(X_norm)\n\n for X_norm in (X_norm1, X_norm2):\n row_sums = np.abs(X_norm).sum(axis=1)\n for i in range(3):\n assert_almost_equal(row_sums[i], 1.0)\n assert_almost_equal(row_sums[3], 0.0)\n\n # check input for which copy=False won't prevent a copy\n for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):\n X = init(X_dense)\n X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)\n\n assert X_norm is not X\n assert isinstance(X_norm, sparse.csr_matrix)\n\n X_norm = toarray(X_norm)\n for i in range(3):\n assert_almost_equal(row_sums[i], 1.0)\n assert_almost_equal(la.norm(X_norm[3]), 0.0)\n\n\ndef test_normalizer_l2():\n rng = np.random.RandomState(0)\n X_dense = rng.randn(4, 5)\n X_sparse_unpruned = sparse.csr_matrix(X_dense)\n\n # set the row number 3 to zero\n X_dense[3, :] = 0.0\n\n # set the row number 3 to zero without pruning (can happen in real life)\n indptr_3 = X_sparse_unpruned.indptr[3]\n indptr_4 = X_sparse_unpruned.indptr[4]\n X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0\n\n # build the pruned variant using the regular constructor\n X_sparse_pruned = sparse.csr_matrix(X_dense)\n\n # check inputs that support the no-copy optim\n for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):\n\n normalizer = Normalizer(norm='l2', copy=True)\n X_norm1 = normalizer.transform(X)\n assert X_norm1 is not X\n X_norm1 = toarray(X_norm1)\n\n normalizer = Normalizer(norm='l2', copy=False)\n X_norm2 = normalizer.transform(X)\n assert X_norm2 is X\n X_norm2 = toarray(X_norm2)\n\n for X_norm in (X_norm1, X_norm2):\n for i in range(3):\n assert_almost_equal(la.norm(X_norm[i]), 1.0)\n assert_almost_equal(la.norm(X_norm[3]), 0.0)\n\n # check input for which copy=False won't prevent a copy\n for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):\n X = init(X_dense)\n X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)\n\n assert X_norm is not X\n assert isinstance(X_norm, sparse.csr_matrix)\n\n X_norm = toarray(X_norm)\n for i in range(3):\n assert_almost_equal(la.norm(X_norm[i]), 1.0)\n assert_almost_equal(la.norm(X_norm[3]), 0.0)\n\n\ndef test_normalizer_max():\n rng = np.random.RandomState(0)\n X_dense = rng.randn(4, 5)\n X_sparse_unpruned = sparse.csr_matrix(X_dense)\n\n # set the row number 3 to zero\n X_dense[3, :] = 0.0\n\n # set the row number 3 to zero without pruning (can happen in real life)\n indptr_3 = X_sparse_unpruned.indptr[3]\n indptr_4 = X_sparse_unpruned.indptr[4]\n X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0\n\n # build the pruned variant using the regular constructor\n X_sparse_pruned = sparse.csr_matrix(X_dense)\n\n # check inputs that support the no-copy optim\n for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):\n\n normalizer = Normalizer(norm='max', copy=True)\n X_norm1 = normalizer.transform(X)\n assert X_norm1 is not X\n X_norm1 = toarray(X_norm1)\n\n normalizer = Normalizer(norm='max', copy=False)\n X_norm2 = normalizer.transform(X)\n assert X_norm2 is X\n X_norm2 = toarray(X_norm2)\n\n for X_norm in (X_norm1, X_norm2):\n row_maxs = abs(X_norm).max(axis=1)\n for i in range(3):\n assert_almost_equal(row_maxs[i], 1.0)\n assert_almost_equal(row_maxs[3], 0.0)\n\n # check input for which copy=False won't prevent a copy\n for 
init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):\n X = init(X_dense)\n X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)\n\n assert X_norm is not X\n assert isinstance(X_norm, sparse.csr_matrix)\n\n X_norm = toarray(X_norm)\n for i in range(3):\n assert_almost_equal(row_maxs[i], 1.0)\n assert_almost_equal(la.norm(X_norm[3]), 0.0)\n\n\ndef test_normalizer_max_sign():\n # check that we normalize by a positive number even for negative data\n rng = np.random.RandomState(0)\n X_dense = rng.randn(4, 5)\n # set the row number 3 to zero\n X_dense[3, :] = 0.0\n # check for mixed data where the value with\n # largest magnitude is negative\n X_dense[2, abs(X_dense[2, :]).argmax()] *= -1\n X_all_neg = -np.abs(X_dense)\n X_all_neg_sparse = sparse.csr_matrix(X_all_neg)\n\n for X in (X_dense, X_all_neg, X_all_neg_sparse):\n normalizer = Normalizer(norm='max')\n X_norm = normalizer.transform(X)\n assert X_norm is not X\n X_norm = toarray(X_norm)\n assert_array_equal(\n np.sign(X_norm), np.sign(toarray(X)))\n\n\ndef test_normalize():\n # Test normalize function\n # Only tests functionality not used by the tests for Normalizer.\n X = np.random.RandomState(37).randn(3, 2)\n assert_array_equal(normalize(X, copy=False),\n normalize(X.T, axis=0, copy=False).T)\n with pytest.raises(ValueError):\n normalize([[0]], axis=2)\n with pytest.raises(ValueError):\n normalize([[0]], norm='l3')\n\n rs = np.random.RandomState(0)\n X_dense = rs.randn(10, 5)\n X_sparse = sparse.csr_matrix(X_dense)\n ones = np.ones((10))\n for X in (X_dense, X_sparse):\n for dtype in (np.float32, np.float64):\n for norm in ('l1', 'l2'):\n X = X.astype(dtype)\n X_norm = normalize(X, norm=norm)\n assert X_norm.dtype == dtype\n\n X_norm = toarray(X_norm)\n if norm == 'l1':\n row_sums = np.abs(X_norm).sum(axis=1)\n else:\n X_norm_squared = X_norm**2\n row_sums = X_norm_squared.sum(axis=1)\n\n assert_array_almost_equal(row_sums, ones)\n\n # Test return_norm\n X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])\n for norm in ('l1', 'l2', 'max'):\n _, norms = normalize(X_dense, norm=norm, return_norm=True)\n if norm == 'l1':\n assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))\n elif norm == 'l2':\n assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))\n else:\n assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))\n\n X_sparse = sparse.csr_matrix(X_dense)\n for norm in ('l1', 'l2'):\n with pytest.raises(NotImplementedError):\n normalize(X_sparse, norm=norm, return_norm=True)\n _, norms = normalize(X_sparse, norm='max', return_norm=True)\n assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))\n\n\ndef test_binarizer():\n X_ = np.array([[1, 0, 5], [2, 3, -1]])\n\n for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):\n\n X = init(X_.copy())\n\n binarizer = Binarizer(threshold=2.0, copy=True)\n X_bin = toarray(binarizer.transform(X))\n assert np.sum(X_bin == 0) == 4\n assert np.sum(X_bin == 1) == 2\n X_bin = binarizer.transform(X)\n assert sparse.issparse(X) == sparse.issparse(X_bin)\n\n binarizer = Binarizer(copy=True).fit(X)\n X_bin = toarray(binarizer.transform(X))\n assert X_bin is not X\n assert np.sum(X_bin == 0) == 2\n assert np.sum(X_bin == 1) == 4\n\n binarizer = Binarizer(copy=True)\n X_bin = binarizer.transform(X)\n assert X_bin is not X\n X_bin = toarray(X_bin)\n assert np.sum(X_bin == 0) == 2\n assert np.sum(X_bin == 1) == 4\n\n binarizer = Binarizer(copy=False)\n X_bin = binarizer.transform(X)\n if init is not list:\n assert X_bin is 
X\n\n binarizer = Binarizer(copy=False)\n X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)\n X_bin = binarizer.transform(X_float)\n if init is not list:\n assert X_bin is X_float\n\n X_bin = toarray(X_bin)\n assert np.sum(X_bin == 0) == 2\n assert np.sum(X_bin == 1) == 4\n\n binarizer = Binarizer(threshold=-0.5, copy=True)\n for init in (np.array, list):\n X = init(X_.copy())\n\n X_bin = toarray(binarizer.transform(X))\n assert np.sum(X_bin == 0) == 1\n assert np.sum(X_bin == 1) == 5\n X_bin = binarizer.transform(X)\n\n # Cannot use threshold < 0 for sparse\n with pytest.raises(ValueError):\n binarizer.transform(sparse.csc_matrix(X))\n\n\ndef test_center_kernel():\n # Test that KernelCenterer is equivalent to StandardScaler\n # in feature space\n rng = np.random.RandomState(0)\n X_fit = rng.random_sample((5, 4))\n scaler = StandardScaler(with_std=False)\n scaler.fit(X_fit)\n X_fit_centered = scaler.transform(X_fit)\n K_fit = np.dot(X_fit, X_fit.T)\n\n # center fit time matrix\n centerer = KernelCenterer()\n K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)\n K_fit_centered2 = centerer.fit_transform(K_fit)\n assert_array_almost_equal(K_fit_centered, K_fit_centered2)\n\n # center predict time matrix\n X_pred = rng.random_sample((2, 4))\n K_pred = np.dot(X_pred, X_fit.T)\n X_pred_centered = scaler.transform(X_pred)\n K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)\n K_pred_centered2 = centerer.transform(K_pred)\n assert_array_almost_equal(K_pred_centered, K_pred_centered2)\n\n\ndef test_cv_pipeline_precomputed():\n # Cross-validate a regression on four coplanar points with the same\n # value. Use precomputed kernel to ensure Pipeline with KernelCenterer\n # is treated as a _pairwise operation.\n X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])\n y_true = np.ones((4,))\n K = X.dot(X.T)\n kcent = KernelCenterer()\n pipeline = Pipeline([(\"kernel_centerer\", kcent), (\"svr\", SVR())])\n\n # did the pipeline set the _pairwise attribute?\n assert pipeline._pairwise\n\n # test cross-validation, score should be almost perfect\n # NB: this test is pretty vacuous -- it's mainly to test integration\n # of Pipeline and KernelCenterer\n y_pred = cross_val_predict(pipeline, K, y_true, cv=2)\n assert_array_almost_equal(y_true, y_pred)\n\n\ndef test_fit_transform():\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n for obj in ((StandardScaler(), Normalizer(), Binarizer())):\n X_transformed = obj.fit(X).transform(X)\n X_transformed2 = obj.fit_transform(X)\n assert_array_equal(X_transformed, X_transformed2)\n\n\ndef test_add_dummy_feature():\n X = [[1, 0], [0, 1], [0, 1]]\n X = add_dummy_feature(X)\n assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])\n\n\ndef test_add_dummy_feature_coo():\n X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])\n X = add_dummy_feature(X)\n assert sparse.isspmatrix_coo(X), X\n assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])\n\n\ndef test_add_dummy_feature_csc():\n X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])\n X = add_dummy_feature(X)\n assert sparse.isspmatrix_csc(X), X\n assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])\n\n\ndef test_add_dummy_feature_csr():\n X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])\n X = add_dummy_feature(X)\n assert sparse.isspmatrix_csr(X), X\n assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])\n\n\ndef test_fit_cold_start():\n X = iris.data\n X_2d = X[:, :2]\n\n # Scalers that have a partial_fit method\n scalers = 
[StandardScaler(with_mean=False, with_std=False),\n MinMaxScaler(),\n MaxAbsScaler()]\n\n for scaler in scalers:\n scaler.fit_transform(X)\n # with a different shape, this may break the scaler unless the internal\n # state is reset\n scaler.fit_transform(X_2d)\n\n\ndef test_quantile_transform_valid_axis():\n X = np.array([[0, 25, 50, 75, 100],\n [2, 4, 6, 8, 10],\n [2.6, 4.1, 2.3, 9.5, 0.1]])\n\n with pytest.raises(ValueError, match=\"axis should be either equal \"\n \"to 0 or 1. Got axis=2\"):\n quantile_transform(X.T, axis=2)\n\n\[email protected](\"method\", ['box-cox', 'yeo-johnson'])\ndef test_power_transformer_notfitted(method):\n pt = PowerTransformer(method=method)\n X = np.abs(X_1col)\n with pytest.raises(NotFittedError):\n pt.transform(X)\n with pytest.raises(NotFittedError):\n pt.inverse_transform(X)\n\n\[email protected]('method', ['box-cox', 'yeo-johnson'])\[email protected]('standardize', [True, False])\[email protected]('X', [X_1col, X_2d])\ndef test_power_transformer_inverse(method, standardize, X):\n # Make sure we get the original input when applying transform and then\n # inverse transform\n X = np.abs(X) if method == 'box-cox' else X\n pt = PowerTransformer(method=method, standardize=standardize)\n X_trans = pt.fit_transform(X)\n assert_almost_equal(X, pt.inverse_transform(X_trans))\n\n\ndef test_power_transformer_1d():\n X = np.abs(X_1col)\n\n for standardize in [True, False]:\n pt = PowerTransformer(method='box-cox', standardize=standardize)\n\n X_trans = pt.fit_transform(X)\n X_trans_func = power_transform(\n X, method='box-cox',\n standardize=standardize\n )\n\n X_expected, lambda_expected = stats.boxcox(X.flatten())\n\n if standardize:\n X_expected = scale(X_expected)\n\n assert_almost_equal(X_expected.reshape(-1, 1), X_trans)\n assert_almost_equal(X_expected.reshape(-1, 1), X_trans_func)\n\n assert_almost_equal(X, pt.inverse_transform(X_trans))\n assert_almost_equal(lambda_expected, pt.lambdas_[0])\n\n assert len(pt.lambdas_) == X.shape[1]\n assert isinstance(pt.lambdas_, np.ndarray)\n\n\ndef test_power_transformer_2d():\n X = np.abs(X_2d)\n\n for standardize in [True, False]:\n pt = PowerTransformer(method='box-cox', standardize=standardize)\n\n X_trans_class = pt.fit_transform(X)\n X_trans_func = power_transform(\n X, method='box-cox',\n standardize=standardize\n )\n\n for X_trans in [X_trans_class, X_trans_func]:\n for j in range(X_trans.shape[1]):\n X_expected, lmbda = stats.boxcox(X[:, j].flatten())\n\n if standardize:\n X_expected = scale(X_expected)\n\n assert_almost_equal(X_trans[:, j], X_expected)\n assert_almost_equal(lmbda, pt.lambdas_[j])\n\n # Test inverse transformation\n X_inv = pt.inverse_transform(X_trans)\n assert_array_almost_equal(X_inv, X)\n\n assert len(pt.lambdas_) == X.shape[1]\n assert isinstance(pt.lambdas_, np.ndarray)\n\n\ndef test_power_transformer_boxcox_strictly_positive_exception():\n # Exceptions should be raised for negative arrays and zero arrays when\n # method is boxcox\n\n pt = PowerTransformer(method='box-cox')\n pt.fit(np.abs(X_2d))\n X_with_negatives = X_2d\n not_positive_message = 'strictly positive'\n\n with pytest.raises(ValueError, match=not_positive_message):\n pt.transform(X_with_negatives)\n\n with pytest.raises(ValueError, match=not_positive_message):\n pt.fit(X_with_negatives)\n\n with pytest.raises(ValueError, match=not_positive_message):\n power_transform(X_with_negatives, method='box-cox')\n\n with pytest.raises(ValueError, match=not_positive_message):\n pt.transform(np.zeros(X_2d.shape))\n\n with 
pytest.raises(ValueError, match=not_positive_message):\n pt.fit(np.zeros(X_2d.shape))\n\n with pytest.raises(ValueError, match=not_positive_message):\n power_transform(np.zeros(X_2d.shape), method='box-cox')\n\n\[email protected]('X', [X_2d, np.abs(X_2d), -np.abs(X_2d),\n np.zeros(X_2d.shape)])\ndef test_power_transformer_yeojohnson_any_input(X):\n # Yeo-Johnson method should support any kind of input\n power_transform(X, method='yeo-johnson')\n\n\[email protected](\"method\", ['box-cox', 'yeo-johnson'])\ndef test_power_transformer_shape_exception(method):\n pt = PowerTransformer(method=method)\n X = np.abs(X_2d)\n pt.fit(X)\n\n # Exceptions should be raised for arrays with different num_columns\n # than during fitting\n wrong_shape_message = 'Input data has a different number of features'\n\n with pytest.raises(ValueError, match=wrong_shape_message):\n pt.transform(X[:, 0:1])\n\n with pytest.raises(ValueError, match=wrong_shape_message):\n pt.inverse_transform(X[:, 0:1])\n\n\ndef test_power_transformer_method_exception():\n pt = PowerTransformer(method='monty-python')\n X = np.abs(X_2d)\n\n # An exception should be raised if PowerTransformer.method isn't valid\n bad_method_message = \"'method' must be one of\"\n with pytest.raises(ValueError, match=bad_method_message):\n pt.fit(X)\n\n\ndef test_power_transformer_lambda_zero():\n pt = PowerTransformer(method='box-cox', standardize=False)\n X = np.abs(X_2d)[:, 0:1]\n\n # Test the lambda = 0 case\n pt.lambdas_ = np.array([0])\n X_trans = pt.transform(X)\n assert_array_almost_equal(pt.inverse_transform(X_trans), X)\n\n\ndef test_power_transformer_lambda_one():\n # Make sure lambda = 1 corresponds to the identity for yeo-johnson\n pt = PowerTransformer(method='yeo-johnson', standardize=False)\n X = np.abs(X_2d)[:, 0:1]\n\n pt.lambdas_ = np.array([1])\n X_trans = pt.transform(X)\n assert_array_almost_equal(X_trans, X)\n\n\[email protected](\"method, lmbda\", [('box-cox', .1),\n ('box-cox', .5),\n ('yeo-johnson', .1),\n ('yeo-johnson', .5),\n ('yeo-johnson', 1.),\n ])\ndef test_optimization_power_transformer(method, lmbda):\n # Test the optimization procedure:\n # - set a predefined value for lambda\n # - apply inverse_transform to a normal dist (we get X_inv)\n # - apply fit_transform to X_inv (we get X_inv_trans)\n # - check that X_inv_trans is roughly equal to X\n\n rng = np.random.RandomState(0)\n n_samples = 20000\n X = rng.normal(loc=0, scale=1, size=(n_samples, 1))\n\n pt = PowerTransformer(method=method, standardize=False)\n pt.lambdas_ = [lmbda]\n X_inv = pt.inverse_transform(X)\n\n pt = PowerTransformer(method=method, standardize=False)\n X_inv_trans = pt.fit_transform(X_inv)\n\n assert_almost_equal(0, np.linalg.norm(X - X_inv_trans) / n_samples,\n decimal=2)\n assert_almost_equal(0, X_inv_trans.mean(), decimal=1)\n assert_almost_equal(1, X_inv_trans.std(), decimal=1)\n\n\ndef test_yeo_johnson_darwin_example():\n # test from original paper \"A new family of power transformations to\n # improve normality or symmetry\" by Yeo and Johnson.\n X = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,\n 7.5, -6.0]\n X = np.array(X).reshape(-1, 1)\n lmbda = PowerTransformer(method='yeo-johnson').fit(X).lambdas_\n assert np.allclose(lmbda, 1.305, atol=1e-3)\n\n\[email protected]('method', ['box-cox', 'yeo-johnson'])\ndef test_power_transformer_nans(method):\n # Make sure lambda estimation is not influenced by NaN values\n # and that transform() supports NaN silently\n\n X = np.abs(X_1col)\n pt = PowerTransformer(method=method)\n 
pt.fit(X)\n lmbda_no_nans = pt.lambdas_[0]\n\n # concat nans at the end and check lambda stays the same\n X = np.concatenate([X, np.full_like(X, np.nan)])\n X = shuffle(X, random_state=0)\n\n pt.fit(X)\n lmbda_nans = pt.lambdas_[0]\n\n assert_almost_equal(lmbda_no_nans, lmbda_nans, decimal=5)\n\n X_trans = pt.transform(X)\n assert_array_equal(np.isnan(X_trans), np.isnan(X))\n\n\[email protected]('method', ['box-cox', 'yeo-johnson'])\[email protected]('standardize', [True, False])\ndef test_power_transformer_fit_transform(method, standardize):\n # check that fit_transform() and fit().transform() return the same values\n X = X_1col\n if method == 'box-cox':\n X = np.abs(X)\n\n pt = PowerTransformer(method, standardize=standardize)\n assert_array_almost_equal(pt.fit(X).transform(X), pt.fit_transform(X))\n\n\[email protected]('method', ['box-cox', 'yeo-johnson'])\[email protected]('standardize', [True, False])\ndef test_power_transformer_copy_True(method, standardize):\n # Check that neither fit, transform, fit_transform nor inverse_transform\n # modify X inplace when copy=True\n X = X_1col\n if method == 'box-cox':\n X = np.abs(X)\n\n X_original = X.copy()\n assert X is not X_original # sanity checks\n assert_array_almost_equal(X, X_original)\n\n pt = PowerTransformer(method, standardize=standardize, copy=True)\n\n pt.fit(X)\n assert_array_almost_equal(X, X_original)\n X_trans = pt.transform(X)\n assert X_trans is not X\n\n X_trans = pt.fit_transform(X)\n assert_array_almost_equal(X, X_original)\n assert X_trans is not X\n\n X_inv_trans = pt.inverse_transform(X_trans)\n assert X_trans is not X_inv_trans\n\n\[email protected]('method', ['box-cox', 'yeo-johnson'])\[email protected]('standardize', [True, False])\ndef test_power_transformer_copy_False(method, standardize):\n # check that when copy=False fit doesn't change X inplace but transform,\n # fit_transform and inverse_transform do.\n X = X_1col\n if method == 'box-cox':\n X = np.abs(X)\n\n X_original = X.copy()\n assert X is not X_original # sanity checks\n assert_array_almost_equal(X, X_original)\n\n pt = PowerTransformer(method, standardize=standardize, copy=False)\n\n pt.fit(X)\n assert_array_almost_equal(X, X_original) # fit didn't change X\n\n X_trans = pt.transform(X)\n assert X_trans is X\n\n if method == 'box-cox':\n X = np.abs(X)\n X_trans = pt.fit_transform(X)\n assert X_trans is X\n\n X_inv_trans = pt.inverse_transform(X_trans)\n assert X_trans is X_inv_trans\n\n\[email protected](\n \"X_2\",\n [sparse.random(10, 1, density=0.8, random_state=0),\n sparse.csr_matrix(np.full((10, 1), fill_value=np.nan))]\n)\ndef test_standard_scaler_sparse_partial_fit_finite_variance(X_2):\n # non-regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/16448\n X_1 = sparse.random(5, 1, density=0.8)\n scaler = StandardScaler(with_mean=False)\n scaler.fit(X_1).partial_fit(X_2)\n assert np.isfinite(scaler.var_[0])\n", "\"\"\"\nMulti-dimensional Scaling (MDS)\n\"\"\"\n\n# author: Nelle Varoquaux <[email protected]>\n# License: BSD\n\nimport numpy as np\nfrom joblib import Parallel, delayed, effective_n_jobs\n\nimport warnings\n\nfrom ..base import BaseEstimator\nfrom ..metrics import euclidean_distances\nfrom ..utils import check_random_state, check_array, check_symmetric\nfrom ..isotonic import IsotonicRegression\nfrom ..utils.validation import _deprecate_positional_args\n\n\ndef _smacof_single(dissimilarities, metric=True, n_components=2, init=None,\n max_iter=300, verbose=0, eps=1e-3, random_state=None):\n \"\"\"Computes 
multidimensional scaling using SMACOF algorithm\n\n Parameters\n ----------\n dissimilarities : ndarray, shape (n_samples, n_samples)\n Pairwise dissimilarities between the points. Must be symmetric.\n\n metric : boolean, optional, default: True\n Compute metric or nonmetric SMACOF algorithm.\n\n n_components : int, optional, default: 2\n Number of dimensions in which to immerse the dissimilarities. If an\n ``init`` array is provided, this option is overridden and the shape of\n ``init`` is used to determine the dimensionality of the embedding\n space.\n\n init : ndarray, shape (n_samples, n_components), optional, default: None\n Starting configuration of the embedding to initialize the algorithm. By\n default, the algorithm is initialized with a randomly chosen array.\n\n max_iter : int, optional, default: 300\n Maximum number of iterations of the SMACOF algorithm for a single run.\n\n verbose : int, optional, default: 0\n Level of verbosity.\n\n eps : float, optional, default: 1e-3\n Relative tolerance with respect to stress at which to declare\n convergence.\n\n random_state : int, RandomState instance, default=None\n Determines the random number generator used to initialize the centers.\n Pass an int for reproducible results across multiple function calls.\n See :term: `Glossary <random_state>`.\n\n Returns\n -------\n X : ndarray, shape (n_samples, n_components)\n Coordinates of the points in a ``n_components``-space.\n\n stress : float\n The final value of the stress (sum of squared distance of the\n disparities and the distances for all constrained points).\n\n n_iter : int\n The number of iterations corresponding to the best stress.\n \"\"\"\n dissimilarities = check_symmetric(dissimilarities, raise_exception=True)\n\n n_samples = dissimilarities.shape[0]\n random_state = check_random_state(random_state)\n\n sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()\n sim_flat_w = sim_flat[sim_flat != 0]\n if init is None:\n # Randomly choose initial configuration\n X = random_state.rand(n_samples * n_components)\n X = X.reshape((n_samples, n_components))\n else:\n # overrides the parameter p\n n_components = init.shape[1]\n if n_samples != init.shape[0]:\n raise ValueError(\"init matrix should be of shape (%d, %d)\" %\n (n_samples, n_components))\n X = init\n\n old_stress = None\n ir = IsotonicRegression()\n for it in range(max_iter):\n # Compute distance and monotonic regression\n dis = euclidean_distances(X)\n\n if metric:\n disparities = dissimilarities\n else:\n dis_flat = dis.ravel()\n # dissimilarities with 0 are considered as missing values\n dis_flat_w = dis_flat[sim_flat != 0]\n\n # Compute the disparities using a monotonic regression\n disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)\n disparities = dis_flat.copy()\n disparities[sim_flat != 0] = disparities_flat\n disparities = disparities.reshape((n_samples, n_samples))\n disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /\n (disparities ** 2).sum())\n\n # Compute stress\n stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2\n\n # Update X using the Guttman transform\n dis[dis == 0] = 1e-5\n ratio = disparities / dis\n B = - ratio\n B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)\n X = 1. 
/ n_samples * np.dot(B, X)\n\n dis = np.sqrt((X ** 2).sum(axis=1)).sum()\n if verbose >= 2:\n print('it: %d, stress %s' % (it, stress))\n if old_stress is not None:\n if(old_stress - stress / dis) < eps:\n if verbose:\n print('breaking at iteration %d with stress %s' % (it,\n stress))\n break\n old_stress = stress / dis\n\n return X, stress, it + 1\n\n\n@_deprecate_positional_args\ndef smacof(dissimilarities, *, metric=True, n_components=2, init=None,\n n_init=8, n_jobs=None, max_iter=300, verbose=0, eps=1e-3,\n random_state=None, return_n_iter=False):\n \"\"\"Computes multidimensional scaling using the SMACOF algorithm.\n\n The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a\n multidimensional scaling algorithm which minimizes an objective function\n (the *stress*) using a majorization technique. Stress majorization, also\n known as the Guttman Transform, guarantees a monotone convergence of\n stress, and is more powerful than traditional techniques such as gradient\n descent.\n\n The SMACOF algorithm for metric MDS can summarized by the following steps:\n\n 1. Set an initial start configuration, randomly or not.\n 2. Compute the stress\n 3. Compute the Guttman Transform\n 4. Iterate 2 and 3 until convergence.\n\n The nonmetric algorithm adds a monotonic regression step before computing\n the stress.\n\n Parameters\n ----------\n dissimilarities : ndarray, shape (n_samples, n_samples)\n Pairwise dissimilarities between the points. Must be symmetric.\n\n metric : boolean, optional, default: True\n Compute metric or nonmetric SMACOF algorithm.\n\n n_components : int, optional, default: 2\n Number of dimensions in which to immerse the dissimilarities. If an\n ``init`` array is provided, this option is overridden and the shape of\n ``init`` is used to determine the dimensionality of the embedding\n space.\n\n init : ndarray, shape (n_samples, n_components), optional, default: None\n Starting configuration of the embedding to initialize the algorithm. By\n default, the algorithm is initialized with a randomly chosen array.\n\n n_init : int, optional, default: 8\n Number of times the SMACOF algorithm will be run with different\n initializations. The final results will be the best output of the runs,\n determined by the run with the smallest final stress. If ``init`` is\n provided, this option is overridden and a single run is performed.\n\n n_jobs : int or None, optional (default=None)\n The number of jobs to use for the computation. If multiple\n initializations are used (``n_init``), each run of the algorithm is\n computed in parallel.\n\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n max_iter : int, optional, default: 300\n Maximum number of iterations of the SMACOF algorithm for a single run.\n\n verbose : int, optional, default: 0\n Level of verbosity.\n\n eps : float, optional, default: 1e-3\n Relative tolerance with respect to stress at which to declare\n convergence.\n\n random_state : int, RandomState instance, default=None\n Determines the random number generator used to initialize the centers.\n Pass an int for reproducible results across multiple function calls.\n See :term: `Glossary <random_state>`.\n\n return_n_iter : bool, optional, default: False\n Whether or not to return the number of iterations.\n\n Returns\n -------\n X : ndarray, shape (n_samples, n_components)\n Coordinates of the points in a ``n_components``-space.\n\n stress : float\n The final value of the stress (sum of squared distance of the\n disparities and the distances for all constrained points).\n\n n_iter : int\n The number of iterations corresponding to the best stress. Returned\n only if ``return_n_iter`` is set to ``True``.\n\n Notes\n -----\n \"Modern Multidimensional Scaling - Theory and Applications\" Borg, I.;\n Groenen P. Springer Series in Statistics (1997)\n\n \"Nonmetric multidimensional scaling: a numerical method\" Kruskal, J.\n Psychometrika, 29 (1964)\n\n \"Multidimensional scaling by optimizing goodness of fit to a nonmetric\n hypothesis\" Kruskal, J. Psychometrika, 29, (1964)\n \"\"\"\n\n dissimilarities = check_array(dissimilarities)\n random_state = check_random_state(random_state)\n\n if hasattr(init, '__array__'):\n init = np.asarray(init).copy()\n if not n_init == 1:\n warnings.warn(\n 'Explicit initial positions passed: '\n 'performing only one init of the MDS instead of %d'\n % n_init)\n n_init = 1\n\n best_pos, best_stress = None, None\n\n if effective_n_jobs(n_jobs) == 1:\n for it in range(n_init):\n pos, stress, n_iter_ = _smacof_single(\n dissimilarities, metric=metric,\n n_components=n_components, init=init,\n max_iter=max_iter, verbose=verbose,\n eps=eps, random_state=random_state)\n if best_stress is None or stress < best_stress:\n best_stress = stress\n best_pos = pos.copy()\n best_iter = n_iter_\n else:\n seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)\n results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(\n delayed(_smacof_single)(\n dissimilarities, metric=metric, n_components=n_components,\n init=init, max_iter=max_iter, verbose=verbose, eps=eps,\n random_state=seed)\n for seed in seeds)\n positions, stress, n_iters = zip(*results)\n best = np.argmin(stress)\n best_stress = stress[best]\n best_pos = positions[best]\n best_iter = n_iters[best]\n\n if return_n_iter:\n return best_pos, best_stress, best_iter\n else:\n return best_pos, best_stress\n\n\nclass MDS(BaseEstimator):\n \"\"\"Multidimensional scaling\n\n Read more in the :ref:`User Guide <multidimensional_scaling>`.\n\n Parameters\n ----------\n n_components : int, optional, default: 2\n Number of dimensions in which to immerse the dissimilarities.\n\n metric : boolean, optional, default: True\n If ``True``, perform metric MDS; otherwise, perform nonmetric MDS.\n\n n_init : int, optional, default: 4\n Number of times the SMACOF algorithm will be run with different\n initializations. 
The final results will be the best output of the runs,\n determined by the run with the smallest final stress.\n\n max_iter : int, optional, default: 300\n Maximum number of iterations of the SMACOF algorithm for a single run.\n\n verbose : int, optional, default: 0\n Level of verbosity.\n\n eps : float, optional, default: 1e-3\n Relative tolerance with respect to stress at which to declare\n convergence.\n\n n_jobs : int or None, optional (default=None)\n The number of jobs to use for the computation. If multiple\n initializations are used (``n_init``), each run of the algorithm is\n computed in parallel.\n\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n random_state : int, RandomState instance, default=None\n Determines the random number generator used to initialize the centers.\n Pass an int for reproducible results across multiple function calls.\n See :term: `Glossary <random_state>`.\n\n dissimilarity : 'euclidean' | 'precomputed', optional, default: 'euclidean'\n Dissimilarity measure to use:\n\n - 'euclidean':\n Pairwise Euclidean distances between points in the dataset.\n\n - 'precomputed':\n Pre-computed dissimilarities are passed directly to ``fit`` and\n ``fit_transform``.\n\n Attributes\n ----------\n embedding_ : array-like, shape (n_samples, n_components)\n Stores the position of the dataset in the embedding space.\n\n stress_ : float\n The final value of the stress (sum of squared distance of the\n disparities and the distances for all constrained points).\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.manifold import MDS\n >>> X, _ = load_digits(return_X_y=True)\n >>> X.shape\n (1797, 64)\n >>> embedding = MDS(n_components=2)\n >>> X_transformed = embedding.fit_transform(X[:100])\n >>> X_transformed.shape\n (100, 2)\n\n References\n ----------\n \"Modern Multidimensional Scaling - Theory and Applications\" Borg, I.;\n Groenen P. Springer Series in Statistics (1997)\n\n \"Nonmetric multidimensional scaling: a numerical method\" Kruskal, J.\n Psychometrika, 29 (1964)\n\n \"Multidimensional scaling by optimizing goodness of fit to a nonmetric\n hypothesis\" Kruskal, J. Psychometrika, 29, (1964)\n\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, n_components=2, *, metric=True, n_init=4,\n max_iter=300, verbose=0, eps=1e-3, n_jobs=None,\n random_state=None, dissimilarity=\"euclidean\"):\n self.n_components = n_components\n self.dissimilarity = dissimilarity\n self.metric = metric\n self.n_init = n_init\n self.max_iter = max_iter\n self.eps = eps\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.random_state = random_state\n\n @property\n def _pairwise(self):\n return self.kernel == \"precomputed\"\n\n def fit(self, X, y=None, init=None):\n \"\"\"\n Computes the position of the points in the embedding space\n\n Parameters\n ----------\n X : array, shape (n_samples, n_features) or (n_samples, n_samples)\n Input data. If ``dissimilarity=='precomputed'``, the input should\n be the dissimilarity matrix.\n\n y : Ignored\n\n init : ndarray, shape (n_samples,), optional, default: None\n Starting configuration of the embedding to initialize the SMACOF\n algorithm. 
By default, the algorithm is initialized with a randomly\n chosen array.\n \"\"\"\n self.fit_transform(X, init=init)\n return self\n\n def fit_transform(self, X, y=None, init=None):\n \"\"\"\n Fit the data from X, and returns the embedded coordinates\n\n Parameters\n ----------\n X : array, shape (n_samples, n_features) or (n_samples, n_samples)\n Input data. If ``dissimilarity=='precomputed'``, the input should\n be the dissimilarity matrix.\n\n y : Ignored\n\n init : ndarray, shape (n_samples,), optional, default: None\n Starting configuration of the embedding to initialize the SMACOF\n algorithm. By default, the algorithm is initialized with a randomly\n chosen array.\n \"\"\"\n X = self._validate_data(X)\n if X.shape[0] == X.shape[1] and self.dissimilarity != \"precomputed\":\n warnings.warn(\"The MDS API has changed. ``fit`` now constructs an\"\n \" dissimilarity matrix from data. To use a custom \"\n \"dissimilarity matrix, set \"\n \"``dissimilarity='precomputed'``.\")\n\n if self.dissimilarity == \"precomputed\":\n self.dissimilarity_matrix_ = X\n elif self.dissimilarity == \"euclidean\":\n self.dissimilarity_matrix_ = euclidean_distances(X)\n else:\n raise ValueError(\"Proximity must be 'precomputed' or 'euclidean'.\"\n \" Got %s instead\" % str(self.dissimilarity))\n\n self.embedding_, self.stress_, self.n_iter_ = smacof(\n self.dissimilarity_matrix_, metric=self.metric,\n n_components=self.n_components, init=init, n_init=self.n_init,\n n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,\n eps=self.eps, random_state=self.random_state,\n return_n_iter=True)\n\n return self.embedding_\n", "import numpy as np\nimport scipy.sparse as sp\n\nfrom re import escape\n\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_raises\nfrom sklearn.utils._testing import assert_warns\nfrom sklearn.utils._testing import assert_raise_message\nfrom sklearn.utils._testing import assert_raises_regexp\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.multiclass import OneVsOneClassifier\nfrom sklearn.multiclass import OutputCodeClassifier\nfrom sklearn.utils.multiclass import (check_classification_targets,\n type_of_target)\nfrom sklearn.utils import shuffle\n\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\n\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,\n Perceptron, LogisticRegression,\n SGDClassifier)\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.model_selection import GridSearchCV, cross_val_score\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import svm\nfrom sklearn import datasets\n\niris = datasets.load_iris()\nrng = np.random.RandomState(0)\nperm = rng.permutation(iris.target.size)\niris.data = iris.data[perm]\niris.target = iris.target[perm]\nn_classes = 3\n\n\ndef test_ovr_exceptions():\n ovr = OneVsRestClassifier(LinearSVC(random_state=0))\n assert_raises(ValueError, ovr.predict, [])\n\n # Fail on multioutput data\n assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,\n np.array([[1, 0], [0, 1]]),\n np.array([[1, 2], [3, 1]]))\n assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,\n np.array([[1, 0], [0, 1]]),\n np.array([[1.5, 2.4], [3.1, 0.8]]))\n\n\ndef test_check_classification_targets():\n # Test that 
check_classification_target return correct type. #5782\n y = np.array([0.0, 1.1, 2.0, 3.0])\n msg = type_of_target(y)\n assert_raise_message(ValueError, msg, check_classification_targets, y)\n\n\ndef test_ovr_fit_predict():\n # A classifier which implements decision_function.\n ovr = OneVsRestClassifier(LinearSVC(random_state=0))\n pred = ovr.fit(iris.data, iris.target).predict(iris.data)\n assert len(ovr.estimators_) == n_classes\n\n clf = LinearSVC(random_state=0)\n pred2 = clf.fit(iris.data, iris.target).predict(iris.data)\n assert np.mean(iris.target == pred) == np.mean(iris.target == pred2)\n\n # A classifier which implements predict_proba.\n ovr = OneVsRestClassifier(MultinomialNB())\n pred = ovr.fit(iris.data, iris.target).predict(iris.data)\n assert np.mean(iris.target == pred) > 0.65\n\n\ndef test_ovr_partial_fit():\n # Test if partial_fit is working as intended\n X, y = shuffle(iris.data, iris.target, random_state=0)\n ovr = OneVsRestClassifier(MultinomialNB())\n ovr.partial_fit(X[:100], y[:100], np.unique(y))\n ovr.partial_fit(X[100:], y[100:])\n pred = ovr.predict(X)\n ovr2 = OneVsRestClassifier(MultinomialNB())\n pred2 = ovr2.fit(X, y).predict(X)\n\n assert_almost_equal(pred, pred2)\n assert len(ovr.estimators_) == len(np.unique(y))\n assert np.mean(y == pred) > 0.65\n\n # Test when mini batches doesn't have all classes\n # with SGDClassifier\n X = np.abs(np.random.randn(14, 2))\n y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]\n\n ovr = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,\n shuffle=False, random_state=0))\n ovr.partial_fit(X[:7], y[:7], np.unique(y))\n ovr.partial_fit(X[7:], y[7:])\n pred = ovr.predict(X)\n ovr1 = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,\n shuffle=False, random_state=0))\n pred1 = ovr1.fit(X, y).predict(X)\n assert np.mean(pred == y) == np.mean(pred1 == y)\n\n # test partial_fit only exists if estimator has it:\n ovr = OneVsRestClassifier(SVC())\n assert not hasattr(ovr, \"partial_fit\")\n\n\ndef test_ovr_partial_fit_exceptions():\n ovr = OneVsRestClassifier(MultinomialNB())\n X = np.abs(np.random.randn(14, 2))\n y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]\n ovr.partial_fit(X[:7], y[:7], np.unique(y))\n # A new class value which was not in the first call of partial_fit\n # It should raise ValueError\n y1 = [5] + y[7:-1]\n assert_raises_regexp(ValueError, r\"Mini-batch contains \\[.+\\] while \"\n r\"classes must be subset of \\[.+\\]\",\n ovr.partial_fit, X=X[7:], y=y1)\n\n\ndef test_ovr_ovo_regressor():\n # test that ovr and ovo work on regressors which don't have a decision_\n # function\n ovr = OneVsRestClassifier(DecisionTreeRegressor())\n pred = ovr.fit(iris.data, iris.target).predict(iris.data)\n assert len(ovr.estimators_) == n_classes\n assert_array_equal(np.unique(pred), [0, 1, 2])\n # we are doing something sensible\n assert np.mean(pred == iris.target) > .9\n\n ovr = OneVsOneClassifier(DecisionTreeRegressor())\n pred = ovr.fit(iris.data, iris.target).predict(iris.data)\n assert len(ovr.estimators_) == n_classes * (n_classes - 1) / 2\n assert_array_equal(np.unique(pred), [0, 1, 2])\n # we are doing something sensible\n assert np.mean(pred == iris.target) > .9\n\n\ndef test_ovr_fit_predict_sparse():\n for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,\n sp.lil_matrix]:\n base_clf = MultinomialNB(alpha=1)\n\n X, Y = datasets.make_multilabel_classification(n_samples=100,\n n_features=20,\n n_classes=5,\n n_labels=3,\n length=50,\n allow_unlabeled=True,\n random_state=0)\n\n X_train, Y_train = 
X[:80], Y[:80]\n X_test = X[80:]\n\n clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)\n Y_pred = clf.predict(X_test)\n\n clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))\n Y_pred_sprs = clf_sprs.predict(X_test)\n\n assert clf.multilabel_\n assert sp.issparse(Y_pred_sprs)\n assert_array_equal(Y_pred_sprs.toarray(), Y_pred)\n\n # Test predict_proba\n Y_proba = clf_sprs.predict_proba(X_test)\n\n # predict assigns a label if the probability that the\n # sample has the label is greater than 0.5.\n pred = Y_proba > .5\n assert_array_equal(pred, Y_pred_sprs.toarray())\n\n # Test decision_function\n clf = svm.SVC()\n clf_sprs = OneVsRestClassifier(clf).fit(X_train, sparse(Y_train))\n dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)\n assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())\n\n\ndef test_ovr_always_present():\n # Test that ovr works with classes that are always present or absent.\n # Note: tests is the case where _ConstantPredictor is utilised\n X = np.ones((10, 2))\n X[:5, :] = 0\n\n # Build an indicator matrix where two features are always on.\n # As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]\n y = np.zeros((10, 3))\n y[5:, 0] = 1\n y[:, 1] = 1\n y[:, 2] = 1\n\n ovr = OneVsRestClassifier(LogisticRegression())\n assert_warns(UserWarning, ovr.fit, X, y)\n y_pred = ovr.predict(X)\n assert_array_equal(np.array(y_pred), np.array(y))\n y_pred = ovr.decision_function(X)\n assert np.unique(y_pred[:, -2:]) == 1\n y_pred = ovr.predict_proba(X)\n assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))\n\n # y has a constantly absent label\n y = np.zeros((10, 2))\n y[5:, 0] = 1 # variable label\n ovr = OneVsRestClassifier(LogisticRegression())\n assert_warns(UserWarning, ovr.fit, X, y)\n y_pred = ovr.predict_proba(X)\n assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))\n\n\ndef test_ovr_multiclass():\n # Toy dataset where features correspond directly to labels.\n X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])\n y = [\"eggs\", \"spam\", \"ham\", \"eggs\", \"ham\"]\n Y = np.array([[0, 0, 1],\n [0, 1, 0],\n [1, 0, 0],\n [0, 0, 1],\n [1, 0, 0]])\n\n classes = set(\"ham eggs spam\".split())\n\n for base_clf in (MultinomialNB(), LinearSVC(random_state=0),\n LinearRegression(), Ridge(),\n ElasticNet()):\n clf = OneVsRestClassifier(base_clf).fit(X, y)\n assert set(clf.classes_) == classes\n y_pred = clf.predict(np.array([[0, 0, 4]]))[0]\n assert_array_equal(y_pred, [\"eggs\"])\n\n # test input as label indicator matrix\n clf = OneVsRestClassifier(base_clf).fit(X, Y)\n y_pred = clf.predict([[0, 0, 4]])[0]\n assert_array_equal(y_pred, [0, 0, 1])\n\n\ndef test_ovr_binary():\n # Toy dataset where features correspond directly to labels.\n X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])\n y = [\"eggs\", \"spam\", \"spam\", \"eggs\", \"spam\"]\n Y = np.array([[0, 1, 1, 0, 1]]).T\n\n classes = set(\"eggs spam\".split())\n\n def conduct_test(base_clf, test_predict_proba=False):\n clf = OneVsRestClassifier(base_clf).fit(X, y)\n assert set(clf.classes_) == classes\n y_pred = clf.predict(np.array([[0, 0, 4]]))[0]\n assert_array_equal(y_pred, [\"eggs\"])\n if hasattr(base_clf, 'decision_function'):\n dec = clf.decision_function(X)\n assert dec.shape == (5,)\n\n if test_predict_proba:\n X_test = np.array([[0, 0, 4]])\n probabilities = clf.predict_proba(X_test)\n assert 2 == len(probabilities[0])\n assert (clf.classes_[np.argmax(probabilities, axis=1)] ==\n clf.predict(X_test))\n\n # 
test input as label indicator matrix\n clf = OneVsRestClassifier(base_clf).fit(X, Y)\n y_pred = clf.predict([[3, 0, 0]])[0]\n assert y_pred == 1\n\n for base_clf in (LinearSVC(random_state=0), LinearRegression(),\n Ridge(), ElasticNet()):\n conduct_test(base_clf)\n\n for base_clf in (MultinomialNB(), SVC(probability=True),\n LogisticRegression()):\n conduct_test(base_clf, test_predict_proba=True)\n\n\ndef test_ovr_multilabel():\n # Toy dataset where features correspond directly to labels.\n X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])\n y = np.array([[0, 1, 1],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 1],\n [1, 0, 0]])\n\n for base_clf in (MultinomialNB(), LinearSVC(random_state=0),\n LinearRegression(), Ridge(),\n ElasticNet(), Lasso(alpha=0.5)):\n clf = OneVsRestClassifier(base_clf).fit(X, y)\n y_pred = clf.predict([[0, 4, 4]])[0]\n assert_array_equal(y_pred, [0, 1, 1])\n assert clf.multilabel_\n\n\ndef test_ovr_fit_predict_svc():\n ovr = OneVsRestClassifier(svm.SVC())\n ovr.fit(iris.data, iris.target)\n assert len(ovr.estimators_) == 3\n assert ovr.score(iris.data, iris.target) > .9\n\n\ndef test_ovr_multilabel_dataset():\n base_clf = MultinomialNB(alpha=1)\n for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):\n X, Y = datasets.make_multilabel_classification(n_samples=100,\n n_features=20,\n n_classes=5,\n n_labels=2,\n length=50,\n allow_unlabeled=au,\n random_state=0)\n X_train, Y_train = X[:80], Y[:80]\n X_test, Y_test = X[80:], Y[80:]\n clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)\n Y_pred = clf.predict(X_test)\n\n assert clf.multilabel_\n assert_almost_equal(precision_score(Y_test, Y_pred, average=\"micro\"),\n prec,\n decimal=2)\n assert_almost_equal(recall_score(Y_test, Y_pred, average=\"micro\"),\n recall,\n decimal=2)\n\n\ndef test_ovr_multilabel_predict_proba():\n base_clf = MultinomialNB(alpha=1)\n for au in (False, True):\n X, Y = datasets.make_multilabel_classification(n_samples=100,\n n_features=20,\n n_classes=5,\n n_labels=3,\n length=50,\n allow_unlabeled=au,\n random_state=0)\n X_train, Y_train = X[:80], Y[:80]\n X_test = X[80:]\n clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)\n\n # Decision function only estimator.\n decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)\n assert not hasattr(decision_only, 'predict_proba')\n\n # Estimator with predict_proba disabled, depending on parameters.\n decision_only = OneVsRestClassifier(svm.SVC(probability=False))\n assert not hasattr(decision_only, 'predict_proba')\n decision_only.fit(X_train, Y_train)\n assert not hasattr(decision_only, 'predict_proba')\n assert hasattr(decision_only, 'decision_function')\n\n # Estimator which can get predict_proba enabled after fitting\n gs = GridSearchCV(svm.SVC(probability=False),\n param_grid={'probability': [True]})\n proba_after_fit = OneVsRestClassifier(gs)\n assert not hasattr(proba_after_fit, 'predict_proba')\n proba_after_fit.fit(X_train, Y_train)\n assert hasattr(proba_after_fit, 'predict_proba')\n\n Y_pred = clf.predict(X_test)\n Y_proba = clf.predict_proba(X_test)\n\n # predict assigns a label if the probability that the\n # sample has the label is greater than 0.5.\n pred = Y_proba > .5\n assert_array_equal(pred, Y_pred)\n\n\ndef test_ovr_single_label_predict_proba():\n base_clf = MultinomialNB(alpha=1)\n X, Y = iris.data, iris.target\n X_train, Y_train = X[:80], Y[:80]\n X_test = X[80:]\n clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)\n\n # Decision function only estimator.\n decision_only 
= OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)\n assert not hasattr(decision_only, 'predict_proba')\n\n Y_pred = clf.predict(X_test)\n Y_proba = clf.predict_proba(X_test)\n\n assert_almost_equal(Y_proba.sum(axis=1), 1.0)\n # predict assigns a label if the probability that the\n # sample has the label is greater than 0.5.\n pred = np.array([l.argmax() for l in Y_proba])\n assert not (pred - Y_pred).any()\n\n\ndef test_ovr_multilabel_decision_function():\n X, Y = datasets.make_multilabel_classification(n_samples=100,\n n_features=20,\n n_classes=5,\n n_labels=3,\n length=50,\n allow_unlabeled=True,\n random_state=0)\n X_train, Y_train = X[:80], Y[:80]\n X_test = X[80:]\n clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)\n assert_array_equal((clf.decision_function(X_test) > 0).astype(int),\n clf.predict(X_test))\n\n\ndef test_ovr_single_label_decision_function():\n X, Y = datasets.make_classification(n_samples=100,\n n_features=20,\n random_state=0)\n X_train, Y_train = X[:80], Y[:80]\n X_test = X[80:]\n clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)\n assert_array_equal(clf.decision_function(X_test).ravel() > 0,\n clf.predict(X_test))\n\n\ndef test_ovr_gridsearch():\n ovr = OneVsRestClassifier(LinearSVC(random_state=0))\n Cs = [0.1, 0.5, 0.8]\n cv = GridSearchCV(ovr, {'estimator__C': Cs})\n cv.fit(iris.data, iris.target)\n best_C = cv.best_estimator_.estimators_[0].C\n assert best_C in Cs\n\n\ndef test_ovr_pipeline():\n # Test with pipeline of length one\n # This test is needed because the multiclass estimators may fail to detect\n # the presence of predict_proba or decision_function.\n clf = Pipeline([(\"tree\", DecisionTreeClassifier())])\n ovr_pipe = OneVsRestClassifier(clf)\n ovr_pipe.fit(iris.data, iris.target)\n ovr = OneVsRestClassifier(DecisionTreeClassifier())\n ovr.fit(iris.data, iris.target)\n assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))\n\n\ndef test_ovr_coef_():\n for base_classifier in [SVC(kernel='linear', random_state=0),\n LinearSVC(random_state=0)]:\n # SVC has sparse coef with sparse input data\n\n ovr = OneVsRestClassifier(base_classifier)\n for X in [iris.data, sp.csr_matrix(iris.data)]:\n # test with dense and sparse coef\n ovr.fit(X, iris.target)\n shape = ovr.coef_.shape\n assert shape[0] == n_classes\n assert shape[1] == iris.data.shape[1]\n # don't densify sparse coefficients\n assert (sp.issparse(ovr.estimators_[0].coef_) ==\n sp.issparse(ovr.coef_))\n\n\ndef test_ovr_coef_exceptions():\n # Not fitted exception!\n ovr = OneVsRestClassifier(LinearSVC(random_state=0))\n # lambda is needed because we don't want coef_ to be evaluated right away\n assert_raises(ValueError, lambda x: ovr.coef_, None)\n\n # Doesn't have coef_ exception!\n ovr = OneVsRestClassifier(DecisionTreeClassifier())\n ovr.fit(iris.data, iris.target)\n assert_raises(AttributeError, lambda x: ovr.coef_, None)\n\n\ndef test_ovo_exceptions():\n ovo = OneVsOneClassifier(LinearSVC(random_state=0))\n assert_raises(ValueError, ovo.predict, [])\n\n\ndef test_ovo_fit_on_list():\n # Test that OneVsOne fitting works with a list of targets and yields the\n # same output as predict from an array\n ovo = OneVsOneClassifier(LinearSVC(random_state=0))\n prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)\n iris_data_list = [list(a) for a in iris.data]\n prediction_from_list = ovo.fit(iris_data_list,\n list(iris.target)).predict(iris_data_list)\n assert_array_equal(prediction_from_array, prediction_from_list)\n\n\ndef 
test_ovo_fit_predict():\n # A classifier which implements decision_function.\n ovo = OneVsOneClassifier(LinearSVC(random_state=0))\n ovo.fit(iris.data, iris.target).predict(iris.data)\n assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2\n\n # A classifier which implements predict_proba.\n ovo = OneVsOneClassifier(MultinomialNB())\n ovo.fit(iris.data, iris.target).predict(iris.data)\n assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2\n\n\ndef test_ovo_partial_fit_predict():\n temp = datasets.load_iris()\n X, y = temp.data, temp.target\n ovo1 = OneVsOneClassifier(MultinomialNB())\n ovo1.partial_fit(X[:100], y[:100], np.unique(y))\n ovo1.partial_fit(X[100:], y[100:])\n pred1 = ovo1.predict(X)\n\n ovo2 = OneVsOneClassifier(MultinomialNB())\n ovo2.fit(X, y)\n pred2 = ovo2.predict(X)\n assert len(ovo1.estimators_) == n_classes * (n_classes - 1) / 2\n assert np.mean(y == pred1) > 0.65\n assert_almost_equal(pred1, pred2)\n\n # Test when mini-batches have binary target classes\n ovo1 = OneVsOneClassifier(MultinomialNB())\n ovo1.partial_fit(X[:60], y[:60], np.unique(y))\n ovo1.partial_fit(X[60:], y[60:])\n pred1 = ovo1.predict(X)\n ovo2 = OneVsOneClassifier(MultinomialNB())\n pred2 = ovo2.fit(X, y).predict(X)\n\n assert_almost_equal(pred1, pred2)\n assert len(ovo1.estimators_) == len(np.unique(y))\n assert np.mean(y == pred1) > 0.65\n\n ovo = OneVsOneClassifier(MultinomialNB())\n X = np.random.rand(14, 2)\n y = [1, 1, 2, 3, 3, 0, 0, 4, 4, 4, 4, 4, 2, 2]\n ovo.partial_fit(X[:7], y[:7], [0, 1, 2, 3, 4])\n ovo.partial_fit(X[7:], y[7:])\n pred = ovo.predict(X)\n ovo2 = OneVsOneClassifier(MultinomialNB())\n pred2 = ovo2.fit(X, y).predict(X)\n assert_almost_equal(pred, pred2)\n\n # raises error when mini-batch does not have classes from all_classes\n ovo = OneVsOneClassifier(MultinomialNB())\n error_y = [0, 1, 2, 3, 4, 5, 2]\n message_re = escape(\"Mini-batch contains {0} while \"\n \"it must be subset of {1}\".format(np.unique(error_y),\n np.unique(y)))\n assert_raises_regexp(ValueError, message_re, ovo.partial_fit, X[:7],\n error_y, np.unique(y))\n\n # test partial_fit only exists if estimator has it:\n ovr = OneVsOneClassifier(SVC())\n assert not hasattr(ovr, \"partial_fit\")\n\n\ndef test_ovo_decision_function():\n n_samples = iris.data.shape[0]\n\n ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))\n # first binary\n ovo_clf.fit(iris.data, iris.target == 0)\n decisions = ovo_clf.decision_function(iris.data)\n assert decisions.shape == (n_samples,)\n\n # then multi-class\n ovo_clf.fit(iris.data, iris.target)\n decisions = ovo_clf.decision_function(iris.data)\n\n assert decisions.shape == (n_samples, n_classes)\n assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))\n\n # Compute the votes\n votes = np.zeros((n_samples, n_classes))\n\n k = 0\n for i in range(n_classes):\n for j in range(i + 1, n_classes):\n pred = ovo_clf.estimators_[k].predict(iris.data)\n votes[pred == 0, i] += 1\n votes[pred == 1, j] += 1\n k += 1\n\n # Extract votes and verify\n assert_array_equal(votes, np.round(decisions))\n\n for class_idx in range(n_classes):\n # For each sample and each class, there only 3 possible vote levels\n # because they are only 3 distinct class pairs thus 3 distinct\n # binary classifiers.\n # Therefore, sorting predictions based on votes would yield\n # mostly tied predictions:\n assert set(votes[:, class_idx]).issubset(set([0., 1., 2.]))\n\n # The OVO decision function on the other hand is able to resolve\n # most of the ties on this data as it 
combines both the vote counts\n # and the aggregated confidence levels of the binary classifiers\n # to compute the aggregate decision function. The iris dataset\n # has 150 samples with a couple of duplicates. The OvO decisions\n # can resolve most of the ties:\n assert len(np.unique(decisions[:, class_idx])) > 146\n\n\ndef test_ovo_gridsearch():\n ovo = OneVsOneClassifier(LinearSVC(random_state=0))\n Cs = [0.1, 0.5, 0.8]\n cv = GridSearchCV(ovo, {'estimator__C': Cs})\n cv.fit(iris.data, iris.target)\n best_C = cv.best_estimator_.estimators_[0].C\n assert best_C in Cs\n\n\ndef test_ovo_ties():\n # Test that ties are broken using the decision function,\n # not defaulting to the smallest label\n X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])\n y = np.array([2, 0, 1, 2])\n multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,\n tol=None))\n ovo_prediction = multi_clf.fit(X, y).predict(X)\n ovo_decision = multi_clf.decision_function(X)\n\n # Classifiers are in order 0-1, 0-2, 1-2\n # Use decision_function to compute the votes and the normalized\n # sum_of_confidences, which is used to disambiguate when there is a tie in\n # votes.\n votes = np.round(ovo_decision)\n normalized_confidences = ovo_decision - votes\n\n # For the first point, there is one vote per class\n assert_array_equal(votes[0, :], 1)\n # For the rest, there is no tie and the prediction is the argmax\n assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])\n # For the tie, the prediction is the class with the highest score\n assert ovo_prediction[0] == normalized_confidences[0].argmax()\n\n\ndef test_ovo_ties2():\n # test that ties can not only be won by the first two labels\n X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])\n y_ref = np.array([2, 0, 1, 2])\n\n # cycle through labels so that each label wins once\n for i in range(3):\n y = (y_ref + i) % 3\n multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,\n tol=None))\n ovo_prediction = multi_clf.fit(X, y).predict(X)\n assert ovo_prediction[0] == i % 3\n\n\ndef test_ovo_string_y():\n # Test that the OvO doesn't mess up the encoding of string labels\n X = np.eye(4)\n y = np.array(['a', 'b', 'c', 'd'])\n\n ovo = OneVsOneClassifier(LinearSVC())\n ovo.fit(X, y)\n assert_array_equal(y, ovo.predict(X))\n\n\ndef test_ovo_one_class():\n # Test error for OvO with one class\n X = np.eye(4)\n y = np.array(['a'] * 4)\n\n ovo = OneVsOneClassifier(LinearSVC())\n assert_raise_message(ValueError, \"when only one class\", ovo.fit, X, y)\n\n\ndef test_ovo_float_y():\n # Test that the OvO errors on float targets\n X = iris.data\n y = iris.data[:, 0]\n\n ovo = OneVsOneClassifier(LinearSVC())\n assert_raise_message(ValueError, \"Unknown label type\", ovo.fit, X, y)\n\n\ndef test_ecoc_exceptions():\n ecoc = OutputCodeClassifier(LinearSVC(random_state=0))\n assert_raises(ValueError, ecoc.predict, [])\n\n\ndef test_ecoc_fit_predict():\n # A classifier which implements decision_function.\n ecoc = OutputCodeClassifier(LinearSVC(random_state=0),\n code_size=2, random_state=0)\n ecoc.fit(iris.data, iris.target).predict(iris.data)\n assert len(ecoc.estimators_) == n_classes * 2\n\n # A classifier which implements predict_proba.\n ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)\n ecoc.fit(iris.data, iris.target).predict(iris.data)\n assert len(ecoc.estimators_) == n_classes * 2\n\n\ndef test_ecoc_gridsearch():\n ecoc = OutputCodeClassifier(LinearSVC(random_state=0),\n random_state=0)\n Cs = [0.1, 0.5, 0.8]\n cv = 
GridSearchCV(ecoc, {'estimator__C': Cs})\n cv.fit(iris.data, iris.target)\n best_C = cv.best_estimator_.estimators_[0].C\n assert best_C in Cs\n\n\ndef test_ecoc_float_y():\n # Test that the OCC errors on float targets\n X = iris.data\n y = iris.data[:, 0]\n\n ovo = OutputCodeClassifier(LinearSVC())\n assert_raise_message(ValueError, \"Unknown label type\", ovo.fit, X, y)\n ovo = OutputCodeClassifier(LinearSVC(), code_size=-1)\n assert_raise_message(ValueError, \"code_size should be greater than 0,\"\n \" got -1\", ovo.fit, X, y)\n\n\ndef test_pairwise_indices():\n clf_precomputed = svm.SVC(kernel='precomputed')\n X, y = iris.data, iris.target\n\n ovr_false = OneVsOneClassifier(clf_precomputed)\n linear_kernel = np.dot(X, X.T)\n ovr_false.fit(linear_kernel, y)\n\n n_estimators = len(ovr_false.estimators_)\n precomputed_indices = ovr_false.pairwise_indices_\n\n for idx in precomputed_indices:\n assert (idx.shape[0] * n_estimators / (n_estimators - 1) ==\n linear_kernel.shape[0])\n\n\ndef test_pairwise_attribute():\n clf_precomputed = svm.SVC(kernel='precomputed')\n clf_notprecomputed = svm.SVC()\n\n for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:\n ovr_false = MultiClassClassifier(clf_notprecomputed)\n assert not ovr_false._pairwise\n\n ovr_true = MultiClassClassifier(clf_precomputed)\n assert ovr_true._pairwise\n\n\ndef test_pairwise_cross_val_score():\n clf_precomputed = svm.SVC(kernel='precomputed')\n clf_notprecomputed = svm.SVC(kernel='linear')\n\n X, y = iris.data, iris.target\n\n for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:\n ovr_false = MultiClassClassifier(clf_notprecomputed)\n ovr_true = MultiClassClassifier(clf_precomputed)\n\n linear_kernel = np.dot(X, X.T)\n score_precomputed = cross_val_score(ovr_true, linear_kernel, y)\n score_linear = cross_val_score(ovr_false, X, y)\n assert_array_equal(score_precomputed, score_linear)\n", "# coding: utf-8\n\n# Author: Johannes Schönberger\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport warnings\n\nfrom ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone\nfrom ..base import MultiOutputMixin\nfrom ..utils import check_random_state, check_consistent_length\nfrom ..utils.random import sample_without_replacement\nfrom ..utils.validation import check_is_fitted, _check_sample_weight\nfrom ..utils.validation import _deprecate_positional_args\nfrom ._base import LinearRegression\nfrom ..utils.validation import has_fit_parameter\nfrom ..exceptions import ConvergenceWarning\n\n_EPSILON = np.spacing(1)\n\n\ndef _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):\n \"\"\"Determine number trials such that at least one outlier-free subset is\n sampled for the given inlier/outlier ratio.\n\n Parameters\n ----------\n n_inliers : int\n Number of inliers in the data.\n\n n_samples : int\n Total number of samples in the data.\n\n min_samples : int\n Minimum number of samples chosen randomly from original data.\n\n probability : float\n Probability (confidence) that one outlier-free sample is generated.\n\n Returns\n -------\n trials : int\n Number of trials.\n\n \"\"\"\n inlier_ratio = n_inliers / float(n_samples)\n nom = max(_EPSILON, 1 - probability)\n denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)\n if nom == 1:\n return 0\n if denom == 1:\n return float('inf')\n return abs(float(np.ceil(np.log(nom) / np.log(denom))))\n\n\nclass RANSACRegressor(MetaEstimatorMixin, RegressorMixin,\n MultiOutputMixin, BaseEstimator):\n \"\"\"RANSAC (RANdom SAmple 
Consensus) algorithm.\n\n RANSAC is an iterative algorithm for the robust estimation of parameters\n from a subset of inliers from the complete data set.\n\n Read more in the :ref:`User Guide <ransac_regression>`.\n\n Parameters\n ----------\n base_estimator : object, optional\n Base estimator object which implements the following methods:\n\n * `fit(X, y)`: Fit model to given training data and target values.\n * `score(X, y)`: Returns the mean accuracy on the given test data,\n which is used for the stop criterion defined by `stop_score`.\n Additionally, the score is used to decide which of two equally\n large consensus sets is chosen as the better one.\n * `predict(X)`: Returns predicted values using the linear model,\n which is used to compute residual error using loss function.\n\n If `base_estimator` is None, then\n ``base_estimator=sklearn.linear_model.LinearRegression()`` is used for\n target values of dtype float.\n\n Note that the current implementation only supports regression\n estimators.\n\n min_samples : int (>= 1) or float ([0, 1]), optional\n Minimum number of samples chosen randomly from original data. Treated\n as an absolute number of samples for `min_samples >= 1`, treated as a\n relative number `ceil(min_samples * X.shape[0]`) for\n `min_samples < 1`. This is typically chosen as the minimal number of\n samples necessary to estimate the given `base_estimator`. By default a\n ``sklearn.linear_model.LinearRegression()`` estimator is assumed and\n `min_samples` is chosen as ``X.shape[1] + 1``.\n\n residual_threshold : float, optional\n Maximum residual for a data sample to be classified as an inlier.\n By default the threshold is chosen as the MAD (median absolute\n deviation) of the target values `y`.\n\n is_data_valid : callable, optional\n This function is called with the randomly selected data before the\n model is fitted to it: `is_data_valid(X, y)`. If its return value is\n False the current randomly chosen sub-sample is skipped.\n\n is_model_valid : callable, optional\n This function is called with the estimated model and the randomly\n selected data: `is_model_valid(model, X, y)`. If its return value is\n False the current randomly chosen sub-sample is skipped.\n Rejecting samples with this function is computationally costlier than\n with `is_data_valid`. `is_model_valid` should therefore only be used if\n the estimated model is needed for making the rejection decision.\n\n max_trials : int, optional\n Maximum number of iterations for random sample selection.\n\n max_skips : int, optional\n Maximum number of iterations that can be skipped due to finding zero\n inliers or invalid data defined by ``is_data_valid`` or invalid models\n defined by ``is_model_valid``.\n\n .. versionadded:: 0.19\n\n stop_n_inliers : int, optional\n Stop iteration if at least this number of inliers are found.\n\n stop_score : float, optional\n Stop iteration if score is greater equal than this threshold.\n\n stop_probability : float in range [0, 1], optional\n RANSAC iteration stops if at least one outlier-free set of the training\n data is sampled in RANSAC. 
This requires to generate at least N\n samples (iterations)::\n\n N >= log(1 - probability) / log(1 - e**m)\n\n where the probability (confidence) is typically set to high value such\n as 0.99 (the default) and e is the current fraction of inliers w.r.t.\n the total number of samples.\n\n loss : string, callable, optional, default \"absolute_loss\"\n String inputs, \"absolute_loss\" and \"squared_loss\" are supported which\n find the absolute loss and squared loss per sample\n respectively.\n\n If ``loss`` is a callable, then it should be a function that takes\n two arrays as inputs, the true and predicted value and returns a 1-D\n array with the i-th value of the array corresponding to the loss\n on ``X[i]``.\n\n If the loss on a sample is greater than the ``residual_threshold``,\n then this sample is classified as an outlier.\n\n .. versionadded:: 0.18\n\n random_state : int, RandomState instance, default=None\n The generator used to initialize the centers.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n estimator_ : object\n Best fitted model (copy of the `base_estimator` object).\n\n n_trials_ : int\n Number of random selection trials until one of the stop criteria is\n met. It is always ``<= max_trials``.\n\n inlier_mask_ : bool array of shape [n_samples]\n Boolean mask of inliers classified as ``True``.\n\n n_skips_no_inliers_ : int\n Number of iterations skipped due to finding zero inliers.\n\n .. versionadded:: 0.19\n\n n_skips_invalid_data_ : int\n Number of iterations skipped due to invalid data defined by\n ``is_data_valid``.\n\n .. versionadded:: 0.19\n\n n_skips_invalid_model_ : int\n Number of iterations skipped due to an invalid model defined by\n ``is_model_valid``.\n\n .. versionadded:: 0.19\n\n Examples\n --------\n >>> from sklearn.linear_model import RANSACRegressor\n >>> from sklearn.datasets import make_regression\n >>> X, y = make_regression(\n ... n_samples=200, n_features=2, noise=4.0, random_state=0)\n >>> reg = RANSACRegressor(random_state=0).fit(X, y)\n >>> reg.score(X, y)\n 0.9885...\n >>> reg.predict(X[:1,])\n array([-31.9417...])\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/RANSAC\n .. [2] https://www.sri.com/sites/default/files/publications/ransac-publication.pdf\n .. 
[3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, base_estimator=None, *, min_samples=None,\n residual_threshold=None, is_data_valid=None,\n is_model_valid=None, max_trials=100, max_skips=np.inf,\n stop_n_inliers=np.inf, stop_score=np.inf,\n stop_probability=0.99, loss='absolute_loss',\n random_state=None):\n\n self.base_estimator = base_estimator\n self.min_samples = min_samples\n self.residual_threshold = residual_threshold\n self.is_data_valid = is_data_valid\n self.is_model_valid = is_model_valid\n self.max_trials = max_trials\n self.max_skips = max_skips\n self.stop_n_inliers = stop_n_inliers\n self.stop_score = stop_score\n self.stop_probability = stop_probability\n self.random_state = random_state\n self.loss = loss\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit estimator using RANSAC algorithm.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape [n_samples, n_features]\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample\n raises error if sample_weight is passed and base_estimator\n fit method does not support it.\n\n .. versionadded:: 0.18\n\n Raises\n ------\n ValueError\n If no valid consensus set could be found. This occurs if\n `is_data_valid` and `is_model_valid` return False for all\n `max_trials` randomly chosen sub-samples.\n\n \"\"\"\n # Need to validate separately here.\n # We can't pass multi_ouput=True because that would allow y to be csr.\n check_X_params = dict(accept_sparse='csr')\n check_y_params = dict(ensure_2d=False)\n X, y = self._validate_data(X, y, validate_separately=(check_X_params,\n check_y_params))\n check_consistent_length(X, y)\n\n if self.base_estimator is not None:\n base_estimator = clone(self.base_estimator)\n else:\n base_estimator = LinearRegression()\n\n if self.min_samples is None:\n # assume linear model by default\n min_samples = X.shape[1] + 1\n elif 0 < self.min_samples < 1:\n min_samples = np.ceil(self.min_samples * X.shape[0])\n elif self.min_samples >= 1:\n if self.min_samples % 1 != 0:\n raise ValueError(\"Absolute number of samples must be an \"\n \"integer value.\")\n min_samples = self.min_samples\n else:\n raise ValueError(\"Value for `min_samples` must be scalar and \"\n \"positive.\")\n if min_samples > X.shape[0]:\n raise ValueError(\"`min_samples` may not be larger than number \"\n \"of samples: n_samples = %d.\" % (X.shape[0]))\n\n if self.stop_probability < 0 or self.stop_probability > 1:\n raise ValueError(\"`stop_probability` must be in range [0, 1].\")\n\n if self.residual_threshold is None:\n # MAD (median absolute deviation)\n residual_threshold = np.median(np.abs(y - np.median(y)))\n else:\n residual_threshold = self.residual_threshold\n\n if self.loss == \"absolute_loss\":\n if y.ndim == 1:\n loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)\n else:\n loss_function = lambda \\\n y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)\n\n elif self.loss == \"squared_loss\":\n if y.ndim == 1:\n loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2\n else:\n loss_function = lambda \\\n y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)\n\n elif callable(self.loss):\n loss_function = self.loss\n\n else:\n raise ValueError(\n \"loss should be 'absolute_loss', 'squared_loss' or a callable.\"\n \"Got %s. 
\" % self.loss)\n\n\n random_state = check_random_state(self.random_state)\n\n try: # Not all estimator accept a random_state\n base_estimator.set_params(random_state=random_state)\n except ValueError:\n pass\n\n estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,\n \"sample_weight\")\n estimator_name = type(base_estimator).__name__\n if (sample_weight is not None and not\n estimator_fit_has_sample_weight):\n raise ValueError(\"%s does not support sample_weight. Samples\"\n \" weights are only used for the calibration\"\n \" itself.\" % estimator_name)\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X)\n\n n_inliers_best = 1\n score_best = -np.inf\n inlier_mask_best = None\n X_inlier_best = None\n y_inlier_best = None\n inlier_best_idxs_subset = None\n self.n_skips_no_inliers_ = 0\n self.n_skips_invalid_data_ = 0\n self.n_skips_invalid_model_ = 0\n\n # number of data samples\n n_samples = X.shape[0]\n sample_idxs = np.arange(n_samples)\n\n self.n_trials_ = 0\n max_trials = self.max_trials\n while self.n_trials_ < max_trials:\n self.n_trials_ += 1\n\n if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +\n self.n_skips_invalid_model_) > self.max_skips:\n break\n\n # choose random sample set\n subset_idxs = sample_without_replacement(n_samples, min_samples,\n random_state=random_state)\n X_subset = X[subset_idxs]\n y_subset = y[subset_idxs]\n\n # check if random sample set is valid\n if (self.is_data_valid is not None\n and not self.is_data_valid(X_subset, y_subset)):\n self.n_skips_invalid_data_ += 1\n continue\n\n # fit model for current random sample set\n if sample_weight is None:\n base_estimator.fit(X_subset, y_subset)\n else:\n base_estimator.fit(X_subset, y_subset,\n sample_weight=sample_weight[subset_idxs])\n\n # check if estimated model is valid\n if (self.is_model_valid is not None and not\n self.is_model_valid(base_estimator, X_subset, y_subset)):\n self.n_skips_invalid_model_ += 1\n continue\n\n # residuals of all data for current random sample model\n y_pred = base_estimator.predict(X)\n residuals_subset = loss_function(y, y_pred)\n\n # classify data into inliers and outliers\n inlier_mask_subset = residuals_subset < residual_threshold\n n_inliers_subset = np.sum(inlier_mask_subset)\n\n # less inliers -> skip current random sample\n if n_inliers_subset < n_inliers_best:\n self.n_skips_no_inliers_ += 1\n continue\n\n # extract inlier data set\n inlier_idxs_subset = sample_idxs[inlier_mask_subset]\n X_inlier_subset = X[inlier_idxs_subset]\n y_inlier_subset = y[inlier_idxs_subset]\n\n # score of inlier data set\n score_subset = base_estimator.score(X_inlier_subset,\n y_inlier_subset)\n\n # same number of inliers but worse score -> skip current random\n # sample\n if (n_inliers_subset == n_inliers_best\n and score_subset < score_best):\n continue\n\n # save current random sample as best sample\n n_inliers_best = n_inliers_subset\n score_best = score_subset\n inlier_mask_best = inlier_mask_subset\n X_inlier_best = X_inlier_subset\n y_inlier_best = y_inlier_subset\n inlier_best_idxs_subset = inlier_idxs_subset\n\n max_trials = min(\n max_trials,\n _dynamic_max_trials(n_inliers_best, n_samples,\n min_samples, self.stop_probability))\n\n # break if sufficient number of inliers or score is reached\n if n_inliers_best >= self.stop_n_inliers or \\\n score_best >= self.stop_score:\n break\n\n # if none of the iterations met the required criteria\n if inlier_mask_best is None:\n if ((self.n_skips_no_inliers_ + 
self.n_skips_invalid_data_ +\n self.n_skips_invalid_model_) > self.max_skips):\n raise ValueError(\n \"RANSAC skipped more iterations than `max_skips` without\"\n \" finding a valid consensus set. Iterations were skipped\"\n \" because each randomly chosen sub-sample failed the\"\n \" passing criteria. See estimator attributes for\"\n \" diagnostics (n_skips*).\")\n else:\n raise ValueError(\n \"RANSAC could not find a valid consensus set. All\"\n \" `max_trials` iterations were skipped because each\"\n \" randomly chosen sub-sample failed the passing criteria.\"\n \" See estimator attributes for diagnostics (n_skips*).\")\n else:\n if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +\n self.n_skips_invalid_model_) > self.max_skips:\n warnings.warn(\"RANSAC found a valid consensus set but exited\"\n \" early due to skipping more iterations than\"\n \" `max_skips`. See estimator attributes for\"\n \" diagnostics (n_skips*).\",\n ConvergenceWarning)\n\n # estimate final model using all inliers\n if sample_weight is None:\n base_estimator.fit(X_inlier_best, y_inlier_best)\n else:\n base_estimator.fit(\n X_inlier_best,\n y_inlier_best,\n sample_weight=sample_weight[inlier_best_idxs_subset])\n\n self.estimator_ = base_estimator\n self.inlier_mask_ = inlier_mask_best\n return self\n\n def predict(self, X):\n \"\"\"Predict using the estimated model.\n\n This is a wrapper for `estimator_.predict(X)`.\n\n Parameters\n ----------\n X : numpy array of shape [n_samples, n_features]\n\n Returns\n -------\n y : array, shape = [n_samples] or [n_samples, n_targets]\n Returns predicted values.\n \"\"\"\n check_is_fitted(self)\n\n return self.estimator_.predict(X)\n\n def score(self, X, y):\n \"\"\"Returns the score of the prediction.\n\n This is a wrapper for `estimator_.score(X, y)`.\n\n Parameters\n ----------\n X : numpy array or sparse matrix of shape [n_samples, n_features]\n Training data.\n\n y : array, shape = [n_samples] or [n_samples, n_targets]\n Target values.\n\n Returns\n -------\n z : float\n Score of the prediction.\n \"\"\"\n check_is_fitted(self)\n\n return self.estimator_.score(X, y)\n", "import os\n\nimport numpy as np\nfrom numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,\n suppress_warnings)\nfrom pytest import raises as assert_raises\nimport pytest\n\nimport scipy.interpolate.interpnd as interpnd\nimport scipy.spatial.qhull as qhull\n\nimport pickle\n\n\ndef data_file(basename):\n return os.path.join(os.path.abspath(os.path.dirname(__file__)),\n 'data', basename)\n\n\nclass TestLinearNDInterpolation(object):\n def test_smoketest(self):\n # Test at single points\n x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n\n yi = interpnd.LinearNDInterpolator(x, y)(x)\n assert_almost_equal(y, yi)\n\n def test_smoketest_alternate(self):\n # Test at single points, alternate calling convention\n x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n\n yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])\n assert_almost_equal(y, yi)\n\n def test_complex_smoketest(self):\n # Test at single points\n x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n y = y - 3j*y\n\n yi = interpnd.LinearNDInterpolator(x, y)(x)\n assert_almost_equal(y, yi)\n\n def test_tri_input(self):\n # Test at single points\n x 
= np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n y = y - 3j*y\n\n tri = qhull.Delaunay(x)\n yi = interpnd.LinearNDInterpolator(tri, y)(x)\n assert_almost_equal(y, yi)\n\n def test_square(self):\n # Test barycentric interpolation on a square against a manual\n # implementation\n\n points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)\n values = np.array([1., 2., -3., 5.], dtype=np.double)\n\n # NB: assume triangles (0, 1, 3) and (1, 2, 3)\n #\n # 1----2\n # | \\ |\n # | \\ |\n # 0----3\n\n def ip(x, y):\n t1 = (x + y <= 1)\n t2 = ~t1\n\n x1 = x[t1]\n y1 = y[t1]\n\n x2 = x[t2]\n y2 = y[t2]\n\n z = 0*x\n\n z[t1] = (values[0]*(1 - x1 - y1)\n + values[1]*y1\n + values[3]*x1)\n\n z[t2] = (values[2]*(x2 + y2 - 1)\n + values[1]*(1 - x2)\n + values[3]*(1 - y2))\n return z\n\n xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],\n np.linspace(0, 1, 14)[None,:])\n xx = xx.ravel()\n yy = yy.ravel()\n\n xi = np.array([xx, yy]).T.copy()\n zi = interpnd.LinearNDInterpolator(points, values)(xi)\n\n assert_almost_equal(zi, ip(xx, yy))\n\n def test_smoketest_rescale(self):\n # Test at single points\n x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n\n yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)\n assert_almost_equal(y, yi)\n\n def test_square_rescale(self):\n # Test barycentric interpolation on a rectangle with rescaling\n # agaings the same implementation without rescaling\n\n points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.double)\n values = np.array([1., 2., -3., 5.], dtype=np.double)\n\n xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],\n np.linspace(0, 100, 14)[None,:])\n xx = xx.ravel()\n yy = yy.ravel()\n xi = np.array([xx, yy]).T.copy()\n zi = interpnd.LinearNDInterpolator(points, values)(xi)\n zi_rescaled = interpnd.LinearNDInterpolator(points, values,\n rescale=True)(xi)\n\n assert_almost_equal(zi, zi_rescaled)\n\n def test_tripoints_input_rescale(self):\n # Test at single points\n x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n y = y - 3j*y\n\n tri = qhull.Delaunay(x)\n yi = interpnd.LinearNDInterpolator(tri.points, y)(x)\n yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,\n rescale=True)(x)\n assert_almost_equal(yi, yi_rescale)\n\n def test_tri_input_rescale(self):\n # Test at single points\n x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n y = y - 3j*y\n\n tri = qhull.Delaunay(x)\n match = (\"Rescaling is not supported when passing a \"\n \"Delaunay triangulation as ``points``.\")\n with pytest.raises(ValueError, match=match):\n interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)\n\n def test_pickle(self):\n # Test at single points\n np.random.seed(1234)\n x = np.random.rand(30, 2)\n y = np.random.rand(30) + 1j*np.random.rand(30)\n\n ip = interpnd.LinearNDInterpolator(x, y)\n ip2 = pickle.loads(pickle.dumps(ip))\n\n assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))\n\n\nclass TestEstimateGradients2DGlobal(object):\n def test_smoketest(self):\n x = np.array([(0, 0), (0, 2),\n (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)\n tri = qhull.Delaunay(x)\n\n # Should be exact for linear functions, independent of triangulation\n\n funcs = [\n (lambda x, y: 0*x + 1, (0, 0)),\n (lambda x, y: 0 + x, (1, 0)),\n (lambda x, y: 
-2 + y, (0, 1)),\n (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))\n ]\n\n for j, (func, grad) in enumerate(funcs):\n z = func(x[:,0], x[:,1])\n dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)\n\n assert_equal(dz.shape, (6, 2))\n assert_allclose(dz, np.array(grad)[None,:] + 0*dz,\n rtol=1e-5, atol=1e-5, err_msg=\"item %d\" % j)\n\n def test_regression_2359(self):\n # Check regression --- for certain point sets, gradient\n # estimation could end up in an infinite loop\n points = np.load(data_file('estimate_gradients_hang.npy'))\n values = np.random.rand(points.shape[0])\n tri = qhull.Delaunay(points)\n\n # This should not hang\n with suppress_warnings() as sup:\n sup.filter(interpnd.GradientEstimationWarning,\n \"Gradient estimation did not converge\")\n interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)\n\n\nclass TestCloughTocher2DInterpolator(object):\n\n def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False, rescale=False, **kw):\n np.random.seed(1234)\n if x is None:\n x = np.array([(0, 0), (0, 1),\n (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),\n (0.5, 0.2)],\n dtype=float)\n\n if not alternate:\n ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),\n tol=1e-6, rescale=rescale)\n else:\n ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),\n func(x[:,0], x[:,1]),\n tol=1e-6, rescale=rescale)\n\n p = np.random.rand(50, 2)\n\n if not alternate:\n a = ip(p)\n else:\n a = ip(p[:,0], p[:,1])\n b = func(p[:,0], p[:,1])\n\n try:\n assert_allclose(a, b, **kw)\n except AssertionError:\n print(abs(a - b))\n print(ip.grad)\n raise\n\n def test_linear_smoketest(self):\n # Should be exact for linear functions, independent of triangulation\n funcs = [\n lambda x, y: 0*x + 1,\n lambda x, y: 0 + x,\n lambda x, y: -2 + y,\n lambda x, y: 3 + 3*x + 14.15*y,\n ]\n\n for j, func in enumerate(funcs):\n self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,\n err_msg=\"Function %d\" % j)\n self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,\n alternate=True,\n err_msg=\"Function (alternate) %d\" % j)\n # check rescaling\n self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,\n err_msg=\"Function (rescaled) %d\" % j, rescale=True)\n self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,\n alternate=True, rescale=True,\n err_msg=\"Function (alternate, rescaled) %d\" % j)\n\n def test_quadratic_smoketest(self):\n # Should be reasonably accurate for quadratic functions\n funcs = [\n lambda x, y: x**2,\n lambda x, y: y**2,\n lambda x, y: x**2 - y**2,\n lambda x, y: x*y,\n ]\n\n for j, func in enumerate(funcs):\n self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,\n err_msg=\"Function %d\" % j)\n self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,\n err_msg=\"Function %d\" % j, rescale=True)\n\n def test_tri_input(self):\n # Test at single points\n x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n y = y - 3j*y\n\n tri = qhull.Delaunay(x)\n yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)\n assert_almost_equal(y, yi)\n\n def test_tri_input_rescale(self):\n # Test at single points\n x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n y = y - 3j*y\n\n tri = qhull.Delaunay(x)\n match = (\"Rescaling is not supported when passing a \"\n \"Delaunay triangulation as ``points``.\")\n with pytest.raises(ValueError, match=match):\n interpnd.CloughTocher2DInterpolator(tri, y, 
rescale=True)(x)\n\n def test_tripoints_input_rescale(self):\n # Test at single points\n x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n y = y - 3j*y\n\n tri = qhull.Delaunay(x)\n yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)\n yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)\n assert_almost_equal(yi, yi_rescale)\n\n def test_dense(self):\n # Should be more accurate for dense meshes\n funcs = [\n lambda x, y: x**2,\n lambda x, y: y**2,\n lambda x, y: x**2 - y**2,\n lambda x, y: x*y,\n lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)\n ]\n\n np.random.seed(4321) # use a different seed than the check!\n grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),\n np.random.rand(30*30, 2)]\n\n for j, func in enumerate(funcs):\n self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,\n err_msg=\"Function %d\" % j)\n self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,\n err_msg=\"Function %d\" % j, rescale=True)\n\n def test_wrong_ndim(self):\n x = np.random.randn(30, 3)\n y = np.random.randn(30)\n assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)\n\n def test_pickle(self):\n # Test at single points\n np.random.seed(1234)\n x = np.random.rand(30, 2)\n y = np.random.rand(30) + 1j*np.random.rand(30)\n\n ip = interpnd.CloughTocher2DInterpolator(x, y)\n ip2 = pickle.loads(pickle.dumps(ip))\n\n assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))\n\n def test_boundary_tri_symmetry(self):\n # Interpolation at neighbourless triangles should retain\n # symmetry with mirroring the triangle.\n\n # Equilateral triangle\n points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)])\n values = np.array([1, 0, 0])\n\n ip = interpnd.CloughTocher2DInterpolator(points, values)\n\n # Set gradient to zero at vertices\n ip.grad[...] = 0\n\n # Interpolation should be symmetric vs. bisector\n alpha = 0.3\n p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)])\n p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)])\n\n v1 = ip(p1)\n v2 = ip(p2)\n assert_allclose(v1, v2)\n\n # ... and affine invariant\n np.random.seed(1)\n A = np.random.randn(2, 2)\n b = np.random.randn(2)\n\n points = A.dot(points.T).T + b[None,:]\n p1 = A.dot(p1) + b\n p2 = A.dot(p2) + b\n\n ip = interpnd.CloughTocher2DInterpolator(points, values)\n ip.grad[...] = 0\n\n w1 = ip(p1)\n w2 = ip(p2)\n assert_allclose(w1, v1)\n assert_allclose(w2, v2)\n" ]
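The RANSAC source reproduced in the code list above quotes its adaptive stopping criterion, N >= log(1 - probability) / log(1 - e**m), and implements it in _dynamic_max_trials. The snippet below is a minimal standalone sketch of that same calculation (the function name and the example numbers are illustrative, not part of the dataset); it shows how many random subsets are needed before one outlier-free draw is expected at the requested confidence.

    import numpy as np

    def dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
        # Number of trials such that, with the given confidence, at least one
        # randomly drawn subset of size `min_samples` contains no outliers.
        eps = np.spacing(1)                        # guard against log(0)
        inlier_ratio = n_inliers / float(n_samples)
        nom = max(eps, 1 - probability)            # 1 - confidence
        denom = max(eps, 1 - inlier_ratio ** min_samples)
        if nom == 1:
            return 0                               # nothing to guarantee
        if denom == 1:
            return float('inf')                    # no inliers: cannot succeed
        return abs(float(np.ceil(np.log(nom) / np.log(denom))))

    # 50% inliers, a 2-sample model, 99% confidence -> 17.0 trials
    print(dynamic_max_trials(50, 100, 2, 0.99))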
[ [ "numpy.nonzero", "numpy.set_printoptions", "numpy.get_printoptions" ], [ "sklearn.utils._testing.assert_array_less", "sklearn.preprocessing._data.QuantileTransformer", "sklearn.utils._testing.assert_almost_equal", "scipy.sparse.isspmatrix_csc", "sklearn.utils._testing.assert_allclose", "numpy.unique", "numpy.full", "numpy.diff", "numpy.zeros", "numpy.log", "numpy.median", "numpy.full_like", "numpy.array", "numpy.sum", "sklearn.preprocessing._data.KernelCenterer", "sklearn.utils.gen_batches", "sklearn.preprocessing._data.RobustScaler", "numpy.asarray", "sklearn.preprocessing._data.add_dummy_feature", "scipy.sparse.random", "numpy.var", "sklearn.preprocessing._data.StandardScaler", "numpy.allclose", "sklearn.preprocessing._data.MinMaxScaler", "sklearn.utils._testing.assert_warns_message", "numpy.std", "numpy.min", "sklearn.svm.SVR", "sklearn.preprocessing._data.scale", "sklearn.utils.sparsefuncs.mean_variance_axis", "sklearn.preprocessing._data.robust_scale", "numpy.errstate", "scipy.sparse.isspmatrix_csr", "numpy.random.RandomState", "sklearn.preprocessing._data.MaxAbsScaler", "sklearn.preprocessing._data.PolynomialFeatures", "numpy.ones", "numpy.linspace", "sklearn.preprocessing._data.maxabs_scale", "sklearn.base.clone", "numpy.zeros_like", "numpy.random.sample", "sklearn.preprocessing._data.PowerTransformer", "numpy.hstack", "scipy.sparse.coo_matrix", "scipy.sparse.isspmatrix_coo", "sklearn.preprocessing._data.power_transform", "scipy.sparse.csc_matrix", "numpy.isnan", "scipy.sparse.csr_matrix", "numpy.transpose", "sklearn.utils._testing._convert_container", "sklearn.model_selection.cross_val_predict", "sklearn.utils.shuffle", "scipy.sparse.rand", "numpy.linalg.norm", "numpy.percentile", "sklearn.preprocessing._data.minmax_scale", "sklearn.preprocessing._data._handle_zeros_in_scale", "numpy.dot", "numpy.max", "numpy.random.randn", "numpy.ones_like", "scipy.sparse.issparse", "numpy.arange", "numpy.finfo", "sklearn.preprocessing._data.normalize", "sklearn.utils._testing.assert_allclose_dense_sparse", "sklearn.preprocessing._data.Normalizer", "numpy.ravel", "sklearn.utils._testing.assert_array_equal", "sklearn.preprocessing._data.quantile_transform", "sklearn.datasets.load_iris", "numpy.random.random", "numpy.abs", "numpy.isfinite", "numpy.isfortran", "numpy.sort", "numpy.sign", "sklearn.utils._testing.assert_array_almost_equal", "sklearn.utils._testing.assert_no_warnings", "sklearn.preprocessing._data.Binarizer" ], [ "numpy.dot", "numpy.asarray", "numpy.argmin", "numpy.iinfo", "numpy.tri" ], [ "numpy.dot", "sklearn.datasets.make_classification", "sklearn.linear_model.ElasticNet", "numpy.round", "sklearn.tree.DecisionTreeClassifier", "sklearn.utils._testing.assert_almost_equal", "numpy.mean", "sklearn.svm.LinearSVC", "numpy.random.randn", "sklearn.multiclass.OneVsRestClassifier", "sklearn.linear_model.SGDClassifier", "sklearn.linear_model.Perceptron", "scipy.sparse.issparse", "numpy.unique", "sklearn.multiclass.OneVsOneClassifier", "numpy.eye", "sklearn.linear_model.Lasso", "numpy.argmax", "numpy.zeros", "sklearn.utils._testing.assert_array_equal", "sklearn.utils._testing.assert_raise_message", "sklearn.naive_bayes.MultinomialNB", "sklearn.metrics.precision_score", "sklearn.datasets.load_iris", "sklearn.utils._testing.assert_raises_regexp", "scipy.sparse.csr_matrix", "sklearn.svm.SVR", "sklearn.linear_model.Ridge", "sklearn.datasets.make_multilabel_classification", "numpy.random.rand", "sklearn.svm.SVC", "numpy.array", "numpy.random.RandomState", "sklearn.metrics.recall_score", 
"sklearn.utils._testing.assert_raises", "sklearn.utils._testing.assert_warns", "sklearn.model_selection.GridSearchCV", "sklearn.tree.DecisionTreeRegressor", "sklearn.linear_model.LogisticRegression", "sklearn.model_selection.cross_val_score", "sklearn.utils.shuffle", "numpy.ones", "sklearn.utils.multiclass.type_of_target", "sklearn.linear_model.LinearRegression" ], [ "numpy.log", "numpy.spacing", "numpy.abs", "numpy.arange", "numpy.median", "numpy.ceil", "numpy.sum" ], [ "scipy.spatial.qhull.Delaunay", "numpy.testing.assert_equal", "numpy.sqrt", "numpy.random.seed", "numpy.linspace", "numpy.testing.suppress_warnings", "numpy.arange", "numpy.cos", "scipy.interpolate.interpnd.LinearNDInterpolator", "numpy.sin", "numpy.testing.assert_almost_equal", "numpy.random.randn", "numpy.random.rand", "scipy.interpolate.interpnd.CloughTocher2DInterpolator", "numpy.testing.assert_allclose", "numpy.array", "scipy.interpolate.interpnd.estimate_gradients_2d_global" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
realchrisward/RodentPlethysmography
[ "6b27e08a65b4c0b399e5b20173c0d134f5dbca38" ]
[ "calm segment extractor py34 v5 20170918.py" ]
[ "#!usr/bin/env python3\r\n## /\\ compatability line\r\n\r\n## distribution notes - Calm Segment Extractor py34 v4.py\r\n\"\"\"\r\nCalm Segment Extractor by Chris Ward (C) 2015\r\nupdated for python 3.4 compatability by Chris Ward (C) 2016\r\nprovided free for non-commercial/fair use.\r\n\r\nThis program attempts to define periods of calm behavior\r\nby marking contiguous segments below a movement score\r\n(uses the output from the Movement Quantification script)\r\n\r\n\r\n*recommended versions for dependencies*\r\n*matplotlib [1.4.2]\r\n*numpy [1.9.1]\r\n\r\nUser inputs:\r\n*motion scores-tab delimited file containing movements scores by time\r\n*animal information-tab delimited file containing animal information for\r\nthe motion files to be analyzed\r\n*OutFile-name of the txt file to be output by this program\r\n\r\nCustomizable Settings:\r\n*padding - amount of time to subtract from 'calm bouts' to correct for time stamp\r\nand threshold inaccuracies when resumping calm behavior after an active bout\r\n*baseline percentile - percentile of overall motion score considered 'background noise'\r\n*relative threshhold - threshhold relative to baseline used to define calm vs avtive\r\n\r\nOutputs:\r\n*a tab delimited file with ...\r\n -animal information and time segments corresponding to 'calm behavior'\r\n\"\"\"\r\n\r\n## import modules\r\nimport datetime\r\nimport wardcode3 as wardcode\r\nimport matplotlib.pyplot as plt\r\nimport numpy\r\n\r\n## define functions\r\ndef AutoCallSegs(timestamps,movement,pd,bs,rt):\r\n # determin threshold level for movement\r\n timestamps=[float(i) for i in timestamps]\r\n movement=[float(i) for i in movement]\r\n thresh=numpy.percentile(movement,bs)*rt\r\n # calculate intervals above and below theshold\r\n crossings=wardcode.getbelowthresh(movement,thresh)\r\n diffcross=wardcode.getdifflist(crossings)\r\n starts=wardcode.getlistfromfilter(timestamps[1:],diffcross,1)\r\n stops=wardcode.getlistfromfilter(timestamps[1:],diffcross,-1)\r\n if len(starts)<1:starts=[timestamps[0]-1]\r\n if len(stops)<1:stops=[timestamps[-1]+1]\r\n #align lists\r\n if starts[0]>stops[0]:\r\n starts=[timestamps[0]-1]+starts\r\n if starts[-1]>stops[-1]:\r\n stops.append(timestamps[-1])\r\n #populate calmseg tuples and check padding\r\n calmsegs=[]\r\n segdur=[]\r\n for i in range(min(len(starts),len(stops))):\r\n calmsegs.append((starts[i]+pd,stops[i]-pd))\r\n segdur.append((stops[i]-pd)-(starts[i]+pd))\r\n goodsegs=wardcode.getabovethresh(segdur,0)\r\n calmsegsout=wardcode.getlistfromfilter(calmsegs,goodsegs,1)\r\n\r\n return calmsegsout,thresh\r\n \r\n \r\n\r\ndef checkExpActScores(expdict,exp,bs,rt):\r\n # expdict - dictionary containing data\r\n # exp - current experiment\r\n # bs - baseline\r\n # rt - relative threshold\r\n ##\r\n anikey=[int(i) for i in expdict[exp].keys()]\r\n anikey.sort()\r\n spx=int(numpy.ceil(len(anikey)/4))\r\n\r\n if len(anikey)<4:\r\n spy=len(anikey)\r\n else:\r\n spy=int(numpy.ceil(len(anikey)/spx))\r\n \r\n plt.figure(exp)\r\n \r\n for i in range(len(anikey)):\r\n animal=str(anikey[i])\r\n cs=expdict[exp][animal]['auto']['calmsegs']\r\n ts=[float(i) for i in expdict[exp][animal]['data']['timestamp']]\r\n y1=[float(i) for i in expdict[exp][animal]['data']['movement']]\r\n noise=numpy.percentile(y1,bs)\r\n thresh=noise*rt\r\n aniname=expdict[exp][animal]['line']+'-'+expdict[exp][animal]['id']\r\n\r\n plt.subplot(spx,spy,i+1)\r\n \r\n plt.plot(hold=False)\r\n plt.plot(hold=True)\r\n plt.plot((ts[0],ts[-1]),\r\n (thresh,thresh),'y-')\r\n 
plt.plot(ts,y1,'r-')\r\n plt.plot((ts[0],ts[-1]),\r\n (noise,noise),'b-')\r\n for j in cs:\r\n plt.plot(j,(thresh,thresh),'ko-')\r\n plt.title('|'+str(i+1)+'|'+aniname)\r\n plt.axis([ts[0],ts[-1],0,noise*4])\r\n \r\n plt.show()\r\n\r\n \r\n## define main\r\ndef main():\r\n ## get filenames\r\n # get motion scores\r\n MotionName=wardcode.guiOpenFileName({'title':'Open Motion Score File','filetypes':[('motion','.mtn'),('all files','.*')]})\r\n # get animal information\r\n AnimalName=wardcode.guiOpenFileName({'title':'Open Animal Information File','filetypes':[('animal list','.al'),('all files','.*')]})\r\n # get output filename\r\n outputname=wardcode.guiSaveFileName({'title':'Save Output As...'})\r\n \r\n ## set parameters\r\n PD=wardcode.getInt('pad inactive time by __ seconds:')\r\n BS=wardcode.getInt('consider baseline noise at __ percentile of movement signal (3% recommended)')\r\n RT=wardcode.getFloat('set relative threshold for movement at __ x of baseline noise (1.4x recommended)')\r\n CheckCalls=wardcode.getYN('Check calm segment calls? (y/n)')\r\n \r\n ## get data\r\n MotionDict=wardcode.dataDictUnfold(\r\n wardcode.dataParseTabDelToColumns(\r\n [i.lower() for i in wardcode.dataGrab(MotionName)]\r\n ,0)\r\n )\r\n AnimalDict=wardcode.dataDictUnfold(\r\n wardcode.dataParseTabDelToColumns(\r\n [i.lower() for i in wardcode.dataGrab(AnimalName)]\r\n ,0)\r\n )\r\n\r\n ## convert motion scores and timestamps to numbers\r\n for i in range(len(MotionDict['movement'])):\r\n MotionDict['movement'][i]=float(MotionDict['movement'][i])\r\n MotionDict['timestamp'][i]=float(MotionDict['timestamp'][i])\r\n \r\n ## parse MotionDict into experiment groups\r\n ExpDict={}\r\n # step through experiments to get animal information\r\n for Exp in set(MotionDict['filename']):\r\n ##\r\n print(str(datetime.datetime.now())+' : parsing data : '+Exp)\r\n ExpDict[Exp]={}\r\n CurAniDict={}\r\n CurAniDict[Exp]={}\r\n # populate current experiment animal information\r\n for k in AnimalDict.keys():\r\n CurAniDict[Exp][k]=wardcode.getlistfromfilter(AnimalDict[k],AnimalDict['video filename'],Exp)\r\n # iterate through animals in current video and get info\r\n ##\r\n filefilter=[1 if i==Exp else 0 for i in MotionDict['filename']]\r\n CurExp={}\r\n # populate CurExp dictionary with animal, timestamp, and movement data\r\n for k in ['animal','timestamp','movement']:\r\n CurExp[k]=wardcode.getlistfromfilter(MotionDict[k],filefilter,1)\r\n # grab the data from the current experiment - auto populate animals with motion data using \"set\" passing the current file filter\r\n for animal in set(\r\n wardcode.getlistfromfilter(\r\n MotionDict['animal'],MotionDict['filename'],Exp)\r\n ):\r\n \r\n ExpDict[Exp][animal]={}\r\n ExpDict[Exp][animal]['data']={}\r\n for k in AnimalDict.keys():\r\n try:\r\n ExpDict[Exp][animal][k]=''.join(\r\n wardcode.getlistfromfilter(\r\n CurAniDict[Exp][k],CurAniDict[Exp]['video chamber'],animal)\r\n )\r\n except:\r\n ExpDict[Exp][animal][k]='unk'\r\n animalfilter=[1 if i==animal else 0 for i in CurExp['animal']]\r\n for k in ['timestamp','movement']:#<-MotionDict.keys():\r\n \r\n ExpDict[Exp][animal]['data'][k]=wardcode.getlistfromfilter(\r\n CurExp[k],\r\n animalfilter,\r\n 1)\r\n \r\n ## auto call segments\r\n for Exp in ExpDict:\r\n print(str(datetime.datetime.now())+' : calling calm segs : '+Exp)\r\n for animal in ExpDict[Exp]:\r\n TS=ExpDict[Exp][animal]['data']['timestamp']\r\n ExpDict[Exp][animal]['auto']={'calmsegs':[],'thresh':0}\r\n 
ExpDict[Exp][animal]['auto']['calmsegs'],ExpDict[Exp][animal]['auto']['thresh']=AutoCallSegs(\r\n TS,ExpDict[Exp][animal]['data']['movement'],PD,BS,RT)\r\n\r\n ExpDict[Exp][animal]['cBS']=BS\r\n ExpDict[Exp][animal]['cRT']=RT\r\n cBS=BS\r\n cRT=RT\r\n \r\n ##\r\n if CheckCalls=='y':\r\n while 1:\r\n checkExpActScores(ExpDict,Exp,cBS,cRT)\r\n if wardcode.getYN('Accept current results (\"N\" to try alternate settings)\\n')=='y':\r\n break\r\n cBS=wardcode.getInt('consider baseline noise at __ percentile of movement signal (3% recommended)')\r\n cRT=wardcode.getFloat('set relative threshold for movement at __ x of baseline noise (1.4x recommended)')\r\n for animal in ExpDict[Exp]:\r\n ExpDict[Exp][animal]['auto']['calmsegs'],ExpDict[Exp][animal]['auto']['thresh']=AutoCallSegs(\r\n TS,ExpDict[Exp][animal]['data']['movement'],PD,cBS,cRT) \r\n ExpDict[Exp][animal]['cBS']=cBS\r\n ExpDict[Exp][animal]['cRT']=cRT\r\n ## prepare header - asciifile, line, id, animalcode, subsegment, mainsegment, start, stop, base, relthresh, absthresh\r\n outheader=(\"{asciifile}\\t{videofile}\\t{line}\\t{idno}\\t{animalcode}\\t{subsegment}\\t{mainsegment}\\t{start}\\t{stop}\\t{base}\\t{relthresh}\\t{absthresh}\".format(\r\n asciifile='asciifile',videofile='videofile',line='line',idno='idno',animalcode='animalcode',\r\n subsegment='subsegment',mainsegment='mainsegment',start='start',stop='stop',\r\n base='base',relthresh='relthresh',absthresh='absthresh'))\r\n ## output data\r\n outlist=[]\r\n outlist.append(outheader)\r\n for Exp in ExpDict:\r\n for animal in ExpDict[Exp]:\r\n for startseg,stopseg in ExpDict[Exp][animal]['auto']['calmsegs']:\r\n nextline=(\"{asciifile}\\t{videofile}\\t{line}\\t{idno}\\t{animalcode}\\t{subsegment}\\t{mainsegment}\\t{start}\\t{stop}\\t{base}\\t{relthresh}\\t{absthresh}\".format(\r\n asciifile=ExpDict[Exp][animal]['filename'],videofile=Exp,\r\n line=ExpDict[Exp][animal]['line'],\r\n idno=ExpDict[Exp][animal]['id'],animalcode=ExpDict[Exp][animal]['chamber'],\r\n subsegment='calm',mainsegment='calm',start=startseg,stop=stopseg,\r\n base=ExpDict[Exp][animal]['cBS'],relthresh=ExpDict[Exp][animal]['cRT'],\r\n absthresh=ExpDict[Exp][animal]['auto']['thresh']))\r\n outlist.append(nextline)\r\n ## write results to output file\r\n with open(outputname+'.seg','w') as f:\r\n f.write('\\n'.join(outlist))\r\n f.close()\r\n \r\n input('finished calm segment extraction...\\npress ENTER to exit')\r\n\r\n## run main\r\nif __name__=='__main__':\r\n main()\r\n" ]
[ [ "numpy.percentile", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ablotekar/irfu-python
[ "740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e", "740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e", "740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e", "740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e", "740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e", "740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e", "740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e", "740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e" ]
[ "pyrfu/mms/eis_skymap_combine_sc.py", "pyrfu/pyrf/convert_fac.py", "pyrfu/pyrf/dynamic_press.py", "pyrfu/pyrf/movmean.py", "pyrfu/mms/eis_proton_correction.py", "pyrfu/pyrf/filt.py", "pyrfu/mms/psd_rebin.py", "pyrfu/pyrf/dist_append.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party imports\nimport numpy as np\nimport xarray as xr\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\ndef _idx_closest(lst0, lst1):\n return [(np.abs(np.asarray(lst0) - k)).argmin() for k in lst1]\n\n\ndef eis_skymap_combine_sc(skymaps):\n r\"\"\"Generate composite skymap from the EIS sensors across the MMS\n spacecraft.\n\n Parameters\n ----------\n skymaps : list of xarray.DataArray\n Skymap distribution for all spacecraft.\n\n Returns\n -------\n out : xarray.Dataset\n Composite skymap distribution\n\n See Also\n --------\n pyrfu.mms.get_eis_allt, pyrfu.mms.eis_pad,\n pyrfu.mms.eis_spec_combine_sc, pyrfu.mms.eis_spec_combine_sc\n\n \"\"\"\n\n # Determine spacecraft with smallest number of time steps to use as\n # reference spacecraft\n time_size = [len(probe.time.data) for probe in skymaps]\n ref_sc_time_size, ref_sc_loc = [np.min(time_size), np.argmin(time_size)]\n ref_probe = skymaps[ref_sc_loc]\n\n # Define common energy grid across EIS instruments\n n_en_chans = [probe.energy.shape[1] for probe in skymaps]\n size_en, loc_ref_en = [np.min(n_en_chans), np.argmin(n_en_chans)]\n ref_energy = skymaps[loc_ref_en].energy.data[0, :]\n\n energy_data, e_plus, e_minu = [[], [], []]\n for probe in skymaps:\n idx = _idx_closest(probe.energy.data[0, :], ref_energy)\n energy_data.append(probe.energy.data[0, idx])\n e_minu.append(probe.attrs[\"energy_dminus\"][idx])\n e_plus.append(probe.attrs[\"energy_dplus\"][idx])\n\n energy_data = np.stack(energy_data)\n common_energy = np.nanmean(energy_data, axis=0)\n common_energy = np.tile(common_energy, (ref_sc_time_size, 1))\n\n #\n e_minu = np.stack(e_minu)\n e_plus = np.stack(e_plus)\n common_minu = np.nanmean(e_minu, axis=0)\n common_plus = np.nanmean(e_plus, axis=0)\n\n # Use azimuthal and elevation angle from reference spacecraft (in\n # practice they are the same for all spacecraft)\n phi = ref_probe.phi.data\n theta = ref_probe.theta.data\n\n allmms_skymap = np.zeros([ref_sc_time_size, size_en, phi.shape[1],\n len(theta), len(skymaps)])\n\n for p, skymap in enumerate(skymaps):\n idx_en = _idx_closest(skymap.energy.data[0, :], common_energy[0, :])\n allmms_skymap[..., p] = skymap.data[:ref_sc_time_size, idx_en, ...]\n\n # Average the four spacecraft\n allmms_skymap_avg = np.nanmean(allmms_skymap, axis=-1)\n\n # Create combined skymap\n out_dict = {\"time\": ref_probe.time.data,\n \"idx0\": range(common_energy.shape[1]),\n \"idx1\": range(phi.shape[1]), \"idx2\": range(len(theta)),\n \"data\": ([\"time\", \"idx0\", \"idx1\", \"idx2\"], allmms_skymap_avg),\n \"energy\": ([\"time\", \"idx0\"], common_energy),\n \"phi\": ([\"time\", \"idx1\"], phi), \"theta\": ([\"idx2\"], theta)}\n\n out = xr.Dataset(out_dict)\n\n out.attrs[\"energy_dminus\"] = common_minu\n out.attrs[\"energy_dplus\"] = common_plus\n\n return out\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party imports\nimport numpy as np\nimport xarray as xr\n\n# Local imports\nfrom .resample import resample\nfrom .ts_vec_xyz import ts_vec_xyz\nfrom .calc_fs import calc_fs\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\ndef convert_fac(inp, b_bgd, r_xyz: list = None):\n r\"\"\"Transforms to a field-aligned coordinate (FAC) system defined as 
:\n * R_parallel_z aligned with the background magnetic field\n * R_perp_y defined by R_parallel cross the position vector of the\n spacecraft (nominally eastward at the equator)\n * R_perp_x defined by R_perp_y cross R_par\n\n If inp is one vector along r direction, out is inp[perp, para]\n projection.\n\n Parameters\n ----------\n inp : xarray.DataArray\n Time series of the input field.\n b_bgd : xarray.DataArray\n Time series of the background magnetic field.\n r_xyz : xarray.DataArray or ndarray or list\n Position vector of spacecraft.\n\n Returns\n -------\n out : xarray.DataArray\n Time series of the input field in field aligned coordinates\n system.\n\n Notes\n -----\n All input parameters must be in the same coordinate system.\n\n Examples\n --------\n >>> import numpy\n >>> from pyrfu import mms, pyrf\n\n Time interval\n\n >>> tint = [\"2019-09-14T07:54:00.000\", \"2019-09-14T08:11:00.000\"]\n\n Spacecraft index\n\n >>> mms_id = 1\n\n Load magnetic field (FGM) and electric field (EDP)\n\n >>> b_xyz = mms.get_data(\"B_gse_fgm_brst_l2\", tint, mms_id)\n >>> e_xyz = mms.get_data(\"E_gse_edp_brst_l2\", tint, mms_id)\n\n Convert to field aligned coordinates\n\n >>> e_xyzfac = pyrf.convert_fac(e_xyz, b_xyz, numpy.array([1, 0, 0]))\n\n \"\"\"\n\n assert r_xyz is None or isinstance(r_xyz, (xr.DataArray, list, np.ndarray))\n\n if r_xyz is None:\n r_xyz = np.array([1, 0, 0])\n\n if len(inp) != len(b_bgd):\n b_bgd = resample(b_bgd, inp, f_s=calc_fs(inp))\n\n time, inp_data = [inp.time.data, inp.data]\n\n # Normalize background magnetic field\n b_hat = b_bgd / np.linalg.norm(b_bgd, axis=1, keepdims=True)\n\n if isinstance(r_xyz, xr.DataArray):\n r_xyz = resample(r_xyz, b_bgd)\n\n elif len(r_xyz) == 3:\n r_xyz = np.tile(r_xyz, (len(b_bgd), 1))\n\n # Perpendicular\n r_perp_y = np.cross(b_hat, r_xyz, axis=1)\n r_perp_y /= np.linalg.norm(r_perp_y, axis=1, keepdims=True)\n r_perp_x = np.cross(r_perp_y, b_bgd, axis=1)\n r_perp_x /= np.linalg.norm(r_perp_x, axis=1, keepdims=True)\n\n assert inp_data.shape[1] in [1, 3], \"Invalid dimension of inp\"\n\n if inp_data.shape[1] == 3:\n out_data = np.zeros(inp.shape)\n\n out_data[:, 0] = np.sum(r_perp_x * inp_data, axis=1)\n out_data[:, 1] = np.sum(r_perp_y * inp_data, axis=1)\n out_data[:, 2] = np.sum(b_hat * inp_data, axis=1)\n\n # xarray\n out = xr.DataArray(out_data, coords=[time, inp.comp],\n dims=[\"time\", \"comp\"])\n\n else:\n out_data = np.zeros([3, inp_data.shape[0]])\n\n out_data[:, 0] = inp[:, 0] * np.sum(r_perp_x * r_xyz, axis=1)\n out_data[:, 1] = inp[:, 0] * np.sum(b_hat * r_xyz, axis=1)\n\n out = ts_vec_xyz(time, out_data, attrs=inp.attrs)\n\n return out\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party imports\nimport numpy as np\n\nfrom scipy import constants\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\ndef dynamic_press(n_s, v_xyz, specie: str = \"i\"):\n r\"\"\"Computes dynamic pressure.\n\n Parameters\n ----------\n n_s : xarray.DataArray\n Time series of the number density of the specie.\n v_xyz : xarray.DataArray\n Time series of the bulk velocity of the specie.\n specie : {\"i\", \"e\"}, Optional\n Specie. 
default \"i\".\n\n Returns\n -------\n p_dyn : xarray.DataArray\n Time series of the dynamic pressure of the specie.\n\n Examples\n --------\n >>> from pyrfu import mms, pyrf\n\n Time interval\n\n >>> tint = [\"2019-09-14T07:54:00.000\", \"2019-09-14T08:11:00.000\"]\n\n Spacecraft index\n\n >>> mms_id = 1\n\n Load ion bulk velocity and remove spintone\n\n >>> v_xyz_i = mms.get_data(\"Vi_gse_fpi_fast_l2\", tint, mms_id)\n >>> st_xyz_i = mms.get_data(\"STi_gse_fpi_fast_l2\", tint, mms_id)\n >>> v_xyz_i = v_xyz_i - st_xyz_i\n\n Ion number density\n\n >>> n_i = mms.get_data(\"Ni_fpi_fast_l2\", tint, mms_id)\n\n Compute dynamic pressure\n\n >>> p = pyrf.dynamic_press(n_i, v_xyz_i, specie=\"i\")\n\n \"\"\"\n\n if specie == \"i\":\n mass = constants.proton_mass\n elif specie == \"e\":\n mass = constants.electron_mass\n else:\n raise ValueError(\"Unknown specie\")\n\n p_dyn = n_s * mass * np.linalg.norm(v_xyz, axis=0) ** 2\n\n return p_dyn\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party imports\nimport numpy as np\nimport xarray as xr\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\ndef movmean(inp, n_pts: int = 100):\n r\"\"\"Computes running average of the inp over npts points.\n\n Parameters\n ----------\n inp : xarray.DataArray\n Time series of the input variable.\n n_pts : int, Optional\n Number of points to average over.\n\n Returns\n -------\n out : xarray.DataArray\n Time series of the input variable averaged over npts points.\n\n Notes\n -----\n Works also with 3D skymap distribution.\n\n Examples\n --------\n >>> from pyrfu import mms, pyrf\n\n Time interval\n\n >>> tint = [\"2019-09-14T07:54:00.000\",\"2019-09-14T08:11:00.000\"]\n\n Spacecraft index\n\n >>> mms_id = 1\n\n Load ion pressure tensor\n\n >>> p_xyz_i = mms.get_data(\"Pi_gse_fpi_brst_l2\", tint, mms_id)\n\n Running average the pressure tensor over 10s\n\n >>> fs = pyrf.calc_fs(p_xyz_i)\n >>>> p_xyz_i = pyrf.movmean(p_xyz_i, int(10 * fs))\n\n \"\"\"\n\n if isinstance(n_pts, float):\n n_pts = np.floor(n_pts).astype(int)\n\n if n_pts % 2:\n n_pts -= 1\n\n # Computes moving average\n cum_sum = np.cumsum(inp.data, axis=0)\n out_dat = (cum_sum[n_pts:, ...] 
- cum_sum[:-n_pts, ...]) / n_pts\n\n coords = []\n\n for k in inp.dims:\n if k == \"time\":\n coords.append(inp.coords[k][int(n_pts / 2):-int(n_pts / 2)])\n else:\n coords.append(inp.coords[k].data)\n\n # Output in DataArray type\n out = xr.DataArray(out_dat, coords=coords, dims=inp.dims, attrs=inp.attrs)\n\n return out\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party imports\nimport numpy as np\nimport xarray as xr\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\ndef _phxtof_calibration(energy, alpha, beta, gamma):\n r\"\"\"Pulse Height x Time Of Flight correction model from EPD Data Product\n Guide\"\"\"\n return 1 / (.5 * (1 + alpha * (np.tanh((energy - beta) / gamma) + 1)))\n\n\ndef _extof_calibration(energy, alpha, beta, gamma):\n r\"\"\"Energy x Time Of Flight correction model from EPD Data Product Guide\"\"\"\n return 1 / (.5 * (1 + alpha * (1 - np.tanh((energy - beta) / gamma) + 1)))\n\n\ndef eis_proton_correction(flux_eis):\n r\"\"\"Corrects proton flux values based on FPI/HPCA/EPD-EIS cross\n calibration.\n\n Parameters\n ----------\n flux_eis : xarray.DataArray\n Omni-directional energy spectrum from EPD-EIS.\n\n Returns\n -------\n flux_eis_corr : xarray.DataArray\n Cross-calibrated omni-directional energy spectrum from EIS-EPD.\n\n See Also\n --------\n pyrfu.mms.get_eis_allt, pyrfu.mms.eis_omni\n\n \"\"\"\n\n # Coefficients from EPD Data Product Guide\n alpha_, beta_, gamma_ = [-.3, 49e-3, 1e-3]\n\n # Pulse Height x Time Of Flight (PHxTOF) energy correction factor\n energy_phxtof = flux_eis.energy.data[:7]\n phxtof_corr = _phxtof_calibration(energy_phxtof, alpha_, beta_, gamma_)\n\n # Energy x Time Of Flight (ExTOF) energy correction factor\n energy_extof = flux_eis.energy.data[7:]\n extof_corr = _extof_calibration(energy_extof, alpha_, beta_, gamma_)\n\n eis_corr = np.hstack([phxtof_corr, extof_corr])\n\n if isinstance(flux_eis, xr.Dataset):\n scopes_eis = list(filter(lambda x: x[0] == \"t\", flux_eis))\n out_keys = list(filter(lambda x: x not in scopes_eis, flux_eis))\n out_dict = {k: flux_eis[k] for k in out_keys}\n\n for scope in scopes_eis:\n out_dict[scope] = flux_eis[scope].copy()\n out_dict[scope].data *= eis_corr\n\n flux_eis_corr = xr.Dataset(out_dict)\n elif isinstance(flux_eis, xr.DataArray):\n # Apply correction to omni-directional energy spectrum\n flux_eis_corr = flux_eis.copy()\n flux_eis_corr.data *= eis_corr\n else:\n raise TypeError(\"flux_eis must be a Dataset or a DataArray\")\n\n return flux_eis_corr\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party imports\nimport numpy as np\nimport xarray as xr\n\nfrom scipy import signal\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\n# noinspection PyTupleAssignmentBalance\ndef _ellip_coefficients(f_min, f_max, order):\n num1, den1, num2, den2 = [None] * 4\n\n if f_min == 0:\n if order == -1:\n order, f_max = signal.ellipord(f_max,\n np.min([f_max * 1.1, 0.9999]),\n .5, 60)\n\n num1, den1 = signal.ellip(order, .5, 60, f_max, btype=\"lowpass\")\n elif f_max == 0:\n if order == -1:\n order, f_min = signal.ellipord(f_min,\n np.min([f_min * 1.1, 0.9999]),\n .5, 60)\n\n num1, den1 = signal.ellip(order, .5, 60, f_min, btype=\"highpass\")\n else:\n if order == -1:\n order, f_max = signal.ellipord(f_max,\n 
np.min([f_max * 1.3, 0.9999]),\n .5, 60)\n\n num1, den1 = signal.ellip(order, .5, 60, f_max)\n\n if order == -1:\n order, f_min = signal.ellipord(f_min, f_min * .75, .5, 60)\n\n num2, den2 = signal.ellip(order, .5, 60, f_min)\n\n return num1, den1, num2, den2\n\n\ndef filt(inp, f_min: float = 0., f_max: float = 1., order: int = -1):\n r\"\"\"Filters input quantity.\n\n Parameters\n ----------\n inp : xarray.DataArray\n Time series of the variable to filter.\n f_min : float, Optional\n Lower limit of the frequency range. Default is 0. (Highpass filter).\n f_max : float, Optional\n Upper limit of the frequency range. Default is 1. (Highpass filter).\n order : int, Optional\n Order of the elliptic filter. Default is -1.\n\n Returns\n -------\n out : xarray.DataArray\n Time series of the filtered signal.\n\n Examples\n --------\n >>> from pyrfu import mms, pyrf\n\n Time interval\n\n >>> tint = [\"2017-07-18T13:03:34.000\", \"2017-07-18T13:07:00.000\"]\n\n Spacecraft index\n\n >>> mms_id = 1\n\n Load magnetic and electric fields\n\n >>> b_xyz = mms.get_data(\"B_gse_fgm_brst_l2\", tint, mms_id)\n >>> e_xyz = mms.get_data(\"E_gse_edp_brst_l2\", tint, mms_id)\n\n Convert E to field aligned coordinates\n\n >>> e_xyzfac = pyrf.convert_fac(e_xyz, b_xyz, [1,0,0])\n\n Bandpass filter E waveform\n\n >>> e_xyzfac_hf = pyrf.filt(e_xyzfac, 4, 0, 3)\n >>> e_xyzfac_lf = pyrf.filt(e_xyzfac, 0, 4, 3)\n\n \"\"\"\n\n f_samp = 1 / (np.median(np.diff(inp.time)).astype(int) * 1e-9)\n\n # Data of the input\n inp_data = inp.data\n\n f_min, f_max = [f_min / (f_samp / 2), f_max / (f_samp / 2)]\n\n f_max = np.min([f_max, 1.])\n\n # Parameters of the elliptic filter. fact defines the width between\n # stopband and passband\n # r_pass, r_stop, fact = [0.5, 60, 1.1]\n\n num1, den1, num2, den2 = _ellip_coefficients(f_min, f_max, order)\n\n if len(inp_data.shape) == 1:\n inp_data = inp_data[:, np.newaxis]\n\n out_data = np.zeros(inp_data.shape)\n\n for i_col in range(inp_data.shape[1]):\n out_data[:, i_col] = signal.filtfilt(num1, den1, inp_data[:, i_col])\n\n if num2 is not None and den2 is not None:\n out_data[:, i_col] = signal.filtfilt(num2, den2,\n out_data[:, i_col])\n if inp_data.shape[1] == 1:\n out_data = out_data[:, 0]\n\n out = xr.DataArray(out_data, coords=inp.coords, dims=inp.dims,\n attrs=inp.attrs)\n\n return out\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party\nimport numpy as np\nimport xarray as xr\n\n# Local imports\nfrom ..pyrf import calc_dt\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\ndef psd_rebin(vdf, phi, energy0, energy1, step_table):\n r\"\"\"Converts burst mode distribution into 64 energy channel distribution.\n Functions takes the burst mode distribution sampled in two energy tables\n and converts to a single energy table with 64 energy channels. 
Time\n resolution is halved and phi angles are averaged over adjacent times.\n\n Parameters\n ----------\n vdf : xarray.Dataset\n Time series of the particle distribution.\n phi : xarray.DataArray\n Time series of the phi angles.\n energy0 : xarray.DataArray or ndarray\n Energy table 0.\n energy1 : xarray.DataArray or ndarray\n Energy table 1.\n step_table : xarray.DataArray\n Time series of the stepping table between energies (burst).\n\n Returns\n -------\n time_r : ndarray\n Revised time steps.\n vdf_r : ndarray\n Rebinned particle distribution.\n energy_r : ndarray\n Revised energy table.\n phi_r : ndarray\n Time series of the recalculated phi angle.\n\n Notes\n -----\n I'm assuming no gaps in the burst data interval. If there is a gap use\n time_clip before running. To be updated later.\n\n \"\"\"\n\n if isinstance(energy0, xr.DataArray):\n energy0 = energy0.data\n else:\n pass\n\n if isinstance(energy1, xr.DataArray):\n energy1 = energy1.data\n else:\n pass\n\n step_table = step_table.data\n\n # Sort energy levels\n energy_r = np.sort(np.hstack([energy0, energy1]))\n\n # Define new times\n delta_t = calc_dt(vdf.data)\n time_r = vdf.time.data[:-1:2] + int(delta_t * 1e9 / 2)\n\n vdf_r = np.zeros((len(time_r), 64, 32, 16))\n phi_r = np.zeros((len(time_r), 32))\n\n phi_s = np.roll(phi.data, 2, axis=1)\n phi_s[:, 0] = phi_s[:, 0] - 360\n\n time_indices = np.arange(0, len(vdf.time) - 1, 2)\n\n for new_el_num, idx in enumerate(time_indices[:-1]):\n if phi.data[idx, 0] > phi.data[idx + 1, 0]:\n phi_r[new_el_num, :] = (phi.data[idx, :] + phi_s[idx + 1, :]) / 2\n\n vdf_temp = np.roll(np.squeeze(vdf.data.data[idx + 1, ...]), 2,\n axis=1)\n\n if step_table[idx]:\n vdf_r[new_el_num, 1:64:2, ...] = vdf.data.data[idx, ...]\n vdf_r[new_el_num, 0:63:2, ...] = vdf_temp\n else:\n vdf_r[new_el_num, 1:64:2, ...] = vdf.data.data[idx, ...]\n vdf_r[new_el_num, 0:63:2, ...] = vdf_temp\n\n else:\n phi_r[new_el_num, :] = (phi.data[idx, :] + phi.data[idx + 1, :])\n phi_r[new_el_num, :] /= 2\n\n if step_table[idx]:\n vdf_r[new_el_num, 1:64:2, ...] = vdf.data.data[idx, ...]\n vdf_r[new_el_num, 0:63:2, ...] = vdf.data.data[idx + 1, ...]\n else:\n vdf_r[new_el_num, 1:64:2, ...] = vdf.data.data[idx + 1, ...]\n vdf_r[new_el_num, 0:63:2, ...] 
= vdf.data.data[idx, ...]\n\n return time_r, vdf_r, energy_r, phi_r\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party imports\nimport numpy as np\n\n# Local imports\nfrom .ts_skymap import ts_skymap\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\ndef dist_append(inp0, inp1):\n r\"\"\"Concatenate two distribution skymaps along the time axis.\n\n Parameters\n ----------\n inp0 : xarray.Dataset\n 3D skymap distribution at early times.\n inp1 : xarray.Dataset\n 3D skymap distribution at late times.\n\n Returns\n -------\n out : xarray.Dataset\n 3D skymap of the concatenated 3D skymaps.\n\n Notes\n -----\n The time series have to be in the correct time order.\n\n \"\"\"\n\n if inp0 is None:\n return inp1\n\n # time\n time = np.hstack([inp0.time.data, inp1.time.data])\n\n # attributes\n attrs = inp0.attrs\n\n # Azimuthal angle\n if inp0.phi.ndim == 2:\n phi = np.vstack([inp0.phi.data, inp1.phi.data])\n else:\n phi = inp0.phi.data\n\n # Elevation angle\n theta = inp0.theta.data\n\n # distribution\n data = np.vstack([inp0.data, inp1.data])\n\n if \"delta_energy_plus\" in attrs:\n delta_energy_plus = np.vstack([inp0.attrs[\"delta_energy_plus\"].data,\n inp1.attrs[\"delta_energy_plus\"].data])\n attrs[\"delta_energy_plus\"] = delta_energy_plus\n\n if \"delta_energy_minus\" in inp0.attrs:\n delta_energy_minus = np.vstack([inp0.attrs[\"delta_energy_minus\"].data,\n inp1.attrs[\"delta_energy_minus\"].data])\n attrs[\"delta_energy_minus\"] = delta_energy_minus\n\n # Energy\n if inp0.attrs[\"tmmode\"] == \"brst\":\n step_table = np.hstack([inp0.attrs[\"esteptable\"],\n inp1.attrs[\"esteptable\"]])\n\n out = ts_skymap(time, data, None, phi, theta, energy0=inp0.energy0,\n energy1=inp0.energy1, esteptable=step_table)\n\n attrs.pop(\"esteptable\")\n else:\n energy = np.vstack([inp0.energy.data, inp1.energy.data])\n\n out = ts_skymap(time, data, energy, phi, theta)\n\n for k in attrs:\n out.attrs[k] = attrs[k]\n\n return out\n" ]
[ [ "numpy.min", "numpy.asarray", "numpy.tile", "numpy.stack", "numpy.argmin", "numpy.nanmean" ], [ "numpy.linalg.norm", "numpy.cross", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.linalg.norm" ], [ "numpy.cumsum", "numpy.floor" ], [ "numpy.hstack", "numpy.tanh" ], [ "scipy.signal.filtfilt", "numpy.min", "scipy.signal.ellip", "numpy.diff", "scipy.signal.ellipord", "numpy.zeros" ], [ "numpy.hstack", "numpy.squeeze", "numpy.roll" ], [ "numpy.hstack", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RobertJaro/qt-solar-viewer
[ "dcecdc8040f457abf8d978a5ecbff61396358c32" ]
[ "solarviewer/app/plot.py" ]
[ "from abc import abstractmethod\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_qt5 import NavigationToolbar2QT\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom qtpy import QtWidgets\n\nfrom solarviewer.config.base import Viewer, DataModel\nfrom solarviewer.ui.plot import Ui_Plot\nfrom solarviewer.util import executeTask\n\n\nclass PlotWidget(Viewer):\n\n def __init__(self):\n Viewer.__init__(self)\n self.ui = Ui_Plot()\n self.ui.setupUi(self)\n\n self.initMainCanvas()\n self.rendered.clear()\n\n def initMainCanvas(self):\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar2QT(self.canvas, self)\n self.toolbar.setVisible(False)\n FigureCanvas.setSizePolicy(self.canvas,\n QtWidgets.QSizePolicy.Expanding,\n QtWidgets.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self.canvas)\n self.canvas.hide()\n self.ui.verticalLayout.addWidget(self.canvas)\n\n def updateModel(self, model: DataModel):\n self._model = model\n self.redraw()\n\n def redraw(self):\n self.rendered.clear()\n self.canvas.hide()\n self.ui.progress.show()\n executeTask(self._redraw, [], self._afterRedraw)\n\n def _redraw(self):\n self.draw(self._model)\n self.canvas.draw()\n\n def _afterRedraw(self):\n self.ui.progress.hide()\n self.canvas.show()\n self.rendered.set()\n\n @abstractmethod\n def draw(self, data_model: DataModel):\n raise NotImplementedError\n" ]
[ [ "matplotlib.backends.backend_qt5.NavigationToolbar2QT", "matplotlib.pyplot.figure", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ducongju/HRNet
[ "8c6e1580d0410c439d16a9f220bf0ac48fb39e9a", "8c6e1580d0410c439d16a9f220bf0ac48fb39e9a" ]
[ "lib/dataset/coco.py", "lib/models/pose_resnet.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nfrom collections import OrderedDict\nimport logging\nimport os\n\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\"\"\"\njson文件:\n1. 以人类可读的格式存储和加载numpy数组。\n2. 存储和加载通用和定制的类实例。\n3. 将日期/时间存储并加载为字典(包括时区)。\n4. 使用OrderedDict保留地图顺序。\n5. 通过以#开头的行来允许json文件中的注释。\n\"\"\"\nimport json_tricks as json\nimport numpy as np\n\nfrom dataset.JointsDataset import JointsDataset\nfrom nms.nms import oks_nms\nfrom nms.nms import soft_oks_nms\n\n\n# TODO makefile用途是什么\nlogger = logging.getLogger(__name__)\n\n\nclass COCODataset(JointsDataset):\n '''\n \"keypoints\": {\n 0: \"nose\",\n 1: \"left_eye\",\n 2: \"right_eye\",\n 3: \"left_ear\",\n 4: \"right_ear\",\n 5: \"left_shoulder\",\n 6: \"right_shoulder\",\n 7: \"left_elbow\",\n 8: \"right_elbow\",\n 9: \"left_wrist\",\n 10: \"right_wrist\",\n 11: \"left_hip\",\n 12: \"right_hip\",\n 13: \"left_knee\",\n 14: \"right_knee\",\n 15: \"left_ankle\",\n 16: \"right_ankle\"\n },\n\t\"skeleton\": [\n [16,14],[14,12],[17,15],[15,13],[12,13],[6,12],[7,13], [6,7],[6,8],\n [7,9],[8,10],[9,11],[2,3],[1,2],[1,3],[2,4],[3,5],[4,6],[5,7]]\n '''\n def __init__(self, cfg, root, image_set, is_train, transform=None):\n super().__init__(cfg, root, image_set, is_train, transform)\n self.nms_thre = cfg.TEST.NMS_THRE\n self.image_thre = cfg.TEST.IMAGE_THRE\n self.soft_nms = cfg.TEST.SOFT_NMS\n self.oks_thre = cfg.TEST.OKS_THRE\n self.in_vis_thre = cfg.TEST.IN_VIS_THRE\n self.bbox_file = cfg.TEST.COCO_BBOX_FILE\n self.use_gt_bbox = cfg.TEST.USE_GT_BBOX\n self.image_width = cfg.MODEL.IMAGE_SIZE[0]\n self.image_height = cfg.MODEL.IMAGE_SIZE[1]\n self.aspect_ratio = self.image_width * 1.0 / self.image_height\n self.pixel_std = 200\n\n self.coco = COCO(self._get_ann_file_keypoint())\n\n # deal with class names\n cats = [cat['name']\n for cat in self.coco.loadCats(self.coco.getCatIds())]\n self.classes = ['__background__'] + cats\n logger.info('=> classes: {}'.format(self.classes))\n self.num_classes = len(self.classes)\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))\n self._coco_ind_to_class_ind = dict(\n [\n (self._class_to_coco_ind[cls], self._class_to_ind[cls])\n for cls in self.classes[1:]\n ]\n )\n\n # load image file names\n self.image_set_index = self._load_image_set_index()\n self.num_images = len(self.image_set_index)\n logger.info('=> num_images: {}'.format(self.num_images))\n\n self.num_joints = 17\n self.flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8],\n [9, 10], [11, 12], [13, 14], [15, 16]]\n self.parent_ids = None\n self.upper_body_ids = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n self.lower_body_ids = (11, 12, 13, 14, 15, 16)\n\n self.joints_weight = np.array(\n [\n 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2,\n 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, 1.5\n ],\n dtype=np.float32\n ).reshape((self.num_joints, 1)) # TODO 处理细节: 不同关节设置不同权重\n\n self.db = self._get_db()\n\n if is_train and cfg.DATASET.SELECT_DATA:\n self.db = self.select_data(self.db)\n\n logger.info('=> load {} samples'.format(len(self.db)))\n\n def _get_ann_file_keypoint(self):\n \"\"\" self.root / 
annotations / person_keypoints_train2017.json \"\"\"\n prefix = 'person_keypoints' \\\n if 'test' not in self.image_set else 'image_info'\n return os.path.join(\n self.root,\n 'annotations',\n prefix + '_' + self.image_set + '.json'\n )\n\n def _load_image_set_index(self):\n \"\"\" image id: int \"\"\"\n image_ids = self.coco.getImgIds()\n return image_ids\n\n def _get_db(self):\n if self.is_train or self.use_gt_bbox:\n # use ground truth bbox\n gt_db = self._load_coco_keypoint_annotations()\n else:\n # use bbox from detection\n gt_db = self._load_coco_person_detection_results()\n return gt_db\n\n def _load_coco_keypoint_annotations(self):\n \"\"\" ground truth bbox and keypoints \"\"\"\n gt_db = []\n for index in self.image_set_index:\n gt_db.extend(self._load_coco_keypoint_annotation_kernal(index))\n return gt_db\n\n def _load_coco_keypoint_annotation_kernal(self, index):\n \"\"\"\n coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']\n iscrowd:\n crowd instances are handled by marking their overlaps with all categories to -1\n and later excluded in training\n bbox:\n [x1, y1, w, h]\n :param index: coco image id\n :return: db entry\n \"\"\"\n im_ann = self.coco.loadImgs(index)[0]\n width = im_ann['width']\n height = im_ann['height']\n\n annIds = self.coco.getAnnIds(imgIds=index, iscrowd=False)\n objs = self.coco.loadAnns(annIds)\n\n # sanitize bboxes\n valid_objs = []\n for obj in objs:\n x, y, w, h = obj['bbox']\n x1 = np.max((0, x))\n y1 = np.max((0, y))\n x2 = np.min((width - 1, x1 + np.max((0, w - 1))))\n y2 = np.min((height - 1, y1 + np.max((0, h - 1))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_bbox'] = [x1, y1, x2-x1, y2-y1]\n valid_objs.append(obj)\n objs = valid_objs\n\n rec = []\n for obj in objs:\n cls = self._coco_ind_to_class_ind[obj['category_id']]\n if cls != 1:\n continue\n\n # ignore objs without keypoints annotation\n if max(obj['keypoints']) == 0:\n continue\n\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)\n for ipt in range(self.num_joints):\n joints_3d[ipt, 0] = obj['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = obj['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = obj['keypoints'][ipt * 3 + 2]\n if t_vis > 1:\n t_vis = 1\n joints_3d_vis[ipt, 0] = t_vis\n joints_3d_vis[ipt, 1] = t_vis\n joints_3d_vis[ipt, 2] = 0\n\n center, scale = self._box2cs(obj['clean_bbox'][:4])\n rec.append({\n 'image': self.image_path_from_index(index),\n 'center': center,\n 'scale': scale,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n 'filename': '',\n 'imgnum': 0,\n })\n\n return rec\n\n def _box2cs(self, box):\n x, y, w, h = box[:4]\n return self._xywh2cs(x, y, w, h)\n\n def _xywh2cs(self, x, y, w, h):\n center = np.zeros((2), dtype=np.float32)\n center[0] = x + w * 0.5\n center[1] = y + h * 0.5\n\n if w > self.aspect_ratio * h:\n h = w * 1.0 / self.aspect_ratio\n elif w < self.aspect_ratio * h:\n w = h * self.aspect_ratio\n scale = np.array(\n [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],\n dtype=np.float32)\n if center[0] != -1:\n scale = scale * 1.25\n\n return center, scale\n\n def image_path_from_index(self, index):\n \"\"\" example: images / train2017 / 000000119993.jpg \"\"\"\n file_name = '%012d.jpg' % index\n if '2014' in self.image_set:\n file_name = 'COCO_%s_' % self.image_set + file_name\n\n prefix = 'test2017' if 'test' in self.image_set else self.image_set\n\n data_name = prefix + '.zip@' if 
self.data_format == 'zip' else prefix\n\n image_path = os.path.join(\n self.root, 'images', data_name, file_name)\n\n return image_path\n\n def _load_coco_person_detection_results(self):\n all_boxes = None\n with open(self.bbox_file, 'r') as f:\n all_boxes = json.load(f)\n\n if not all_boxes:\n logger.error('=> Load %s fail!' % self.bbox_file)\n return None\n\n logger.info('=> Total boxes: {}'.format(len(all_boxes)))\n\n kpt_db = []\n num_boxes = 0\n for n_img in range(0, len(all_boxes)):\n det_res = all_boxes[n_img]\n if det_res['category_id'] != 1:\n continue\n img_name = self.image_path_from_index(det_res['image_id'])\n box = det_res['bbox']\n score = det_res['score']\n\n if score < self.image_thre:\n continue\n\n num_boxes = num_boxes + 1\n\n center, scale = self._box2cs(box)\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.ones(\n (self.num_joints, 3), dtype=np.float)\n kpt_db.append({\n 'image': img_name,\n 'center': center,\n 'scale': scale,\n 'score': score,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n })\n\n logger.info('=> Total boxes after fliter low score@{}: {}'.format(\n self.image_thre, num_boxes))\n return kpt_db\n\n def evaluate(self, cfg, preds, output_dir, all_boxes, img_path,\n *args, **kwargs):\n rank = cfg.RANK\n\n res_folder = os.path.join(output_dir, 'results')\n if not os.path.exists(res_folder):\n try:\n os.makedirs(res_folder)\n except Exception:\n logger.error('Fail to make {}'.format(res_folder))\n\n res_file = os.path.join(\n res_folder, 'keypoints_{}_results_{}.json'.format(\n self.image_set, rank)\n )\n\n # person x (keypoints)\n _kpts = []\n for idx, kpt in enumerate(preds):\n _kpts.append({\n 'keypoints': kpt,\n 'center': all_boxes[idx][0:2],\n 'scale': all_boxes[idx][2:4],\n 'area': all_boxes[idx][4],\n 'score': all_boxes[idx][5],\n 'image': int(img_path[idx][-16:-4])\n })\n # image x person x (keypoints)\n kpts = defaultdict(list)\n for kpt in _kpts:\n kpts[kpt['image']].append(kpt)\n\n # rescoring and oks nms\n num_joints = self.num_joints\n in_vis_thre = self.in_vis_thre\n oks_thre = self.oks_thre\n oks_nmsed_kpts = []\n for img in kpts.keys():\n img_kpts = kpts[img]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > in_vis_thre:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n\n if self.soft_nms:\n keep = soft_oks_nms(\n [img_kpts[i] for i in range(len(img_kpts))],\n oks_thre\n )\n else:\n keep = oks_nms(\n [img_kpts[i] for i in range(len(img_kpts))],\n oks_thre\n )\n\n if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n else:\n oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n\n self._write_coco_keypoint_results(\n oks_nmsed_kpts, res_file)\n if 'test' not in self.image_set:\n info_str = self._do_python_keypoint_eval(\n res_file, res_folder)\n name_value = OrderedDict(info_str)\n return name_value, name_value['AP']\n else:\n return {'Null': 0}, 0\n\n def _write_coco_keypoint_results(self, keypoints, res_file):\n data_pack = [\n {\n 'cat_id': self._class_to_coco_ind[cls],\n 'cls_ind': cls_ind,\n 'cls': cls,\n 'ann_type': 'keypoints',\n 'keypoints': keypoints\n }\n for cls_ind, cls in enumerate(self.classes) if not cls == '__background__'\n ]\n\n results = self._coco_keypoint_results_one_category_kernel(data_pack[0])\n logger.info('=> writing results json to 
%s' % res_file)\n with open(res_file, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n try:\n json.load(open(res_file))\n except Exception:\n content = []\n with open(res_file, 'r') as f:\n for line in f:\n content.append(line)\n content[-1] = ']'\n with open(res_file, 'w') as f:\n for c in content:\n f.write(c)\n\n def _coco_keypoint_results_one_category_kernel(self, data_pack):\n cat_id = data_pack['cat_id']\n keypoints = data_pack['keypoints']\n cat_results = []\n\n for img_kpts in keypoints:\n if len(img_kpts) == 0:\n continue\n\n _key_points = np.array([img_kpts[k]['keypoints']\n for k in range(len(img_kpts))])\n key_points = np.zeros(\n (_key_points.shape[0], self.num_joints * 3), dtype=np.float\n )\n\n for ipt in range(self.num_joints):\n key_points[:, ipt * 3 + 0] = _key_points[:, ipt, 0]\n key_points[:, ipt * 3 + 1] = _key_points[:, ipt, 1]\n key_points[:, ipt * 3 + 2] = _key_points[:, ipt, 2] # keypoints score.\n\n result = [\n {\n 'image_id': img_kpts[k]['image'],\n 'category_id': cat_id,\n 'keypoints': list(key_points[k]),\n 'score': img_kpts[k]['score'],\n 'center': list(img_kpts[k]['center']),\n 'scale': list(img_kpts[k]['scale'])\n }\n for k in range(len(img_kpts))\n ]\n cat_results.extend(result)\n\n return cat_results\n\n def _do_python_keypoint_eval(self, res_file, res_folder):\n coco_dt = self.coco.loadRes(res_file)\n coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n\n stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']\n\n info_str = []\n for ind, name in enumerate(stats_names):\n info_str.append((name, coco_eval.stats[ind]))\n\n return info_str\n", "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\n\nimport torch\nimport torch.nn as nn\n\n\nBN_MOMENTUM = 0.1\n\nlogger = logging.getLogger(__name__)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False\n )\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) # 默认 momentum = 0.1\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n # 由于我们的规范化操作会对减去均值,因此,偏置项b可以被忽略掉或可以被置为0\n out = self.conv1(x) # inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out) # planes, planes, kernel_size=3, stride=1, padding=1, bias=False\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, 
self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x) # inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False\n out = self.relu(out)\n\n out = self.conv2(out) # planes, planes, kernel_size=3, stride=1, padding=1, bias=False\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out) # planes, planes * expansion, kernel_size=1, stride=2, padding=0, bias=False\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, block, layers, cfg, **kwargs):\n self.inplanes = 64\n extra = cfg.MODEL.EXTRA\n self.deconv_with_bias = extra.DECONV_WITH_BIAS\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(\n extra.NUM_DECONV_LAYERS,\n extra.NUM_DECONV_FILTERS,\n extra.NUM_DECONV_KERNELS,\n )\n\n self.final_layer = nn.Conv2d(\n in_channels=extra.NUM_DECONV_FILTERS[-1],\n out_channels=cfg.MODEL.NUM_JOINTS,\n kernel_size=extra.FINAL_CONV_KERNEL,\n stride=1,\n padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0\n )\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n 
in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n resnet34\n layer1 = 3: 64, 3*3, stride=1 64, 3*3, stride=1\n 64, 3*3, stride=1 64, 3*3, stride=1 repeat 2\n layer2 = 4: 128, 3*3, stride=2 128, 3*3, stride=1\n 128, 3*3, stride=1 128, 3*3, stride=1 repeat 3\n layer3 = 6: 256, 3*3, stride=2 256, 3*3, stride=1\n 256, 3*3, stride=1 256, 3*3, stride=1 repeat 5\n layer4 = 3: 512, 3*3, stride=2 512, 3*3, stride=1\n 512, 3*3, stride=1 512, 3*3, stride=1 repeat 2\n \"\"\"\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.deconv_layers(x)\n x = self.final_layer(x)\n\n \"\"\"\n res50_256x192_d256x3_adam_lr1e-3.yaml (resnet50):\n \n conv1: 64, 7*7, stride=2\n maxpool: 64, 3*3, stride=2\n layer1 = 3: 64, 3*3, stride=1 64, 1*1, stride=1 256, 3*3, stride=1\n 64, 3*3, stride=1 64, 1*1, stride=1 256, 3*3, stride=1 repeat 2\n layer2 = 4: 128, 3*3, stride=2 128, 1*1, stride=1 512, 3*3, stride=1\n 128, 3*3, stride=1 128, 1*1, stride=1 512, 3*3, stride=1 repeat 3\n layer3 = 6: 256, 3*3, stride=2 256, 1*1, stride=1 1024, 3*3, stride=1\n 256, 3*3, stride=1 256, 1*1, stride=1 1024, 3*3, stride=1 repeat 5\n layer4 = 3: 512, 3*3, stride=2 512, 1*1, stride=1 2048, 3*3, stride=1\n 512, 3*3, stride=1 512, 1*1, stride=1 2048, 3*3, stride=1 repeat 2\n deconv_layers: (2048), 256, 4*4, stride=2\n 256, 4*4, stride=2\n 256, 4*4, stride=2\n final_layer: 17, 1*1, stride=1\n \"\"\"\n\n return x\n\n # 权重初始化,并记录日志\n def init_weights(self, pretrained=''):\n # 如果有Imagenet预训练模型, 加载并初始化后半部分,否则全部参数进行初始化\n if os.path.isfile(pretrained): # 判断某一对象(需提供绝对路径)是否为文件\n logger.info('=> init deconv weights from normal distribution')\n for name, m in self.deconv_layers.named_modules(): # 在网络中的所有模块上返回一个迭代器,同时产生模块的名称和模块本身\n if isinstance(m, nn.ConvTranspose2d): # 判断一个对象是否是一个已知的类型\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n logger.info('=> init {}.weight as 1'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n logger.info('=> init final conv weights from normal distribution')\n for m in self.final_layer.modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n\n pretrained_state_dict = torch.load(pretrained) # 解序列化一个pickled对象并加载到内存当中\n logger.info('=> loading pretrained model {}'.format(pretrained))\n self.load_state_dict(pretrained_state_dict, strict=False) # 加载一个解序列化的state_dict对象\n else:\n logger.info('=> init weights from normal distribution')\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n nn.init.normal_(m.weight, std=0.001)\n # nn.init.constant_(m.bias, 0)\n elif isinstance(m, 
nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n\n\nresnet_spec = {\n 18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])\n}\n\n\ndef get_pose_net(cfg, is_train, **kwargs):\n num_layers = cfg.MODEL.EXTRA.NUM_LAYERS # 50\n\n block_class, layers = resnet_spec[num_layers]\n\n model = PoseResNet(block_class, layers, cfg, **kwargs)\n\n if is_train and cfg.MODEL.INIT_WEIGHTS: # True\n model.init_weights(cfg.MODEL.PRETRAINED) # 'models/pytorch/imagenet/resnet50-19c8e357.pth'\n\n return model\n" ]
[ [ "numpy.max", "numpy.array", "numpy.zeros", "numpy.ones" ], [ "torch.nn.Sequential", "torch.nn.ConvTranspose2d", "torch.load", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.init.normal_", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
frankdvd/curb-monitor
[ "a5ad37a9dd0ca93477dffc647d2dfe7a8d9361e0" ]
[ "scripts/extract_gps.py" ]
[ "from pathlib import Path\nimport os, sys, shutil\nimport subprocess\nimport pandas as pd\nimport string\n\nif len(sys.argv) != 2:\n print(\"Usage: ./extract_gps.py <video dir>\")\n sys.exit()\n\ndef convert_latlong(in_str):\n split_latlong = in_str.split(' ')\n return float(split_latlong[0]) + float(split_latlong[2][:-1])/60.0 + float(split_latlong[3][:-1])/3600.00\n\ndef on_bancroft(latitude, longitude):\n # Southwest corner of bancroft -- Intersection of Bancroft and oxford st.\n # lat_0 = 37.867745120011236\n # long_0 = 122.265914980762\n lat_0 = 37.86792981681717\n long_0 = 122.26526052183016\n # Northeast corner of bancroft -- Intersection of Bancroft and Piedmont Ave\n # lat_1 = 37.86974393324088\n # long_1 = 122.25221425754695\n lat_1 = 37.86956443944309\n long_1 = 122.25276142821582\n # Bounding box calculation\n return (latitude > lat_0 and latitude < lat_1) and (longitude > long_1 and longitude < long_0)\n\nvid_dir = sys.argv[1]\nvid_path = Path(vid_dir)\nout_path = vid_path/Path(\"out\")\nif os.path.exists(out_path):\n shutil.rmtree(out_path)\nos.makedirs(out_path)\n\nfor filename in os.listdir(vid_path):\n if not filename.endswith(\".MP4\") and not filename.endswith(\".mp4\"):\n continue\n # outfile = open(out_path/Path(filename[:-4]+\"out.txt\"), 'w')\n out_process = subprocess.run(args = [\"./exiftool.exe\", \"-a\", \"\\\"-gps*\\\"\", \"-ee\", str(vid_path) + \"/\" + filename], universal_newlines = True, stdout = subprocess.PIPE)\n output = out_process.stdout\n output_lines = output[output.index(\"Sample Time\"):].split('\\n')\n\n #gps_df = pd.dataframe({'Lat': [], 'Long': [], 'Speed': })\n lats = []\n longs = []\n speeds = []\n stimes = []\n sdurations = []\n datetimes = []\n vid_on_bancroft = False\n banc_ratio = 0.0\n for line in output_lines:\n if len(line) == 0:\n continue\n split_line = line.split(':')\n split_line[1] = split_line[1][1:]\n if line.startswith('Sample Time'):\n if len(split_line) == 2:\n stimes.append(float(split_line[1][:-2]))\n else:\n stimes.append(float(split_line[3]))\n if line.startswith('Sample Duration'):\n sdurations.append(split_line[1])\n if line.startswith('GPS Latitude'):\n lats.append(split_line[1])\n if line.startswith('GPS Longitude'):\n longs.append(split_line[1])\n # Can check the most recent latitude and longitude to see if the vid is on bancroft\n # Perform the check here since longitude measurement always comes after latitude measurement\n if on_bancroft(convert_latlong(lats[-1]), convert_latlong(longs[-1])):\n # print(convert_latlong(lats[-1]))\n # print(convert_latlong(longs[-1]))\n vid_on_bancroft = True\n banc_ratio += 1.0\n if line.startswith('GPS Speed'):\n speeds.append(split_line[1])\n if line.startswith('GPS Date/Time'):\n datetimes.append(line[line.index(': ')+2:])\n gps_df = pd.DataFrame( {'lat': pd.Series(lats), \n 'long': pd.Series(longs), \n 'speed': pd.Series(speeds),\n 'datetime': pd.Series(datetimes),\n 'sample_time': pd.Series(stimes),\n 'sample_dur': pd.Series(sdurations)\n } ).set_index('sample_time')\n\n # Since this is in the Berkeley area, N and W are implied for the latitude and longitude, respectively\n gps_df['converted_lat'] = gps_df['lat'].apply(convert_latlong)\n gps_df['converted_long'] = gps_df['long'].apply(convert_latlong)\n\n #print(gps_df[['converted_lat', 'converted_long', 'speed', 'datetime']].head())\n print(filename + \" on Bancroft Way: \" + str(vid_on_bancroft), end=\"\\t\")\n print(filename + \" Bancroft Ratio: \" + str(banc_ratio/59))\n #print(gps_df.head())\n #print(output_lines[:10])\n # 
outfile.close()\n" ]
[ [ "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
sgdread/autogluon
[ "fa95c72a07066dc5380fccf8bbce04b5c031fc68" ]
[ "text/src/autogluon/text/automm/optimization/utils.py" ]
[ "from typing import Optional, Union, Tuple, List, Dict\nimport functools\nfrom torch import nn\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom transformers.trainer_pt_utils import get_parameter_names\nimport torchmetrics\nfrom .lr_scheduler import (\n get_cosine_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n)\nfrom ..constants import (\n BINARY, MULTICLASS, REGRESSION, MAX, MIN, NORM_FIT, BIT_FIT,\n ACC, ACCURACY, RMSE, ROOT_MEAN_SQUARED_ERROR, R2, QUADRATIC_KAPPA,\n ROC_AUC, AVERAGE_PRECISION, LOG_LOSS, CROSS_ENTROPY,\n PEARSONR, SPEARMANR,\n)\nimport warnings\n\n\ndef get_loss_func(problem_type: str):\n \"\"\"\n Choose a suitable Pytorch loss module based on the provided problem type.\n\n Parameters\n ----------\n problem_type\n Type of problem.\n\n Returns\n -------\n A Pytorch loss module.\n \"\"\"\n if problem_type in [BINARY, MULTICLASS]:\n loss_func = nn.CrossEntropyLoss()\n elif problem_type == REGRESSION:\n loss_func = nn.MSELoss()\n else:\n raise NotImplementedError\n\n return loss_func\n\n\ndef get_metric(\n metric_name: str,\n problem_type: str,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n):\n \"\"\"\n Obtain a torchmerics.Metric from its name.\n Define a customized metric function in case that torchmetrics doesn't support some metric.\n\n Parameters\n ----------\n metric_name\n Name of metric\n problem_type\n The type of the problem.\n num_classes\n Number of classes, used in the quadratic_kappa metric for binary classification.\n pos_label\n The label (0 or 1) of binary classification's positive class, which is used in some metrics, e.g., AUROC.\n\n Returns\n -------\n torchmetrics.Metric\n A torchmetrics.Metric object.\n mode\n The min/max mode used in selecting model checkpoints.\n - min\n Its means that smaller metric is better.\n - max\n It means that larger metric is better.\n custom_metric_func\n A customized metric function.\n \"\"\"\n metric_name = metric_name.lower()\n if metric_name in [ACC, ACCURACY]:\n return torchmetrics.Accuracy(), MAX, None\n elif metric_name in [RMSE, ROOT_MEAN_SQUARED_ERROR]:\n return torchmetrics.MeanSquaredError(squared=False), MIN, None\n elif metric_name == R2:\n return torchmetrics.R2Score(), MAX, None\n elif metric_name == QUADRATIC_KAPPA:\n return torchmetrics.CohenKappa(num_classes=num_classes,\n weights=\"quadratic\"), MAX, None\n elif metric_name == ROC_AUC:\n return torchmetrics.AUROC(pos_label=pos_label), MAX, None\n elif metric_name == AVERAGE_PRECISION:\n return torchmetrics.AveragePrecision(pos_label=pos_label), MAX, None\n elif metric_name in [LOG_LOSS, CROSS_ENTROPY]:\n return torchmetrics.MeanMetric(), MIN, \\\n functools.partial(F.cross_entropy, reduction=\"none\")\n elif metric_name == PEARSONR:\n return torchmetrics.PearsonCorrCoef(), MAX, None\n elif metric_name == SPEARMANR:\n return torchmetrics.SpearmanCorrCoef(), MAX, None\n else:\n warnings.warn(f\"Currently, we cannot convert the metric: {metric_name} to a metric supported in torchmetrics. 
\"\n f\"Thus, we will fall-back to use accuracy for multi-class classification problems \"\n f\", ROC-AUC for binary classification problem, and MSE for regression problems.\", UserWarning)\n if problem_type == REGRESSION:\n return torchmetrics.MeanSquaredError(squared=False), MIN, None\n elif problem_type == MULTICLASS:\n return torchmetrics.Accuracy(), MAX, None\n elif problem_type == BINARY:\n return torchmetrics.AUROC(pos_label=pos_label), MAX, None\n else:\n raise ValueError(f'The problem_type={problem_type} is currently not supported')\n\n\ndef get_optimizer(\n optim_type: str,\n optimizer_grouped_parameters,\n lr: float,\n weight_decay: float,\n eps: Optional[float] = 1e-6,\n betas: Optional[Tuple[float, float]] = (0.9, 0.999),\n momentum: Optional[float] = 0.9,\n):\n \"\"\"\n Choose a Pytorch optimizer based on its name.\n\n Parameters\n ----------\n optim_type\n Name of optimizer.\n optimizer_grouped_parameters\n The model parameters to be optimized.\n lr\n Learning rate.\n weight_decay\n Optimizer weight decay.\n eps\n Optimizer eps.\n betas\n Optimizer betas.\n momentum\n Momentum used in the SGD optimizer.\n\n Returns\n -------\n A Pytorch optimizer.\n \"\"\"\n if optim_type == \"adamw\":\n optimizer = optim.AdamW(\n optimizer_grouped_parameters,\n lr=lr,\n weight_decay=weight_decay,\n eps=eps,\n betas=betas,\n )\n elif optim_type == \"adam\":\n optimizer = optim.Adam(\n optimizer_grouped_parameters,\n lr=lr,\n weight_decay=weight_decay,\n )\n elif optim_type == \"sgd\":\n optimizer = optim.SGD(\n optimizer_grouped_parameters,\n lr=lr,\n weight_decay=weight_decay,\n momentum=momentum,\n )\n else:\n raise ValueError(f\"unknown optimizer: {optim_type}\")\n\n return optimizer\n\n\ndef get_lr_scheduler(\n optimizer: optim.Optimizer,\n num_max_steps: int,\n num_warmup_steps: int,\n lr_schedule: str,\n end_lr: Union[float, int],\n):\n \"\"\"\n Get the learning rate scheduler from its name. 
Here we use our defined learning rate\n scheduler instead of those imported from \"transformers\" because we want to support\n Pytorch lightning's \"ddp_spawn\" training strategy.\n\n Parameters\n ----------\n optimizer\n A Pytorch optimizer.\n num_max_steps\n Number of maximum training steps.\n num_warmup_steps\n Number of steps to do learning rate warmup.\n lr_schedule\n Name of the learning rate scheduler.\n end_lr\n The final learning rate after decay.\n\n Returns\n -------\n A learning rate scheduler.\n \"\"\"\n if lr_schedule == \"cosine_decay\":\n scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_max_steps,\n )\n elif lr_schedule == \"polynomial_decay\":\n scheduler = get_polynomial_decay_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_max_steps,\n lr_end=end_lr,\n power=1,\n )\n elif lr_schedule == \"linear_decay\":\n scheduler = get_linear_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_max_steps\n )\n else:\n raise ValueError(f\"unknown lr schedule: {lr_schedule}\")\n\n return scheduler\n\n\ndef get_weight_decay_param_names(model: nn.Module):\n \"\"\"\n Set the layer normalization parameters and other layers' bias parameters not to use weight decay.\n\n Parameters\n ----------\n model\n A Pytorch model.\n\n Returns\n -------\n A list of parameter names not using weight decay.\n \"\"\"\n # By default, we should not apply weight decay for all the norm layers\n decay_param_names = get_parameter_names(model,\n [nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,\n nn.GroupNorm])\n decay_param_names = [name for name in decay_param_names if \"bias\" not in name]\n return decay_param_names\n\n\ndef get_norm_layer_param_names(model: nn.Module):\n \"\"\"\n Get parameters associated with the normalization layers\n\n Parameters\n ----------\n model\n A Pytorch model\n\n Returns\n -------\n norm_param_names\n A list of normalization parameter names\n \"\"\"\n all_param_names = [name for name, _ in model.named_parameters()]\n all_param_names_except_norm_names = get_parameter_names(\n model, [nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm])\n norm_param_names = [name for name in all_param_names if name not in all_param_names_except_norm_names]\n return norm_param_names\n\n\ndef apply_single_lr(\n model: nn.Module,\n lr: float,\n weight_decay: float,\n return_params: Optional[bool] = True,\n):\n \"\"\"\n Set to use a single learning rate for all parameters. Layer normalization parameters and other\n layers' bias parameters don't use weight decay.\n\n Parameters\n ----------\n model\n A Pytorch model.\n lr\n Learning rate.\n weight_decay\n Weight decay.\n return_params\n Whether to return parameters or their names. 
If you want to double-check\n whether the learning rate setup is as expected, you can set \"return_params=False\",\n and print the layer names along with their learning rates through\n \"print(\"Param groups = %s\" % json.dumps(optimizer_grouped_parameters, indent=2))\".\n\n Returns\n -------\n The grouped parameters or their names.\n \"\"\"\n decay_param_names = get_weight_decay_param_names(model)\n optimizer_grouped_parameters = [\n {\n \"params\": [p if return_params else n for n, p in model.named_parameters() if n in decay_param_names],\n \"weight_decay\": weight_decay,\n \"lr\": lr,\n },\n {\n \"params\": [p if return_params else n for n, p in model.named_parameters() if n not in decay_param_names],\n \"weight_decay\": 0.0,\n \"lr\": lr,\n },\n ]\n return optimizer_grouped_parameters\n\n\ndef apply_two_stages_lr(\n model: nn.Module,\n lr: float,\n lr_mult: Union[float, int],\n weight_decay: float,\n return_params: Optional[bool] = True,\n):\n \"\"\"\n Set up the pretrained backbone to use a smaller learning rate (lr * lr_mult).\n The newly added head layers use the normal learning rate (lr).\n Layer normalization parameters and other layers' bias parameters don't use weight decay.\n\n Parameters\n ----------\n model\n A Pytorch model.\n lr\n The learning rate.\n lr_mult\n The multiplier (0, 1) to scale down the learning rate.\n weight_decay\n Weight decay.\n return_params\n return_params\n Whether to return parameters or their names. If you want to double-check\n whether the learning rate setup is as expected, you can set \"return_params=False\",\n and print the layer names along with their learning rates through\n \"print(\"Param groups = %s\" % json.dumps(optimizer_grouped_parameters, indent=2))\".\n\n Returns\n -------\n The grouped parameters or their names.\n \"\"\"\n decay_param_names = get_weight_decay_param_names(model)\n\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p if return_params else n\n for n, p in model.named_parameters()\n if n in decay_param_names\n and not any(bb in n for bb in model.head_layer_names)\n ],\n \"weight_decay\": weight_decay,\n \"lr\": lr,\n },\n {\n \"params\": [\n p if return_params else n\n for n, p in model.named_parameters()\n if n not in decay_param_names\n and not any(bb in n for bb in model.head_layer_names)\n ],\n \"weight_decay\": 0.0,\n \"lr\": lr,\n },\n {\n \"params\": [\n p if return_params else n\n for n, p in model.named_parameters()\n if n in decay_param_names\n and any(bb in n for bb in model.head_layer_names)\n ],\n \"weight_decay\": weight_decay,\n \"lr\": lr * lr_mult,\n },\n {\n \"params\": [\n p if return_params else n\n for n, p in model.named_parameters()\n if n not in decay_param_names\n and any(bb in n for bb in model.head_layer_names)\n ],\n \"weight_decay\": 0.0,\n \"lr\": lr * lr_mult,\n },\n ]\n\n return optimizer_grouped_parameters\n\n\ndef apply_layerwise_lr_decay(\n model: nn.Module,\n lr: float,\n lr_decay: float,\n weight_decay: float,\n efficient_finetune: Optional[str] = None,\n):\n \"\"\"\n Assign monotonically decreasing learning rates for layers from the output end to the input end.\n The intuition behind is that later layers are more task-related compared to the early layers.\n Layer normalization parameters and other layers' bias parameters don't use weight decay.\n If you want to double-check whether the learning rate setup is as expected,\n you can print the layer names along with their learning rates through\n \"print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\".\n\n 
Parameters\n ----------\n model\n A Pytorch model.\n lr\n The learning rate.\n lr_decay\n The learning rate decay factor (0, 1).\n weight_decay\n Weight decay.\n efficient_finetune\n Efficient finetuning strategy. Can be \"bit_fit\", \"norm_fit\". It will only finetune part of the parameters\n\n Returns\n -------\n The grouped parameters based on their layer ids and whether using weight decay.\n \"\"\"\n parameter_group_names = {}\n parameter_group_vars = {}\n decay_param_names = get_weight_decay_param_names(model)\n norm_param_names = get_norm_layer_param_names(model)\n for name, param in model.named_parameters():\n if efficient_finetune == BIT_FIT:\n # For bit_fit, we disable tuning everything except the bias terms\n if 'bias' not in name:\n param.requires_grad = False\n elif efficient_finetune == NORM_FIT:\n # For norm-fit, we finetune all the normalization layers and bias layers\n if name not in norm_param_names and 'bias' not in name:\n param.requires_grad = False\n\n if not param.requires_grad:\n continue # frozen weights\n\n if name in decay_param_names:\n group_name = \"decay\"\n this_weight_decay = weight_decay\n else:\n group_name = \"no_decay\"\n this_weight_decay = 0.\n\n layer_id = model.name_to_id[name]\n group_name = \"layer_%d_%s\" % (layer_id, group_name)\n\n if group_name not in parameter_group_names:\n scale = lr_decay ** layer_id\n\n parameter_group_names[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr\": scale * lr\n }\n parameter_group_vars[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr\": scale * lr\n }\n\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n\n return list(parameter_group_vars.values())\n" ]
[ [ "torch.optim.Adam", "torch.nn.CrossEntropyLoss", "torch.optim.AdamW", "torch.optim.SGD", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xiaohui-zhang/audio
[ "0ec482cedfe82b209d23769a286fcd7d3fd468d2" ]
[ "test/torchaudio_unittest/common_utils/data_utils.py" ]
[ "import os.path\nfrom typing import Union, Optional\n\nimport torch\n\n\n_TEST_DIR_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\"))\n\n\ndef get_asset_path(*paths):\n \"\"\"Return full path of a test asset\"\"\"\n return os.path.join(_TEST_DIR_PATH, \"assets\", *paths)\n\n\ndef convert_tensor_encoding(\n tensor: torch.tensor,\n dtype: torch.dtype,\n):\n \"\"\"Convert input tensor with values between -1 and 1 to integer encoding\n Args:\n tensor: input tensor, assumed between -1 and 1\n dtype: desired output tensor dtype\n Returns:\n Tensor: shape of (n_channels, sample_rate * duration)\n \"\"\"\n if dtype == torch.int32:\n tensor *= (tensor > 0) * 2147483647 + (tensor < 0) * 2147483648\n if dtype == torch.int16:\n tensor *= (tensor > 0) * 32767 + (tensor < 0) * 32768\n if dtype == torch.uint8:\n tensor *= (tensor > 0) * 127 + (tensor < 0) * 128\n tensor += 128\n tensor = tensor.to(dtype)\n return tensor\n\n\ndef get_whitenoise(\n *,\n sample_rate: int = 16000,\n duration: float = 1, # seconds\n n_channels: int = 1,\n seed: int = 0,\n dtype: Union[str, torch.dtype] = \"float32\",\n device: Union[str, torch.device] = \"cpu\",\n channels_first=True,\n scale_factor: float = 1,\n):\n \"\"\"Generate pseudo audio data with whitenoise\n Args:\n sample_rate: Sampling rate\n duration: Length of the resulting Tensor in seconds.\n n_channels: Number of channels\n seed: Seed value used for random number generation.\n Note that this function does not modify global random generator state.\n dtype: Torch dtype\n device: device\n channels_first: whether first dimension is n_channels\n scale_factor: scale the Tensor before clamping and quantization\n Returns:\n Tensor: shape of (n_channels, sample_rate * duration)\n \"\"\"\n if isinstance(dtype, str):\n dtype = getattr(torch, dtype)\n if dtype not in [torch.float64, torch.float32, torch.int32, torch.int16, torch.uint8]:\n raise NotImplementedError(f\"dtype {dtype} is not supported.\")\n # According to the doc, folking rng on all CUDA devices is slow when there are many CUDA devices,\n # so we only fork on CPU, generate values and move the data to the given device\n with torch.random.fork_rng([]):\n torch.random.manual_seed(seed)\n tensor = torch.randn([n_channels, int(sample_rate * duration)], dtype=torch.float32, device=\"cpu\")\n tensor /= 2.0\n tensor *= scale_factor\n tensor.clamp_(-1.0, 1.0)\n if not channels_first:\n tensor = tensor.t()\n\n tensor = tensor.to(device)\n\n return convert_tensor_encoding(tensor, dtype)\n\n\ndef get_sinusoid(\n *,\n frequency: float = 300,\n sample_rate: int = 16000,\n duration: float = 1, # seconds\n n_channels: int = 1,\n dtype: Union[str, torch.dtype] = \"float32\",\n device: Union[str, torch.device] = \"cpu\",\n channels_first: bool = True,\n):\n \"\"\"Generate pseudo audio data with sine wave.\n\n Args:\n frequency: Frequency of sine wave\n sample_rate: Sampling rate\n duration: Length of the resulting Tensor in seconds.\n n_channels: Number of channels\n dtype: Torch dtype\n device: device\n\n Returns:\n Tensor: shape of (n_channels, sample_rate * duration)\n \"\"\"\n if isinstance(dtype, str):\n dtype = getattr(torch, dtype)\n pie2 = 2 * 3.141592653589793\n end = pie2 * frequency * duration\n num_frames = int(sample_rate * duration)\n # Randomize the initial phase. 
(except the first channel)\n theta0 = pie2 * torch.randn(n_channels, 1, dtype=torch.float32, device=device)\n theta0[0, :] = 0\n theta = torch.linspace(0, end, num_frames, dtype=torch.float32, device=device)\n theta = theta0 + theta\n tensor = torch.sin(theta, out=None)\n if not channels_first:\n tensor = tensor.t()\n return convert_tensor_encoding(tensor, dtype)\n\n\ndef get_spectrogram(\n waveform,\n *,\n n_fft: int = 2048,\n hop_length: Optional[int] = None,\n win_length: Optional[int] = None,\n window: Optional[torch.Tensor] = None,\n center: bool = True,\n pad_mode: str = \"reflect\",\n power: Optional[float] = None,\n):\n \"\"\"Generate a spectrogram of the given Tensor\n\n Args:\n n_fft: The number of FFT bins.\n hop_length: Stride for sliding window. default: ``n_fft // 4``.\n win_length: The size of window frame and STFT filter. default: ``n_fft``.\n winwdow: Window function. default: Hann window\n center: Pad the input sequence if True. See ``torch.stft`` for the detail.\n pad_mode: Padding method used when center is True. Default: \"reflect\".\n power: If ``None``, raw spectrogram with complex values are returned,\n otherwise the norm of the spectrogram is returned.\n \"\"\"\n hop_length = hop_length or n_fft // 4\n win_length = win_length or n_fft\n window = torch.hann_window(win_length, device=waveform.device) if window is None else window\n spec = torch.stft(\n waveform,\n n_fft=n_fft,\n hop_length=hop_length,\n win_length=win_length,\n center=center,\n window=window,\n pad_mode=pad_mode,\n return_complex=True,\n )\n if power is not None:\n spec = spec.abs() ** power\n return spec\n" ]
[ [ "torch.linspace", "torch.sin", "torch.random.fork_rng", "torch.randn", "torch.random.manual_seed", "torch.hann_window", "torch.stft" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
siddarthjha/Opencv
[ "ccf26ade18a4a04da464acbbc15f074904fab208" ]
[ "Image Processing/11_Template_Matching.py" ]
[ "import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('a.jpg',0)\nimg2 = img.copy()\ntemplate = cv2.imread('b.jpg',0)\nw, h = template.shape[::-1]\n\n# All the 6 methods for comparison in a list\nmethods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\n 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\n\nfor meth in methods:\n img = img2.copy()\n method = eval(meth)\n\n # Apply template Matching\n res = cv2.matchTemplate(img,template,method)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n top_left = min_loc\n else:\n top_left = max_loc\n bottom_right = (top_left[0] + w, top_left[1] + h)\n\n cv2.rectangle(img,top_left, bottom_right, 255, 2)\n\n plt.subplot(121),plt.imshow(res,cmap = 'gray')\n plt.title('Matching Result'), plt.xticks([]), plt.yticks([])\n plt.subplot(122),plt.imshow(img,cmap = 'gray')\n plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\n plt.suptitle(meth)\n\n plt.show()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xticks", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
terry97-guel/SMInet
[ "e4c158fb03096a12723bb474c3e468044eca46a6" ]
[ "evaluate.py" ]
[ "import torch\nfrom model import *\nfrom dataloader import *\nfrom utils.pyart import *\nimport argparse\nimport numpy as np\nfrom pathlib import Path\n\ndef main(args):\n print(\"Processing...\")\n\n # set device:\n if torch.cuda.is_available():\n device = torch.device('cuda:0')\n else:\n device = torch.device('cpu')\n\n # make save_dir\n Path(args.save_dir).mkdir(parents=True, exist_ok=True)\n\n # load checkpoint\n checkpoint = torch.load(args.checkpoint)\n branchNum = checkpoint['branchNum']\n input_dim = checkpoint['input_dim']\n branchLs = bnum2ls(branchNum)\n n_joint = len(branchLs)\n\n # load model\n model = Model(branchNum, input_dim)\n model.load_state_dict(checkpoint['state_dict'])\n model = model.to(device)\n model.eval()\n\n # load data\n IOScale = checkpoint['IOScale']\n test_data_loader = ToyDataloader(args.data_path, IOScale, n_workers = 1, batch = 1, shuffle=False)\n\n # get IOScale.txt\n (inputSigma,inputMean),(outputSigma,outputMean) = IOScale\n inputSigma,inputMean,outputSigma,outputMean = \\\n inputSigma.detach().numpy(),inputMean.detach().numpy(),outputSigma.detach().numpy(),outputMean.detach().numpy()\n\n ScaleInfo = np.array([]).reshape(-1,3)\n ScaleInfo = np.vstack((ScaleInfo, inputSigma))\n ScaleInfo = np.vstack((ScaleInfo, inputMean))\n ScaleInfo = np.vstack((ScaleInfo, outputSigma))\n ScaleInfo = np.vstack((ScaleInfo, outputMean))\n \n np.savetxt(args.save_dir+\"/IOScale.txt\",ScaleInfo)\n\n # get jointAngle.txt\n revAngle = np.array([]).reshape(-1,n_joint)\n priAngle = np.array([]).reshape(-1,n_joint)\n for input,_ in test_data_loader:\n input = input.to(device)\n rev_q_value, pri_q_value = model.q_layer(input)\n\n rev_q_value = rev_q_value.detach().cpu().numpy()\n pri_q_value = pri_q_value.detach().cpu().numpy()\n\n revAngle = np.vstack((revAngle,rev_q_value))\n priAngle = np.vstack((priAngle,pri_q_value))\n np.savetxt(args.save_dir+\"/revAngle.txt\", revAngle)\n np.savetxt(args.save_dir+\"/priAngle.txt\", priAngle)\n\n # get branchLs\n np.savetxt(args.save_dir+'/branchLs.txt',branchLs)\n\n # get targetPose.txt\n targetPose = test_data_loader.dataset.label\n targetPose = targetPose.detach().cpu().numpy()\n np.savetxt(args.save_dir+'/targetPose.txt', targetPose)\n\n # get outputPose.txt\n outputPose = np.array([]).reshape(-1,targetPose.shape[1])\n for input,_ in test_data_loader:\n input = input.to(device)\n outputPose_temp,_,_ = model(input)\n outputPose_temp = outputPose_temp[:,:,0:3,3]\n outputPose_temp = outputPose_temp.reshape(-1,outputPose_temp.size()[1]*outputPose_temp.size()[2])\n outputPose_temp = outputPose_temp.detach().cpu().numpy()[0]\n outputPose = np.vstack((outputPose,outputPose_temp))\n \n np.savetxt(args.save_dir+\"/outputPose.txt\", outputPose)\n\n print(\"Done...\")\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description= 'parse for POENet')\n args.add_argument('--data_path', \\\n default= './data/Sorosim/SorosimPlot1.txt',type=str, \\\n help='path to model checkpoint') \n args.add_argument('--checkpoint', default= './output/0205/10ScaleSoro/checkpoint_100.pth',type=str,\n help='path to model checkpoint')\n args.add_argument('--save_dir', default='./2Visualize')\n args = args.parse_args()\n main(args)" ]
[ [ "torch.load", "torch.cuda.is_available", "numpy.savetxt", "torch.device", "numpy.array", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
levskaya/tensor2tensor
[ "4643800137f802693f880a1fab9e10de7ba32e66", "4643800137f802693f880a1fab9e10de7ba32e66", "4643800137f802693f880a1fab9e10de7ba32e66", "4643800137f802693f880a1fab9e10de7ba32e66" ]
[ "tensor2tensor/layers/common_attention_test.py", "tensor2tensor/models/mtf_resnet.py", "tensor2tensor/data_generators/translate.py", "tensor2tensor/models/video/basic_deterministic.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for common attention.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport kfac\nimport numpy as np\n\nfrom tensor2tensor.layers import common_attention\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.utils import test_utils\n\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\n\n\nclass CommonAttentionTest(parameterized.TestCase, tf.test.TestCase):\n\n @test_utils.run_in_graph_and_eager_modes()\n def testAddPositionalEmbedding(self):\n x = np.random.rand(5, 3, 12)\n y = common_attention.add_positional_embedding(\n tf.constant(x, dtype=tf.float32),\n max_length=4,\n name=\"pos_embedding\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 3, 12))\n\n @parameterized.named_parameters(\n (\"hard_top_k\", 0.0),\n (\"sampled_top_k_default\", 1.0),\n (\"sampled_top_k_2\", 2.0),\n )\n @test_utils.run_in_graph_and_eager_modes()\n def testHardenAttentionWeights(self, gumbel_noise_weight):\n x = np.random.rand(5, 3, 12)\n y = common_attention.harden_attention_weights(\n tf.nn.softmax(tf.constant(x, dtype=tf.float32)), 3, gumbel_noise_weight)\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 3, 12))\n\n @parameterized.named_parameters(\n (\"hard_top_k\", -0.5),\n (\"sampled_top_k\", 0.5),\n )\n @test_utils.run_in_graph_and_eager_modes()\n def testHardenAttentionAllZeros(self, gumbel_noise_weight):\n \"\"\"Check if the hardening code does not divide by zero for all zeros.\"\"\"\n x = np.zeros((5, 3, 12), dtype=np.float32)\n y = common_attention.harden_attention_weights(\n tf.constant(x, dtype=tf.float32), 3, gumbel_noise_weight)\n res = self.evaluate(y)\n if gumbel_noise_weight <= 0.0:\n self.assertAllClose(res, x)\n\n @parameterized.parameters(\n {\"input_shape\": (5, 3, 12)},\n {\"input_shape\": (5, 5, 5, 12)},\n {\"input_shape\": (5, 3, 3, 3, 12)},\n )\n @test_utils.run_in_graph_and_eager_modes()\n def testAddPositionalEmbeddingNd(self, input_shape):\n x = np.random.rand(*input_shape)\n y = common_attention.add_positional_embedding_nd(\n tf.constant(x, dtype=tf.float32),\n max_length=5,\n name=\"pos_embedding\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, input_shape)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductAttention(self):\n x = np.random.rand(5, 7, 12, 32)\n y = np.random.rand(5, 7, 12, 32)\n a = common_attention.dot_product_attention(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32), None)\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 12, 32))\n\n @parameterized.parameters(\n ([3, 10, 64], 4),\n ([3, 10, 20, 64], 2),\n ([3, 10, 20, 30, 64], 4),\n )\n def testSplitHeadsND(self, shape, num_heads):\n 
t = tf.zeros(shape)\n h = common_attention.split_heads_nd(t, num_heads)\n res = self.evaluate(h)\n self.assertEqual(\n res.shape,\n tuple(shape[:1] + [num_heads] + shape[1:-1] + [shape[-1] // num_heads]))\n\n @parameterized.parameters(\n ([3, 4, 10, 64],),\n ([3, 2, 10, 20, 64],),\n ([3, 4, 10, 20, 30, 64],),\n )\n def testCombineHeadsND(self, shape):\n t = tf.zeros(shape)\n h = common_attention.combine_heads_nd(t)\n res = self.evaluate(h)\n self.assertEqual(res.shape,\n tuple(shape[:1] + shape[2:-1] + [shape[-1] * shape[1]]))\n\n @parameterized.parameters(\n ([3, 4, 10, 64], (5,), (10,)),\n ([3, 4, 10, 10, 64], (5, 5), (5, 5)),\n ([3, 4, 10, 10, 10, 64], (5, 5, 5), (5, 5, 5)),\n )\n def testShapeMaskedLocalAttentionND(self, shape, query_shape, memory_flange):\n q = k = v = tf.reshape(tf.range(np.prod(shape), dtype=tf.float32), shape)\n val = common_attention.masked_local_attention_nd(q, k, v, query_shape,\n memory_flange)\n res = self.evaluate(val)\n self.assertEqual(res.shape, tuple(shape))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRightShiftBlockwiseND(self):\n tensor = tf.convert_to_tensor(np.array([[\n [[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]],\n ]], dtype=np.float32))\n val = common_attention.right_shift_blockwise_nd(tensor, (2, 2))\n res = self.evaluate(val)\n expected_val = np.array([[\n [[0], [1], [6], [3]],\n [[2], [5], [4], [7]],\n [[8], [9], [14], [11]],\n [[10], [13], [12], [15]],\n ]], dtype=np.float32)\n self.assertAllClose(expected_val, res)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testContentMaskedLocalAttentionND(self):\n def softmax(arr):\n return np.exp(arr) / np.sum(np.exp(arr))\n\n q = k = v = tf.convert_to_tensor(\n np.array([[[\n [[0.1], [0.1], [0.1], [0.1]],\n [[0.1], [1.0], [1.0], [0.1]],\n [[0.1], [1.0], [1.0], [0.1]],\n [[0.1], [0.1], [0.1], [0.1]],\n ]]], dtype=np.float32))\n attn_weights = np.array([[[[softmax([-1e9, -1e9, -1e9, -1e9, 0.01]),\n softmax([-1e9, -1e9, -1e9, 0.01, 0.01]),\n softmax([-1e9, -1e9, -1e9, 0.01, 0.01]),\n softmax([-1e9, -1e9, -1e9, 0.01, 0.01])\n ],\n [softmax([-1e9, 0.01, 0.01, -1e9, 0.01]),\n softmax([0.1, 0.1, 0.1, 0.1, 1.0]),\n softmax([0.1, 0.1, 0.1, 1.0, 1.0]),\n softmax([0.01, 0.01, -1e9, 0.1, 0.01])\n ],\n [softmax([-1e9, 0.01, 0.1, -1e9, 0.01]),\n softmax([0.1, 1.0, 1.0, 0.1, 1.0]),\n softmax([1.0, 1.0, 0.1, 1.0, 1.0]),\n softmax([0.1, 0.01, -1e9, 0.1, 0.01])\n ],\n [softmax([-1e9, 0.01, 0.1, -1e9, 0.01]),\n softmax([0.01, 0.1, 0.1, 0.01, 0.01]),\n softmax([0.1, 0.1, 0.01, 0.01, 0.01]),\n softmax([0.1, 0.01, -1e9, 0.01, 0.01])\n ]]]])\n blocked_v = np.array([[[[[0, 0, 0, 0, 0.1],\n [0, 0, 0, 0.1, 0.1],\n [0, 0, 0, 0.1, 0.1],\n [0, 0, 0, 0.1, 0.1]],\n [[0, 0.1, 0.1, 0, 0.1],\n [0.1, 0.1, 0.1, 0.1, 1],\n [0.1, 0.1, 0.1, 1, 1],\n [0.1, 0.1, 0, 1, 0.1]],\n [[0, 0.1, 1, 0, 0.1],\n [0.1, 1, 1, 0.1, 1],\n [1, 1, 0.1, 1, 1],\n [1, 0.1, 0, 1, 0.1]],\n [[0, 0.1, 1, 0, 0.1],\n [0.1, 1, 1, 0.1, 0.1],\n [1, 1, 0.1, 0.1, 0.1],\n [1, 0.1, 0, 0.1, 0.1]]]]])\n expected_val = np.expand_dims(\n np.sum(attn_weights * blocked_v, axis=4), axis=-1)\n val = common_attention.masked_local_attention_nd(q, k, v, (1, 1), (1, 1))\n res = self.evaluate(val)\n self.assertAllClose(expected_val, res)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testSelectBlockForDecodeStep(self):\n tensor = tf.reshape(\n tf.range(2 * 6 * 6 * 4, dtype=tf.float32), [2, 6, 6, 4, 1])\n block = common_attention.select_block_for_decode_step(tensor, 20, (2, 2))\n expected_tensor = tensor[:, 
0:1, 5:6, :, :]\n expected_value = self.evaluate(expected_tensor)\n res = self.evaluate(block)\n self.assertAllClose(expected_value, res)\n\n @parameterized.parameters(\n ((2, 6, 4, 10),),\n ((2, 6, 6, 4, 10),),\n ((2, 6, 6, 6, 4, 10),),\n )\n def testFlattenBlocksND(self, shape):\n tensor = tf.zeros(shape, dtype=tf.float32)\n value, _ = common_attention.flatten_blocks_nd(tensor)\n res = self.evaluate(value)\n self.assertAllClose(res.shape,\n (shape[0], np.prod(shape[1:-2]), shape[-2], shape[-1]))\n\n @parameterized.parameters(\n ((5,),),\n ((5, 10),),\n ((5, 10, 15),),\n )\n def testUnflattenBlocksND(self, blocks_per_dim):\n tensor = tf.zeros([2, np.prod(blocks_per_dim), 6, 10])\n value = common_attention.unflatten_blocks_nd(tensor, blocks_per_dim)\n res = self.evaluate(value)\n self.assertAllClose(res.shape, (2,) + blocks_per_dim + (6, 10))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testBreakIntoMemoryBlocksND(self):\n tensor = tf.convert_to_tensor(\n np.array([[\n [[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]],\n ]]))\n value = common_attention.break_into_memory_blocks_nd(tensor,\n (2, 2),\n (2, 2),\n masked=True)\n res = self.evaluate(value)\n expected_value = np.array([[\n [\n [\n [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0],\n [0], [0], [0], [0], [1], [2], [5], [6], [3], [4], [7], [8]\n ],\n [\n [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0],\n [1], [2], [5], [6], [3], [4], [7], [8], [0], [0], [0], [0]\n ]\n ],\n [\n [\n [0], [0], [0], [0], [1], [2], [5], [6], [3], [4], [7], [8], [0],\n [0], [0], [0], [9], [10], [13], [14], [11], [12], [15], [16]\n ],\n [\n [1], [2], [5], [6], [3], [4], [7], [8], [0], [0], [0], [0], [9],\n [10], [13], [14], [11], [12], [15], [16], [0], [0], [0], [0]\n ]\n ]]])\n self.assertAllClose(expected_value, res)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testBreakIntoBlocksND(self):\n tensor = tf.convert_to_tensor(\n np.array([[\n [[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]],\n ]]))\n value = common_attention.break_into_blocks_nd(tensor, (2, 2))\n res = self.evaluate(value)\n expected_value = np.array([[\n [[[1], [2], [5], [6]], [[3], [4], [7], [8]]],\n [[[9], [10], [13], [14]], [[11], [12], [15], [16]]]\n ]])\n self.assertAllClose(expected_value, res)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testPutBackBlocksND(self):\n tensor = tf.convert_to_tensor(\n np.array([[\n [[[1], [2], [5], [6]], [[3], [4], [7], [8]]],\n [[[9], [10], [13], [14]], [[11], [12], [15], [16]]]\n ]]))\n value = common_attention.put_back_blocks_nd(tensor, (2, 2))\n res = self.evaluate(value)\n expected_value = np.array([[\n [[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]],\n ]])\n self.assertAllClose(expected_value, res)\n\n @parameterized.parameters(\n ((2, 100, 5), (7,), (2, 105, 5)),\n ((2, 100, 100, 5), (5, 7), (2, 100, 105, 5)),\n ((2, 100, 100, 100, 5), (10, 20, 30), (2, 100, 100, 120, 5))\n )\n def testPadToMultipleND(self, tensor_shape, block_shape, expected_shape):\n tensor = tf.zeros(tensor_shape)\n value = common_attention.pad_to_multiple_nd(tensor, block_shape)\n res = self.evaluate(value)\n self.assertAllClose(res.shape, expected_shape)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testCausalAttentionBiasND(self):\n bias = common_attention.causal_attention_bias_nd((2, 2), (2, 2))\n res = self.evaluate(bias)\n expected_val = np.array([[[\n [0] * 17 + [-1e9] * 
7,\n [0] * 18 + [-1e9] * 6,\n [0] * 19 + [-1e9] * 5,\n [0] * 20 + [-1e9] * 4,\n ]]])\n self.assertAllClose(expected_val, res)\n\n @parameterized.parameters(\n ((1, 64, 10), (80,), (80,)),\n ((1, 64, 64, 10), (8, 8), (16, 16)),\n ((1, 5, 64, 64, 10), (1, 8, 8), (1, 8, 8))\n )\n def testMultiheadAttentionND(self, tensor_shape, query_shape, memory_flange):\n query_antecedent = tf.zeros(tensor_shape)\n value = common_attention.multihead_attention_nd(\n query_antecedent=query_antecedent,\n memory_antecedent=None,\n total_key_depth=256,\n total_value_depth=256,\n output_depth=256,\n num_heads=4,\n query_shape=query_shape,\n memory_flange=memory_flange,\n masked=True)\n res = self.evaluate(value)\n self.assertAllClose(res.shape, tensor_shape[:-1] + (256,))\n\n @parameterized.parameters(\n (15, (5,), (100,), (15,)),\n (10, (2, 2), (4, 4), (3, 0)),\n (25, (2, 2, 3), (10, 10, 12), (0, 0, 7))\n )\n def testDecodeStepToIndex(self, decode_step, query_shape, tensor_shape,\n expected_index):\n res = common_attention.decode_step_to_index(decode_step, query_shape,\n tensor_shape)\n self.assertAllClose(res, expected_index)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGetItemAtDecodeStep(self):\n tensor = tf.reshape(tf.range(25 * 25 * 4), [1, 4, 25, 25, 1])\n value = common_attention.get_item_at_decode_step(tensor, 100, (2, 5, 5))\n res = self.evaluate(value)\n expected_value = np.array([[[[[10]]]]])\n self.assertAllClose(expected_value, res)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testPutItemAtDecodeStep(self):\n tensor = tf.zeros([1, 1, 10, 10, 1])\n item = tf.ones([1, 1, 1, 1, 1])\n value = common_attention.put_item_in_decode_step(tensor, item, 32, (2, 2))\n res = self.evaluate(value)\n expected_val = np.zeros([1, 1, 10, 10, 1])\n expected_val[0, 0, 2, 6, 0] = 1\n self.assertAllClose(expected_val, res)\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 1, 2),\n (\"dynamic_batch\", None, 1, 8, 4, 1, 2),\n (\"batches\", 4, 3, 8, 4, 1, 2),\n (\"depth_v\", 1, 1, 8, 4, 3, 2),\n (\"block_length\", 1, 1, 8, 4, 1, 4),\n )\n def testMaskedWithinBlockLocalAttention1D(self, batch, heads, length,\n depth_k, depth_v, block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_k])\n k = tf.random_normal([batch, heads, length, depth_k])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.masked_within_block_local_attention_1d(\n q, k, v, block_length=block_length)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 1, 2),\n (\"dynamic_batch\", None, 1, 8, 4, 1, 2),\n (\"batches\", 4, 3, 8, 4, 1, 2),\n (\"depth_v\", 1, 1, 8, 4, 3, 2),\n (\"block_length\", 1, 1, 8, 4, 1, 4),\n )\n def testMaskedLocalAttention1D(self, batch, heads, length, depth_k, depth_v,\n block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_k])\n k = tf.random_normal([batch, heads, length, depth_k])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.masked_local_attention_1d(\n q, k, v, block_length=block_length)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, 
length, depth_v))\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 4, (2, 2)),\n (\"dynamic_batch\", None, 1, 8, 4, 4, (2, 2)),\n (\"batches\", 3, 2, 8, 4, 4, (2, 2)),\n # TODO(trandustin): Extend function to enable depth_k != depth_v.\n # (\"depth_v\", 1, 1, 8, 4, 1, (2, 2)),\n (\"query_shape\", 1, 1, 8, 4, 4, (4, 4)),\n )\n def testMaskedLocalAttention2D(self, batch, heads, length, depth_k, depth_v,\n query_shape):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, length, depth_k])\n k = tf.random_normal([batch, heads, length, length, depth_k])\n v = tf.random_normal([batch, heads, length, length, depth_v])\n output = common_attention.masked_local_attention_2d(\n q,\n k,\n v,\n query_shape=query_shape,\n memory_flange=(2, 2))\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, length, depth_v))\n\n @parameterized.named_parameters(\n (\"matching_block_length\", 3, 4, 25, 16, 16, 5),\n (\"unmatching_block_length\", 3, 4, 25, 16, 16, 4),\n (\"dynamic_batch\", None, 4, 25, 16, 16, 5),\n (\"different_depth_v\", 3, 4, 25, 16, 17, 5),\n )\n def testLocalUnmaskedAttention1D(self, batch, heads, length,\n depth_k, depth_v, block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_k])\n k = tf.random_normal([batch, heads, length, depth_k])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.local_attention_1d(\n q, k, v, block_length=block_length, filter_width=3)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\n @parameterized.named_parameters(\n (\"matching_block_length\", 3, 4, 25, 16, 16, (4, 4)),\n (\"unmatching_block_length\", 3, 4, 25, 16, 16, (5, 5)),\n (\"dynamic_batch\", None, 4, 25, 16, 16, (4, 4)),\n # TODO(trandustin): Extend function to enable depth_k != depth_v.\n # (\"different_depth_v\", 3, 4, 25, 16, 17, (4, 4)),\n )\n def testLocalUnmaskedAttention2D(self, batch, heads, length,\n depth_k, depth_v, query_shape):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, length, depth_k])\n k = tf.random_normal([batch, heads, length, length, depth_k])\n v = tf.random_normal([batch, heads, length, length, depth_v])\n output = common_attention.local_attention_2d(\n q,\n k,\n v,\n query_shape=query_shape,\n memory_flange=(3, 3))\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, length, depth_v))\n\n @test_utils.run_in_graph_mode_only()\n def testMultiheadSelfAttentionMemoryEfficient(self):\n num_heads = 4\n io_size = 16\n batch = 2\n length = 7\n head_size = 5\n x = np.random.rand(batch, length, io_size)\n dy = np.random.rand(batch, length, io_size)\n with self.test_session() as session:\n x = tf.to_float(x)\n dy = tf.to_float(dy)\n bias = common_attention.attention_bias_lower_triangle(length)\n wqkv = tf.get_variable(\n \"wqkv\", [num_heads, 1, io_size, 3 * head_size],\n initializer=tf.random_normal_initializer(stddev=io_size**-0.5))\n wo = tf.get_variable(\n \"wo\", [num_heads, 1, head_size, io_size],\n 
initializer=tf.random_normal_initializer(\n stddev=(head_size * num_heads)**-0.5))\n norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)\n y = common_attention.multihead_self_attention_memory_efficient(\n x, bias, num_heads, head_size=head_size, forget=False,\n test_vars=(wqkv, wo, norm_scale, norm_bias))\n y_forget = common_attention.multihead_self_attention_memory_efficient(\n x, bias, num_heads, head_size=head_size, forget=True,\n test_vars=(wqkv, wo, norm_scale, norm_bias))\n dx, dwqkv, dwo, dnorm_scale, dnorm_bias = tf.gradients(\n ys=[y], xs=[x, wqkv, wo, norm_scale, norm_bias], grad_ys=[dy])\n dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f = tf.gradients(\n ys=[y_forget], xs=[x, wqkv, wo, norm_scale, norm_bias], grad_ys=[dy])\n session.run(tf.global_variables_initializer())\n (y, y_forget,\n dx, dwqkv, dwo, dnorm_scale, dnorm_bias,\n dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f) = session.run(\n [y, y_forget,\n dx, dwqkv, dwo, dnorm_scale, dnorm_bias,\n dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f])\n self.assertAllClose(y, y_forget)\n self.assertAllClose(dwo, dwo_f)\n self.assertAllClose(dwqkv, dwqkv_f)\n self.assertAllClose(dnorm_scale, dnorm_scale_f)\n self.assertAllClose(dnorm_bias, dnorm_bias_f)\n self.assertAllClose(dx, dx_f)\n\n @test_utils.run_in_graph_and_eager_modes()\n def test2dGatherAndScatterInvertibility(self):\n \"\"\"2d gather and scatter invertibility test.\"\"\"\n batch_size = 2\n num_heads = 2\n height = 4\n width = 6\n depth = 8\n query_shape = (2, 3)\n x = np.random.rand(batch_size, num_heads, height, width, depth)\n x_indices = common_attention.gather_indices_2d(\n x, query_shape, query_shape)\n gathered_x = common_attention.gather_blocks_2d(x, x_indices)\n x_shape = tf.constant([batch_size, num_heads, height, width, depth])\n scattered_x = common_attention.scatter_blocks_2d(\n gathered_x, x_indices, x_shape)\n res = self.evaluate(scattered_x)\n self.assertAllClose(x, res)\n\n @test_utils.run_in_graph_and_eager_modes()\n def test2dBlockRasterScanMask(self):\n \"\"\"Testing the 2d block raster scan mask.\"\"\"\n query_shape = (2, 3)\n memory_flange = (2, 1)\n mask = common_attention.make_2d_block_raster_mask(\n query_shape, memory_flange)\n res = self.evaluate(mask)\n correct_mask = np.array(\n [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0,\n 1.0, 0.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,\n 1.0, 0.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n self.assertAllClose(correct_mask, res)\n\n @test_utils.run_in_graph_and_eager_modes()\n def test2dGather(self):\n \"\"\"Testing 2d index gather and block gather functions.\"\"\"\n batch_size = 2\n num_heads = 2\n height = 4\n width = 6\n depth = 8\n query_shape = (2, 3)\n x = np.random.rand(batch_size, num_heads, height, width, depth)\n y = np.reshape(x, (batch_size, num_heads, -1, depth))\n correct_indices = [[0, 1, 2, 6, 7, 8],\n [3, 4, 5, 9, 10, 11],\n [12, 13, 14, 18, 19, 20],\n [15, 16, 17, 21, 22, 23]]\n correct_gathered_x = [[[y[0, 0, correct_indices[0]],\n y[0, 0, correct_indices[1]],\n y[0, 0, 
correct_indices[2]],\n y[0, 0, correct_indices[3]]],\n [y[0, 1, correct_indices[0]],\n y[0, 1, correct_indices[1]],\n y[0, 1, correct_indices[2]],\n y[0, 1, correct_indices[3]]]],\n [[y[1, 0, correct_indices[0]],\n y[1, 0, correct_indices[1]],\n y[1, 0, correct_indices[2]],\n y[1, 0, correct_indices[3]]],\n [y[1, 1, correct_indices[0]],\n y[1, 1, correct_indices[1]],\n y[1, 1, correct_indices[2]],\n y[1, 1, correct_indices[3]]]]]\n\n x_indices = common_attention.gather_indices_2d(\n x, query_shape, query_shape)\n gathered_x = common_attention.gather_blocks_2d(x, x_indices)\n x_indices, gathered_x = self.evaluate([x_indices, gathered_x])\n self.assertAllEqual(correct_indices, x_indices)\n self.assertAllClose(correct_gathered_x, gathered_x)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGetMemoryRegion(self):\n \"\"\"Testing the function that gathers the flanged memory region.\"\"\"\n np.set_printoptions(threshold=np.inf)\n batch_size = 2\n num_heads = 2\n height = 4\n width = 6\n depth = 3\n query_shape = (2, 3)\n memory_flange = (1, 1)\n\n x = np.random.rand(batch_size, num_heads, height, width, depth)\n y = np.reshape(x, (batch_size, num_heads, -1, depth))\n zeros = np.zeros((depth), dtype=np.float32)\n five_zeros = np.array([zeros]*5)\n seven_zeros = np.array([zeros]*7)\n two_zeros = np.array([zeros]*2)\n zeros = np.array([zeros])\n\n correct_x_flange = [[[seven_zeros,\n np.concatenate((five_zeros, y[0, 0, [2, 8]]),\n axis=0),\n np.concatenate((zeros, y[0, 0, [6, 7, 8, 9]],\n two_zeros), axis=0),\n np.concatenate((y[0, 0, [8, 9, 10, 11]], zeros,\n y[0, 0, [14, 20]]), axis=0)],\n [seven_zeros,\n np.concatenate((five_zeros, y[0, 1, [2, 8]]),\n axis=0),\n np.concatenate((zeros, y[0, 1, [6, 7, 8, 9]],\n two_zeros), axis=0),\n np.concatenate((y[0, 1, [8, 9, 10, 11]], zeros,\n y[0, 1, [14, 20]]), axis=0)]],\n [[seven_zeros,\n np.concatenate((five_zeros, y[1, 0, [2, 8]]),\n axis=0),\n np.concatenate((zeros, y[1, 0, [6, 7, 8, 9]],\n two_zeros), axis=0),\n np.concatenate((y[1, 0, [8, 9, 10, 11]], zeros,\n y[1, 0, [14, 20]]), axis=0)],\n [seven_zeros,\n np.concatenate((five_zeros, y[1, 1, [2, 8]]),\n axis=0),\n np.concatenate((zeros, y[1, 1, [6, 7, 8, 9]],\n two_zeros), axis=0),\n np.concatenate((y[1, 1, [8, 9, 10, 11]], zeros,\n y[1, 1, [14, 20]]), axis=0)]]]\n correct_x_flange = np.array(correct_x_flange)\n correct_x_center = [[[y[0, 0, [0, 1, 2, 6, 7, 8]],\n y[0, 0, [3, 4, 5, 9, 10, 11]],\n y[0, 0, [12, 13, 14, 18, 19, 20]],\n y[0, 0, [15, 16, 17, 21, 22, 23]]],\n [y[0, 1, [0, 1, 2, 6, 7, 8]],\n y[0, 1, [3, 4, 5, 9, 10, 11]],\n y[0, 1, [12, 13, 14, 18, 19, 20]],\n y[0, 1, [15, 16, 17, 21, 22, 23]]]],\n [[y[1, 0, [0, 1, 2, 6, 7, 8]],\n y[1, 0, [3, 4, 5, 9, 10, 11]],\n y[1, 0, [12, 13, 14, 18, 19, 20]],\n y[1, 0, [15, 16, 17, 21, 22, 23]]],\n [y[1, 1, [0, 1, 2, 6, 7, 8]],\n y[1, 1, [3, 4, 5, 9, 10, 11]],\n y[1, 1, [12, 13, 14, 18, 19, 20]],\n y[1, 1, [15, 16, 17, 21, 22, 23]]]]]\n correct_x_center = np.array(correct_x_center)\n x_indices = common_attention.gather_indices_2d(\n x, query_shape, query_shape)\n x_flange, x_center = common_attention.get_memory_region(\n tf.constant(x, dtype=tf.float32),\n query_shape,\n memory_flange,\n x_indices)\n [x_flange, x_center] = self.evaluate([x_flange, x_center])\n self.assertAllClose(correct_x_flange, x_flange)\n self.assertAllClose(correct_x_center, x_center)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGetShiftedCenterBlocks(self):\n \"\"\"Testing the function that gathers the flanged memory region.\"\"\"\n 
np.set_printoptions(threshold=np.inf)\n batch_size = 2\n num_heads = 2\n height = 4\n width = 6\n depth = 3\n query_shape = (2, 3)\n\n x = np.random.rand(batch_size, num_heads, height, width, depth)\n y = np.reshape(x, (batch_size, num_heads, -1, depth))\n zeros = np.zeros((depth), dtype=np.float32)\n zeros = np.array([zeros])\n\n correct_gathered_x = [[[np.concatenate((zeros, y[0, 0, [0, 1, 2, 6, 7]]),\n axis=0),\n np.concatenate((zeros, y[0, 0, [3, 4, 5, 9, 10]]),\n axis=0),\n np.concatenate((zeros,\n y[0, 0, [12, 13, 14, 18, 19]]),\n axis=0),\n np.concatenate((zeros,\n y[0, 0, [15, 16, 17, 21, 22]]),\n axis=0)],\n [np.concatenate((zeros, y[0, 1, [0, 1, 2, 6, 7]]),\n axis=0),\n np.concatenate((zeros, y[0, 1, [3, 4, 5, 9, 10]]),\n axis=0),\n np.concatenate((zeros,\n y[0, 1, [12, 13, 14, 18, 19]]),\n axis=0),\n np.concatenate((zeros,\n y[0, 1, [15, 16, 17, 21, 22]]),\n axis=0)]],\n [[np.concatenate((zeros, y[1, 0, [0, 1, 2, 6, 7]]),\n axis=0),\n np.concatenate((zeros, y[1, 0, [3, 4, 5, 9, 10]]),\n axis=0),\n np.concatenate((zeros,\n y[1, 0, [12, 13, 14, 18, 19]]),\n axis=0),\n np.concatenate((zeros,\n y[1, 0, [15, 16, 17, 21, 22]]),\n axis=0)],\n [np.concatenate((zeros, y[1, 1, [0, 1, 2, 6, 7]]),\n axis=0),\n np.concatenate((zeros, y[1, 1, [3, 4, 5, 9, 10]]),\n axis=0),\n np.concatenate((zeros,\n y[1, 1, [12, 13, 14, 18, 19]]),\n axis=0),\n np.concatenate((zeros,\n y[1, 1, [15, 16, 17, 21, 22]]),\n axis=0)]]]\n correct_gathered_x = np.array(correct_gathered_x)\n x_indices = common_attention.gather_indices_2d(\n x, query_shape, query_shape)\n gathered_x = common_attention.get_shifted_center_blocks(\n tf.constant(x, dtype=tf.float32),\n x_indices)\n x_indices, gathered_x = self.evaluate([x_indices, gathered_x])\n self.assertAllClose(correct_gathered_x, gathered_x)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductAttentionRelative(self):\n x = np.random.rand(5, 7, 12, 32)\n y = np.random.rand(5, 7, 12, 32)\n a = common_attention.dot_product_attention_relative(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=3)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 12, 32))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n max_relative_position = 3\n a = common_attention.dot_product_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=False)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2SharedRel(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n max_relative_position = 3\n a = common_attention.dot_product_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=True)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def 
testRelativeAttentionV2MaxRelativeLargerThanLength(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 3, 7)\n y = np.random.rand(5, 4, 3, 7)\n max_relative_position = 16\n a = common_attention.dot_product_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=False)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 3, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductUnMaskedAttentionRelativeV2(self):\n x = np.random.rand(5, 7, 12, 32)\n y = np.random.rand(5, 7, 12, 32)\n a = common_attention.dot_product_unmasked_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n 35)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 12, 32))\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testExtractblocks(self):\n\n batch_size = 1\n num_heads = 3\n height = 6\n width = 10\n depth = 15\n block_h = 3\n block_w = 2\n t = np.random.rand(batch_size * num_heads, height, width, depth)\n a = common_attention._extract_blocks(t, block_h, block_w)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (batch_size * num_heads, height//block_h,\n width//block_w, block_h, block_w, depth))\n # also check if the content is right\n out = np.zeros((batch_size*num_heads, height//block_h,\n width//block_w, block_h, block_w, depth))\n for b in range(batch_size*num_heads):\n for x in range(height//block_h):\n for y in range(width//block_w):\n for v in range(block_h):\n for w in range(block_w):\n out[b, x, y, v, w] = t[b, block_h*x+v, block_w*y+w]\n self.assertAllClose(res, out)\n\n def python_get_2d_local_memory(self, t, batch_size, num_heads, height, width,\n num_h_blocks, num_w_blocks, query_shape,\n memory_flange, depth):\n # also check if the content is right\n out = np.zeros((batch_size, num_heads, height//query_shape[0],\n width//query_shape[1], query_shape[0]+2*memory_flange[0],\n query_shape[1]+2*memory_flange[1], depth))\n memory_height = query_shape[0]+2*memory_flange[0]\n memory_width = query_shape[1]+2*memory_flange[1]\n t_padded = np.pad(t, ((0, 0), (0, 0), (memory_flange[0], memory_flange[0]),\n (memory_flange[1], memory_flange[1]), (0, 0)),\n \"constant\",\n constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0)))\n for b in range(batch_size):\n for h in range(num_heads):\n for x in range(num_h_blocks):\n for y in range(num_w_blocks):\n for v in range(memory_height):\n for w in range(memory_width):\n memory_h_start = x*query_shape[0]\n memory_w_start = y*query_shape[1]\n memory_h_index = memory_h_start + v\n memory_w_index = memory_w_start + w\n out[b, h, x, y, v, w] = t_padded[b, h, memory_h_index,\n memory_w_index]\n return out\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testGet2dLocalMemory(self):\n batch_size = 3\n num_heads = 3\n height = 6\n width = 6\n depth = 15\n num_h_blocks = 3\n num_w_blocks = 3\n memory_flange = [1, 1]\n query_shape = [2, 2]\n t = np.random.rand(batch_size, num_heads, height, width, depth)\n a = common_attention.get_2d_local_memory_v2(\n np.reshape(t, (batch_size*num_heads, height, width, depth)),\n query_shape, memory_flange)\n 
self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (batch_size*num_heads,\n num_h_blocks,\n num_w_blocks,\n query_shape[0]+2*memory_flange[0],\n query_shape[1]+2*memory_flange[1], depth))\n out = self.python_get_2d_local_memory(t, batch_size, num_heads,\n height, width, num_h_blocks,\n num_w_blocks, query_shape,\n memory_flange, depth)\n out = np.reshape(out, (batch_size*num_heads,\n num_h_blocks,\n num_w_blocks,\n query_shape[0]+2*memory_flange[0],\n query_shape[1]+2*memory_flange[1], depth))\n\n self.assertAllClose(res, out)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testSplitAlongWidth(self):\n batch_size = 1\n num_heads = 3\n num_outer_h_blocks = 4\n num_outer_w_blocks = 8\n memory_flange = [2, 2]\n num_w_blocks = 3\n depth = 15\n t = np.random.rand(batch_size*num_heads, num_outer_h_blocks,\n num_outer_w_blocks, memory_flange[0], memory_flange[1],\n depth)\n a = common_attention._split_along_width(t)\n # self.evaluate(tf.global_variables_initializer())\n res_l, res_r = self.evaluate(a)\n # res = self.evaluate(a)\n self.assertEqual(res_l.shape, (batch_size*num_heads, num_outer_h_blocks,\n num_w_blocks, memory_flange[0],\n memory_flange[1], depth))\n self.assertEqual(res_r.shape, (batch_size*num_heads, num_outer_h_blocks,\n num_w_blocks, memory_flange[0],\n memory_flange[1], depth))\n # also check if the content is right\n out_l = np.zeros((batch_size*num_heads, num_outer_h_blocks, num_w_blocks,\n memory_flange[0], memory_flange[1], depth))\n out_r = np.zeros((batch_size*num_heads, num_outer_h_blocks, num_w_blocks,\n memory_flange[0], memory_flange[1], depth))\n block_h = memory_flange[0]\n block_w = memory_flange[1]\n for b in range(batch_size*num_heads):\n for x in range(num_outer_h_blocks):\n for y in range(num_w_blocks):\n for v in range(block_h):\n for w in range(block_w):\n # we should compute the index of the position in the\n out_l[b, x, y, v, w] = (\n t[b, x, 2*y, v, w]\n )\n out_r[b, x, y, v, w] = (\n t[b, x, 2*y+3, v, w]\n )\n self.assertAllClose(res_l, out_l)\n self.assertAllClose(res_r, out_r)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testGetLeftRightBlocks(self):\n batch_size = 1\n num_heads = 3\n num_outer_h_blocks = 6\n num_outer_w_blocks = 6\n memory_flange = [2, 2]\n num_h_blocks = 2\n num_w_blocks = 2\n depth = 15\n t = np.random.rand(batch_size*num_heads, num_outer_h_blocks,\n num_outer_w_blocks, memory_flange[0], memory_flange[1],\n depth)\n a = common_attention._get_left_right_blocks(t)\n self.evaluate(tf.global_variables_initializer())\n res_l, res_r = self.evaluate(a)\n self.assertEqual(res_l.shape, (batch_size*num_heads, num_h_blocks,\n num_w_blocks, memory_flange[0]*2,\n memory_flange[1], depth))\n self.assertEqual(res_r.shape, (batch_size*num_heads, num_h_blocks,\n num_w_blocks, memory_flange[0]*2,\n memory_flange[1], depth))\n # also check if the content is right\n block_h = memory_flange[0]*2\n block_w = memory_flange[1]\n out_l = np.zeros((batch_size*num_heads, num_h_blocks,\n num_w_blocks, memory_flange[0]*2, memory_flange[1],\n depth))\n out_r = np.zeros((batch_size*num_heads, num_h_blocks,\n num_w_blocks, memory_flange[0]*2, memory_flange[1],\n depth))\n block_h = memory_flange[0]*2\n block_w = memory_flange[1]\n for b in range(batch_size*num_heads):\n for x in range(num_h_blocks):\n for y in range(num_w_blocks):\n for v in range(block_h):\n for w in range(block_w):\n # we should compute the index of the position in the\n outer_block_h_index = (\n 1 + 
block_h//memory_flange[0]*x + v//2)\n h_index = v%memory_flange[0]\n left_outer_w_index = 2*y\n right_outer_w_index = 2*y + 3\n out_l[b, x, y, v, w] = (\n t[b, outer_block_h_index, left_outer_w_index, h_index,\n w]\n )\n out_r[b, x, y, v, w] = (\n t[b, outer_block_h_index, right_outer_w_index, h_index,\n w]\n )\n self.assertAllClose(res_l, out_l)\n self.assertAllClose(res_r, out_r)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testDotProductUnmaskedAttentionLocal2dTpu(self):\n batch_size = 1\n num_heads = 3\n height = 7\n width = 12\n depth = 15\n num_h_blocks = 4\n num_w_blocks = 6\n memory_flange = [1, 1]\n query_shape = [2, 2]\n memory_h = query_shape[0] + 2*memory_flange[0]\n memory_w = query_shape[1] + 2*memory_flange[1]\n\n q = np.random.rand(batch_size, num_heads, height, width, depth)\n k = np.random.rand(batch_size, num_heads, height, width, depth)\n v = np.random.rand(batch_size, num_heads, height, width, depth)\n a = common_attention.dot_product_unmasked_attention_local_2d_tpu(\n tf.constant(q, dtype=tf.float32),\n tf.constant(k, dtype=tf.float32),\n tf.constant(v, dtype=tf.float32), None, max_relative_position=None,\n query_shape=query_shape, dropout_rate=0.0, image_shapes=None,\n name=None, make_image_summary=False, dropout_broadcast_dims=None)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (batch_size, num_heads,\n height, width, depth))\n # now to check the content too\n # first pad q, k, ad v\n height_padding = -height % query_shape[0]\n width_padding = -width % query_shape[1]\n new_height = height + -height % query_shape[0]\n new_width = width + -width % query_shape[1]\n q = np.pad(q, ((0, 0), (0, 0), (0, height_padding),\n (0, width_padding), (0, 0)), \"constant\",\n constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0)))\n k = np.pad(k, ((0, 0), (0, 0), (0, height_padding),\n (0, width_padding), (0, 0)), \"constant\",\n constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0)))\n v = np.pad(v, ((0, 0), (0, 0), (0, height_padding),\n (0, width_padding), (0, 0)), \"constant\",\n constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0)))\n queries = self.python_get_2d_local_memory(q, batch_size, num_heads,\n new_height, new_width,\n num_h_blocks, num_w_blocks,\n query_shape, [0, 0],\n depth)\n keys = self.python_get_2d_local_memory(k, batch_size, num_heads,\n new_height, new_width, num_h_blocks,\n num_w_blocks, query_shape,\n memory_flange, depth)\n values = self.python_get_2d_local_memory(v, batch_size, num_heads,\n new_height, new_width,\n num_h_blocks, num_w_blocks,\n query_shape,\n memory_flange, depth)\n logits = np.matmul(\n np.reshape(queries, (batch_size, num_heads,\n num_h_blocks, num_w_blocks,\n query_shape[0]*query_shape[1], depth)),\n np.transpose(\n np.reshape(keys, (batch_size, num_heads, num_h_blocks, num_w_blocks,\n memory_h*memory_w, depth)), (0, 1, 2, 3, 5, 4)))\n # now to do a softmax across the logits\n att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n att_output = np.matmul(att, np.reshape(\n values, (batch_size, num_heads, num_h_blocks, num_w_blocks,\n memory_h*memory_w, depth)))\n att_output = np.reshape(att_output,\n (batch_size, num_heads, num_h_blocks, num_w_blocks,\n query_shape[0], query_shape[1], depth))\n # putting the attention results back into the right place\n out = np.zeros((batch_size, num_heads, new_height, new_width, depth))\n for b in range(batch_size):\n for h in range(num_heads):\n for x in range(new_height):\n for y in 
range(new_width):\n h_block_index = x//query_shape[0]\n w_block_index = y//query_shape[1]\n inside_h_index = x%query_shape[0]\n inside_w_index = y%query_shape[1]\n out[b, h, x, y] = (\n att_output[b, h, h_block_index, w_block_index, inside_h_index,\n inside_w_index])\n out = out[:, :, :height, :width, :]\n self.assertAllClose(res, out)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testDotProductUnmaskedAttentionLocal2dTpuSimple(self):\n batch_size = 1\n num_heads = 3\n height = 8\n width = 12\n total_depth = 15\n num_h_blocks = 4\n num_w_blocks = 6\n depth = 5\n query_shape = [2, 2]\n\n x = np.random.rand(batch_size, height, width, total_depth)\n a = (\n common_attention.dot_product_unmasked_attention_local_2d_tpu_simple(\n tf.constant(x, dtype=tf.float32),\n None, total_depth, total_depth, num_heads,\n query_shape=query_shape))\n self.evaluate(tf.global_variables_initializer())\n res, q, k, v = self.evaluate(a)\n self.assertEqual(res.shape, (batch_size, height, width, total_depth))\n # reshape q, k, v from batch, heads, height*width to batch, heads,\n # num_h_blocks, num_w_blocks, query_shape[0], query_shape[1], depth\n resh_shape = (batch_size, num_h_blocks, num_w_blocks,\n num_heads, query_shape[0], query_shape[1],\n depth)\n resh = lambda l: np.reshape(l, resh_shape)\n q, k, v = map(resh, [q, k, v])\n trans = lambda l: np.transpose(l, (0, 3, 1, 2, 4, 5, 6))\n q, k, v = map(trans, [q, k, v])\n new_height = height + -height % query_shape[0]\n new_width = width + -width % query_shape[1]\n (queries, keys, values) = (q, k, v)\n logits = np.matmul(\n np.reshape(queries, (batch_size, num_heads,\n num_h_blocks, num_w_blocks,\n query_shape[0]*query_shape[1], depth)),\n np.transpose(\n np.reshape(keys, (batch_size, num_heads, num_h_blocks, num_w_blocks,\n query_shape[0]*query_shape[1], depth)),\n (0, 1, 2, 3, 5, 4)))\n # now to do a softmax across the logits\n att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n att_output = np.matmul(att, np.reshape(\n values, (batch_size, num_heads, num_h_blocks, num_w_blocks,\n query_shape[0]*query_shape[1], depth)))\n att_output = np.reshape(att_output,\n (batch_size, num_heads, num_h_blocks, num_w_blocks,\n query_shape[0], query_shape[1], depth))\n # putting the attention results back into the right place\n out = np.zeros((batch_size, num_heads, new_height, new_width, depth))\n for b in range(batch_size):\n for h in range(num_heads):\n for x in range(new_height):\n for y in range(new_width):\n h_block_index = x//query_shape[0]\n w_block_index = y//query_shape[1]\n inside_h_index = x%query_shape[0]\n inside_w_index = y%query_shape[1]\n out[b, h, x, y] = (\n att_output[b, h, h_block_index, w_block_index, inside_h_index,\n inside_w_index])\n out = np.transpose(out, (0, 2, 3, 1, 4))\n out = np.reshape(out, (batch_size, new_height, new_width, total_depth))\n out = out[:, :height, :width, :]\n\n self.assertAllClose(res, out)\n\n def python_relative_att(self, q, k, v, batch, num_heads, height, width,\n depth, height_key_relative_embeddings,\n width_key_relative_embeddings,\n heads_share_relative_embedding):\n \"\"\"Relative attention computation in numpy.\n\n For query index (i,j) and key index (l, m) the logit is\n q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are the set of\n relative embeddings in height and width spatial dimensions, respectively.\n\n Args:\n q: [batch, heads, height, width, depth] tensor\n k: [batch, heads, height, width, depth] tensor\n v: [batch, heads, height, width, depth] tensor\n batch: 
int scalar\n num_heads: int scalar\n height: int scalar\n width: int scalar\n depth: int scalar\n height_key_relative_embeddings: a tensor of relative embeddings\n width_key_relative_embeddings: a tensor of relative embeddings\n heads_share_relative_embedding: a boolean\n\n Returns:\n att_output: A tensor\n \"\"\"\n\n logits = np.zeros((batch, num_heads, height*width, height*width))\n for b in range(batch):\n for h in range(num_heads):\n for i in range(height*width):\n q_col = i%width\n q_row = int((i-q_col)/width)\n for j in range(height*width):\n k_col = j%width\n k_row = int((j-k_col)/width)\n logit = np.dot(q[b][h][q_row][q_col], k[b][h][k_row][k_col])\n width_rel_dist = k_col - q_col\n width_rel_index = width-1 + width_rel_dist\n if heads_share_relative_embedding:\n width_rel_logit = (\n np.dot(q[b][h][q_row][q_col],\n width_key_relative_embeddings[width_rel_index]))\n else:\n width_rel_logit = (\n np.dot(q[b][h][q_row][q_col],\n width_key_relative_embeddings[h][width_rel_index]))\n height_rel_dist = k_row - q_row\n height_rel_index = height-1 + height_rel_dist\n if heads_share_relative_embedding:\n height_rel_logit = (\n np.dot(q[b][h][q_row][q_col],\n height_key_relative_embeddings[height_rel_index]))\n else:\n height_rel_logit = (\n np.dot(q[b][h][q_row][q_col],\n height_key_relative_embeddings[h][height_rel_index]))\n logits[b, h, i, j] = logit + width_rel_logit + height_rel_logit\n # now to do a softmax across the logits\n att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n # comparing the outputs\n att_output = np.matmul(att,\n np.reshape(v, (\n batch, num_heads, height*width, depth)))\n att_output = np.reshape(att_output,\n (batch, num_heads, height, width, depth))\n return att_output\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductUnMaskedAttentionRelative2d(self):\n batch = 1\n height = 3\n width = 3\n num_heads = 2\n max_relative_position = 6\n depth = 5\n heads_share_relative_embedding = False\n q = np.random.rand(batch, num_heads, height, width, depth)\n k = np.random.rand(batch, num_heads, height, width, depth)\n v = np.random.rand(batch, num_heads, height, width, depth)\n a = common_attention.dot_product_unmasked_self_attention_relative_2d(\n tf.constant(q, dtype=tf.float32),\n tf.constant(k, dtype=tf.float32),\n tf.constant(v, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=heads_share_relative_embedding)\n\n self.evaluate(tf.global_variables_initializer())\n res, height_key_relative_embeddings, width_key_relative_embeddings = (\n self.evaluate(a))\n att_output = self.python_relative_att(\n q, k, v, batch, num_heads, height, width, depth,\n height_key_relative_embeddings, width_key_relative_embeddings,\n heads_share_relative_embedding)\n self.assertEqual(res.shape, (batch, num_heads, height, width, depth))\n self.assertAllClose(res, att_output)\n\n @parameterized.parameters(\n (1, 10, 12, 2, 6, 3),\n (1, 1, 12, 2, 6, 3),\n (2, 10, 1, 2, 6, 3),\n (1, 10, 12, 2, 1, 1),\n (1, 10, 12, 2, 2, 8),\n (4, 10, 12, 2, 12, 10),\n )\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductUnMaskedAttentionRelative2dSharedOneRow(\n self, batch, height, width, num_heads, max_relative_position, depth):\n heads_share_relative_embedding = True\n q = np.random.rand(batch, num_heads, height, width, depth)\n k = np.random.rand(batch, num_heads, height, width, depth)\n v = np.random.rand(batch, num_heads, height, width, depth)\n\n a = 
common_attention.dot_product_unmasked_self_attention_relative_2d(\n tf.constant(q, dtype=tf.float32),\n tf.constant(k, dtype=tf.float32),\n tf.constant(v, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=heads_share_relative_embedding)\n\n self.evaluate(tf.global_variables_initializer())\n (res, height_key_relative_embeddings,\n width_key_relative_embeddings) = self.evaluate(a)\n att_output = self.python_relative_att(\n q, k, v, batch, num_heads, height, width, depth,\n height_key_relative_embeddings, width_key_relative_embeddings,\n heads_share_relative_embedding)\n self.assertEqual(res.shape,\n (batch, num_heads, height, width, depth))\n self.assertAllClose(res, att_output)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2Unmasked(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n max_relative_position = 3\n a = common_attention.dot_product_unmasked_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=False)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2UnmaskedSharedRel(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n max_relative_position = 3\n a = common_attention.dot_product_unmasked_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=True)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2UnmaskedRelativeLargerThanLength(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 3, 7)\n y = np.random.rand(5, 4, 3, 7)\n max_relative_position = 16\n a = common_attention.dot_product_unmasked_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=False)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 3, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testMaskedRelativeLocalAttentionV2(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n block_length = 3\n a = common_attention.masked_relative_local_attention_1d(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n block_length=block_length,\n heads_share_relative_embedding=True,\n add_relative_to_values=False,\n name=\"masked_relative_local_attention_1d\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testMaskedRelativeLocalAttentionV2AddRelativeValues(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n block_length = 3\n a = 
common_attention.masked_relative_local_attention_1d(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n block_length=block_length,\n heads_share_relative_embedding=True,\n add_relative_to_values=False,\n name=\"masked_relative_local_attention_1d\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testMaskedRelativeLocalAttentionV2SeqShorterThanBlockLength(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 7, 2, 7)\n y = np.random.rand(5, 7, 2, 7)\n block_length = 3\n a = common_attention.masked_relative_local_attention_1d(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n block_length=block_length,\n heads_share_relative_embedding=True,\n name=\"masked_relative_local_attention_1d\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 2, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testMaskedRelativeLocalAttentionV2SeqShorterThanTwiceBlockLength(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 7, 5, 7)\n y = np.random.rand(5, 7, 5, 7)\n block_length = 3\n a = common_attention.masked_relative_local_attention_1d(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n block_length=block_length,\n heads_share_relative_embedding=True,\n name=\"masked_relative_local_attention_1d\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 5, 7))\n\n def testBiasBatchCoordinates(self):\n \"\"\"Testing the batch coordinates mask.\"\"\"\n q = tf.constant([0, 0, 1, 1, 1, 1, 2, 2, 2], dtype=tf.int32)\n q = tf.expand_dims(q, axis=-1)\n\n k = tf.constant([0, 0, 0, 2, 2, 3, 3, 3], dtype=tf.int32)\n k = tf.expand_dims(k, axis=-1)\n\n ground_truth = np.array([\n [0, 0, 0, 1, 1, 1, 1, 1], # 0\n [0, 0, 0, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1], # 1 (just masked)\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1], # 2\n [1, 1, 1, 0, 0, 1, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1],\n ], np.float32) * -1e9\n\n bias = common_attention.attention_bias_coordinates(q, k)\n self.assertAllClose(self.evaluate(bias), ground_truth)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testBiasFuture(self):\n \"\"\"Testing the sequence order mask.\"\"\"\n q = tf.constant([0, 1, 2, 3, 0, 1, 2, 0, 1], dtype=tf.int32)\n q = tf.expand_dims(q, axis=-1)\n\n k = tf.constant([0, 1, 2, 3, 4, 0, 1, 2], dtype=tf.int32)\n k = tf.expand_dims(k, axis=-1)\n\n ground_truth = np.array([\n [0, 1, 1, 1, 1, 0, 1, 1], # 0\n [0, 0, 1, 1, 1, 0, 0, 1], # 1\n [0, 0, 0, 1, 1, 0, 0, 0], # 2\n [0, 0, 0, 0, 1, 0, 0, 0], # 3\n [0, 1, 1, 1, 1, 0, 1, 1], # 0\n [0, 0, 1, 1, 1, 0, 0, 1], # 1\n [0, 0, 0, 1, 1, 0, 0, 0], # 2\n [0, 1, 1, 1, 1, 0, 1, 1], # 0\n [0, 0, 1, 1, 1, 0, 0, 1], # 1\n ], np.float32) * -1e9\n\n bias = common_attention.attention_bias_future(q, k)\n self.assertAllClose(self.evaluate(bias), ground_truth)\n\n @test_utils.run_in_graph_mode_only()\n def testMultiheadAttentionWithLayerCollection(self):\n \"\"\"Testing multihead attention with layer collection for kfac.\"\"\"\n x = tf.zeros([3, 4, 5], tf.float32)\n layer_collection = kfac.LayerCollection()\n common_attention.multihead_attention(\n x, None, None, 10, 10, 10, 2, 0.2,\n 
layer_collection=layer_collection)\n self.assertLen(layer_collection.get_blocks(), 4)\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 3),\n (\"dynamic_batch\", None, 1, 8, 4, 2),\n (\"batches\", 4, 3, 8, 4, 2),\n (\"block_length\", 1, 1, 8, 4, 4),\n )\n def testDilatedAttention(self, batch, heads, length, depth_v, block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_v])\n k = tf.random_normal([batch, heads, length, depth_v])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.dilated_self_attention_1d(\n q, k, v,\n query_block_size=block_length,\n memory_block_size=block_length,\n gap_size=2,\n num_memory_blocks=2)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 3),\n (\"dynamic_batch\", None, 1, 8, 4, 2),\n (\"batches\", 4, 3, 8, 4, 2),\n (\"block_length\", 1, 1, 8, 4, 4),\n )\n def testMaskedDilatedAttention(self, batch, heads, length, depth_v,\n block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_v])\n k = tf.random_normal([batch, heads, length, depth_v])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.masked_dilated_self_attention_1d(\n q, k, v,\n query_block_size=block_length,\n memory_block_size=block_length,\n gap_size=2,\n num_memory_blocks=2)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"ResNet model with model and data parallelism using MTF.\n\nIntegration of Mesh tensorflow with ResNet to do model parallelism.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport mesh_tensorflow as mtf\n\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.utils import mtf_model\nfrom tensor2tensor.utils import registry\nimport tensorflow as tf\n\n\nBATCH_NORM_DECAY = 0.9\nBATCH_NORM_EPSILON = 1e-5\n\n\ndef batch_norm_relu(inputs, is_training, relu=True):\n \"\"\"Block of batch norm and relu.\"\"\"\n inputs = mtf.layers.batch_norm(\n inputs,\n is_training,\n BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n init_zero=(not relu))\n if relu:\n inputs = mtf.relu(inputs)\n return inputs\n\n\ndef bottleneck_block(inputs,\n filters,\n is_training,\n strides,\n projection_shortcut=None,\n row_blocks_dim=None,\n col_blocks_dim=None):\n \"\"\"Bottleneck block variant for residual networks with BN after convolutions.\n\n Args:\n 
inputs: a `mtf.Tensor` of shape\n `[batch_dim, row_blocks, col_blocks, rows, cols, in_channels]`.\n filters: `int` number of filters for the first two convolutions. Note\n that the third and final convolution will use 4 times as many filters.\n is_training: `bool` for whether the model is in training mode.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input.\n projection_shortcut: `function` to use for projection shortcuts (typically\n a 1x1 convolution to match the filter dimensions). If None, no\n projection is used and the input is passed as unchanged through the\n shortcut connection.\n row_blocks_dim: a mtf.Dimension, row dimension which is\n spatially partitioned along mesh axis\n col_blocks_dim: a mtf.Dimension, row dimension which is\n spatially partitioned along mesh axis\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n shortcut = inputs\n\n if projection_shortcut is not None:\n filters_dim = mtf.Dimension(\"filtersp\", filters)\n shortcut = projection_shortcut(inputs, filters_dim)\n\n # First conv block\n inputs = mtf.layers.conv2d_with_blocks(\n inputs,\n mtf.Dimension(\"filters1\", filters),\n filter_size=[1, 1],\n strides=[1, 1],\n padding=\"SAME\",\n h_blocks_dim=None, w_blocks_dim=col_blocks_dim,\n name=\"conv0\")\n\n # TODO(nikip): Add Dropout?\n inputs = batch_norm_relu(inputs, is_training)\n\n # Second conv block\n inputs = mtf.layers.conv2d_with_blocks(\n inputs,\n mtf.Dimension(\"filters2\", 4 * filters),\n filter_size=[3, 3],\n strides=[1, 1],\n padding=\"SAME\",\n h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim,\n name=\"conv1\")\n\n inputs = batch_norm_relu(inputs, is_training)\n\n # Third wide conv filter block\n inputs = mtf.layers.conv2d_with_blocks(\n inputs,\n mtf.Dimension(\"filters3\", filters),\n filter_size=[1, 1],\n strides=strides,\n padding=\"SAME\",\n h_blocks_dim=None, w_blocks_dim=col_blocks_dim,\n name=\"conv2\")\n\n # TODO(nikip): Althought the original resnet code has this batch norm, in our\n # setup this is causing no gradients to be passed. Investigate further.\n # inputs = batch_norm_relu(inputs, is_training, relu=True)\n\n # TODO(nikip): Maybe add residual with a projection?\n return mtf.relu(\n shortcut + mtf.rename_dimension(\n inputs, inputs.shape.dims[-1].name, shortcut.shape.dims[-1].name))\n\n\ndef block_layer(inputs,\n filters,\n blocks,\n strides,\n is_training,\n name,\n row_blocks_dim=None,\n col_blocks_dim=None):\n \"\"\"Creates one layer of blocks for the ResNet model.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first convolution of the layer.\n blocks: `int` number of blocks contained in the layer.\n strides: `int` stride to use for the first convolution of the layer. 
If\n greater than 1, this layer will downsample the input.\n is_training: `bool` for whether the model is training.\n name: `str`name for the Tensor output of the block layer.\n row_blocks_dim: a mtf.Dimension, row dimension which is\n spatially partitioned along mesh axis\n col_blocks_dim: a mtf.Dimension, row dimension which is\n spatially partitioned along mesh axis\n\n Returns:\n The output `Tensor` of the block layer.\n \"\"\"\n with tf.variable_scope(name, default_name=\"block_layer\"):\n # Only the first block per block_layer uses projection_shortcut and strides\n def projection_shortcut(inputs, output_dim):\n \"\"\"Project identity branch.\"\"\"\n inputs = mtf.layers.conv2d_with_blocks(\n inputs,\n output_dim,\n filter_size=[1, 1],\n strides=strides,\n padding=\"SAME\",\n h_blocks_dim=None, w_blocks_dim=col_blocks_dim,\n name=\"shortcut0\")\n return batch_norm_relu(\n inputs, is_training, relu=False)\n\n inputs = bottleneck_block(\n inputs,\n filters,\n is_training,\n strides=strides,\n projection_shortcut=projection_shortcut,\n row_blocks_dim=row_blocks_dim,\n col_blocks_dim=col_blocks_dim)\n\n for i in range(1, blocks):\n with tf.variable_scope(\"bottleneck_%d\" % i):\n inputs = bottleneck_block(\n inputs,\n filters,\n is_training,\n strides=[1, 1, 1, 1],\n projection_shortcut=None,\n row_blocks_dim=row_blocks_dim,\n col_blocks_dim=col_blocks_dim)\n\n return inputs\n\n\[email protected]_model\nclass MtfResNet(mtf_model.MtfModel):\n \"\"\"ResNet in mesh_tensorflow.\"\"\"\n\n def set_activation_type(self):\n hparams = self._hparams\n if hparams.activation_dtype == \"float32\":\n activation_dtype = tf.float32\n elif hparams.activation_dtype == \"float16\":\n activation_dtype = tf.float16\n elif hparams.activation_dtype == \"bfloat16\":\n activation_dtype = tf.bfloat16\n else:\n raise ValueError(\n \"unknown hparams.activation_dtype %s\" % hparams.activation_dtype)\n return activation_dtype\n\n def mtf_model_fn(self, features, mesh):\n features = copy.copy(features)\n tf.logging.info(\"features = %s\" % features)\n hparams = self._hparams\n activation_dtype = self.set_activation_type()\n is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN\n\n # Declare all the dimensions\n batch_dim = mtf.Dimension(\"batch\", hparams.batch_size)\n hidden_dim = mtf.Dimension(\"hidden\", hparams.hidden_size)\n filter_dim = mtf.Dimension(\"filters\", hparams.filter_sizes[0])\n rows_dim = mtf.Dimension(\"rows_size\", hparams.rows_size)\n cols_dim = mtf.Dimension(\"cols_size\", hparams.cols_size)\n row_blocks_dim = mtf.Dimension(\"row_blocks\", hparams.row_blocks)\n col_blocks_dim = mtf.Dimension(\"col_blocks\", hparams.col_blocks)\n classes_dim = mtf.Dimension(\"classes\", 10)\n channels_dim = mtf.Dimension(\"channels\", 3)\n one_channel_dim = mtf.Dimension(\"one_channel\", 1)\n\n inputs = features[\"inputs\"]\n x = mtf.import_tf_tensor(\n mesh, tf.reshape(inputs, [\n hparams.batch_size,\n hparams.row_blocks,\n hparams.rows_size // hparams.row_blocks,\n hparams.col_blocks,\n hparams.num_channels*hparams.cols_size // hparams.col_blocks,\n hparams.num_channels]),\n mtf.Shape(\n [batch_dim, row_blocks_dim, rows_dim,\n col_blocks_dim, cols_dim, channels_dim]))\n x = mtf.transpose(x, [batch_dim, row_blocks_dim, col_blocks_dim,\n rows_dim, cols_dim, channels_dim])\n\n x = mtf.to_float(x)\n x = mtf.layers.conv2d_with_blocks(\n x,\n filter_dim,\n filter_size=[3, 3],\n strides=[1, 1],\n padding=\"SAME\",\n h_blocks_dim=None, w_blocks_dim=col_blocks_dim,\n name=\"initial_filter\")\n\n x = batch_norm_relu(x, 
is_training)\n\n # Conv blocks\n # [block - strided block layer - strided block layer] x n\n for layer in range(hparams.num_layers):\n layer_name = \"block_layer_%d\" % layer\n with tf.variable_scope(layer_name):\n # Residual block layer\n x = block_layer(\n inputs=x,\n filters=hparams.filter_sizes[0],\n blocks=hparams.layer_sizes[0],\n strides=[1, 1],\n is_training=is_training,\n name=\"block_layer1\",\n row_blocks_dim=None,\n col_blocks_dim=None)\n x = block_layer(\n inputs=x,\n filters=hparams.filter_sizes[1],\n blocks=hparams.layer_sizes[1],\n strides=[1, 1],\n is_training=is_training,\n name=\"block_layer2\",\n row_blocks_dim=None,\n col_blocks_dim=None)\n x = block_layer(\n inputs=x,\n filters=hparams.filter_sizes[2],\n blocks=hparams.layer_sizes[2],\n strides=[1, 1],\n is_training=is_training,\n name=\"block_layer3\",\n row_blocks_dim=None,\n col_blocks_dim=None)\n\n # Calculate the logits and loss.\n out = x\n outputs = mtf.layers.dense(\n out, hidden_dim,\n reduced_dims=out.shape.dims[-5:],\n activation=mtf.relu, name=\"dense\")\n\n # We assume fixed vocab size for targets\n labels = tf.squeeze(tf.to_int32(features[\"targets\"]), [2, 3])\n labels = mtf.import_tf_tensor(\n mesh, tf.reshape(labels, [hparams.batch_size]), mtf.Shape([batch_dim]))\n\n logits = mtf.layers.dense(outputs, classes_dim, name=\"logits\")\n soft_targets = mtf.one_hot(labels, classes_dim, dtype=activation_dtype)\n loss = mtf.layers.softmax_cross_entropy_with_logits(\n logits, soft_targets, classes_dim)\n\n # Reshape logits so it doesn't break inside t2t.\n logits = mtf.reshape(\n logits,\n mtf.Shape([batch_dim, one_channel_dim, classes_dim]))\n loss = mtf.reduce_mean(loss)\n return logits, loss\n\n\[email protected]_hparams\ndef mtf_resnet_base():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = common_hparams.basic_params1()\n hparams.no_data_parallelism = True\n hparams.use_fixed_batch_size = True\n hparams.batch_size = 32\n hparams.max_length = 3072\n hparams.hidden_size = 256\n hparams.label_smoothing = 0.0\n # 8-way model-parallelism\n hparams.add_hparam(\"mesh_shape\", \"batch:8\")\n hparams.add_hparam(\"layout\", \"batch:batch\")\n hparams.add_hparam(\"filter_size\", 1024)\n\n hparams.add_hparam(\"num_layers\", 6)\n # Share weights between input and target embeddings\n hparams.shared_embedding = True\n\n hparams.shared_embedding_and_softmax_weights = True\n hparams.optimizer = \"Adafactor\"\n hparams.learning_rate_schedule = \"rsqrt_decay\"\n hparams.learning_rate_warmup_steps = 10000\n hparams.add_hparam(\"d_kv\", 32)\n\n # Image related hparams\n hparams.add_hparam(\"img_len\", 32)\n hparams.add_hparam(\"num_channels\", 3)\n hparams.add_hparam(\"row_blocks\", 1)\n hparams.add_hparam(\"col_blocks\", 1)\n hparams.add_hparam(\"rows_size\", 32)\n hparams.add_hparam(\"cols_size\", 32)\n\n # Model-specific parameters\n hparams.add_hparam(\"layer_sizes\", [3, 4, 6, 3])\n hparams.add_hparam(\"filter_sizes\", [64, 64, 128, 256, 512])\n hparams.add_hparam(\"is_cifar\", False)\n\n # Variable init\n hparams.initializer = \"normal_unit_scaling\"\n hparams.initializer_gain = 2.\n\n # TODO(nikip): Change optimization scheme?\n hparams.learning_rate = 0.1\n return hparams\n\n\[email protected]_hparams\ndef mtf_resnet_tiny():\n \"\"\"Catch bugs locally...\"\"\"\n hparams = mtf_resnet_base()\n hparams.num_layers = 2\n hparams.hidden_size = 64\n hparams.filter_size = 64\n hparams.batch_size = 16\n # data parallelism and model-parallelism\n hparams.col_blocks = 1\n hparams.mesh_shape = \"batch:2\"\n hparams.layout = 
\"batch:batch\"\n hparams.layer_sizes = [1, 2, 3]\n hparams.filter_sizes = [64, 64, 64]\n return hparams\n\n\[email protected]_hparams\ndef mtf_resnet_single():\n \"\"\"Small single parameters.\"\"\"\n hparams = mtf_resnet_tiny()\n hparams.mesh_shape = \"\"\n hparams.layout = \"\"\n hparams.hidden_size = 32\n hparams.filter_size = 32\n hparams.batch_size = 1\n hparams.num_encoder_layers = 1\n hparams.num_layers = 1\n hparams.block_length = 16\n return hparams\n\n\[email protected]_hparams\ndef mtf_resnet_base_single():\n \"\"\"Small single parameters.\"\"\"\n hparams = mtf_resnet_base()\n hparams.num_layers = 6\n hparams.filter_size = 256\n hparams.block_length = 128\n hparams.mesh_shape = \"\"\n hparams.layout = \"\"\n return hparams\n\n\[email protected]_hparams\ndef mtf_resnet_base_cifar():\n \"\"\"Data parallel CIFAR parameters.\"\"\"\n hparams = mtf_resnet_base()\n hparams.mesh_shape = \"batch:32\"\n hparams.layoyt = \"batch:batch\"\n hparams.batch_size = 8\n hparams.num_layers = 12\n hparams.block_length = 256\n hparams.hidden_size = 512\n hparams.filter_size = 2048\n hparams.learning_rate = 0.5\n hparams.learning_rate_warmup_steps = 4000\n hparams.layer_preprocess_sequence = \"none\"\n hparams.layer_postprocess_sequence = \"dan\"\n hparams.layer_prepostprocess_dropout = 0.3\n hparams.unconditional = True\n return hparams\n", "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for translation data-sets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\nimport tarfile\nimport zipfile\nfrom tensor2tensor.data_generators import cleaner_en_xx\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.utils import bleu_hook\nfrom tensor2tensor.utils import mlperf_log\n\nimport tensorflow as tf\n\nFLAGS = tf.flags.FLAGS\n\n\nclass TranslateProblem(text_problems.Text2TextProblem):\n \"\"\"Base class for translation problems.\"\"\"\n\n @property\n def is_generate_per_split(self):\n return True\n\n @property\n def approx_vocab_size(self):\n return 2**15\n\n @property\n def datatypes_to_clean(self):\n return None\n\n def source_data_files(self, dataset_split):\n \"\"\"Files to be passed to compile_data.\"\"\"\n raise NotImplementedError()\n\n def vocab_data_files(self):\n \"\"\"Files to be passed to get_or_generate_vocab.\"\"\"\n return self.source_data_files(problem.DatasetSplit.TRAIN)\n\n def generate_samples(\n self,\n data_dir,\n tmp_dir,\n dataset_split,\n custom_iterator=text_problems.text2text_txt_iterator):\n datasets = self.source_data_files(dataset_split)\n tag = \"dev\"\n datatypes_to_clean = None\n if dataset_split == problem.DatasetSplit.TRAIN:\n tag = \"train\"\n datatypes_to_clean = self.datatypes_to_clean\n data_path = 
compile_data(\n tmp_dir, datasets, \"%s-compiled-%s\" % (self.name, tag),\n datatypes_to_clean=datatypes_to_clean)\n\n return custom_iterator(data_path + \".lang1\", data_path + \".lang2\")\n\n def generate_text_for_vocab(self, data_dir, tmp_dir):\n return generator_utils.generate_lines_for_vocab(tmp_dir,\n self.vocab_data_files())\n\n @property\n def decode_hooks(self):\n return [compute_bleu_summaries]\n\n\ndef compute_bleu_summaries(hook_args):\n \"\"\"Compute BLEU core summaries using the decoder output.\n\n Args:\n hook_args: DecodeHookArgs namedtuple\n Returns:\n A list of tf.Summary values if hook_args.hparams contains the\n reference file and the translated file.\n \"\"\"\n decode_hparams = hook_args.decode_hparams\n\n if not (decode_hparams.decode_reference and decode_hparams.decode_to_file):\n return None\n\n values = []\n bleu = 100 * bleu_hook.bleu_wrapper(\n decode_hparams.decode_reference, decode_hparams.decode_to_file)\n values.append(tf.Summary.Value(tag=\"BLEU\", simple_value=bleu))\n tf.logging.info(\"%s: BLEU = %6.2f\" % (decode_hparams.decode_to_file, bleu))\n if hook_args.hparams.mlperf_mode:\n current_step = decode_hparams.mlperf_decode_step\n mlperf_log.transformer_print(\n key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold)\n mlperf_log.transformer_print(\n key=mlperf_log.EVAL_ACCURACY,\n value={\n \"epoch\": max(current_step // decode_hparams.iterations_per_loop - 1,\n 0),\n \"value\": bleu\n })\n mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)\n\n if bleu >= decode_hparams.mlperf_threshold:\n decode_hparams.set_hparam(\"mlperf_success\", True)\n\n return values\n\n\ndef _preprocess_sgm(line, is_sgm):\n \"\"\"Preprocessing to strip tags in SGM files.\"\"\"\n if not is_sgm:\n return line\n # In SGM files, remove <srcset ...>, <p>, <doc ...> lines.\n if line.startswith(\"<srcset\") or line.startswith(\"</srcset\"):\n return \"\"\n if line.startswith(\"<doc\") or line.startswith(\"</doc\"):\n return \"\"\n if line.startswith(\"<p>\") or line.startswith(\"</p>\"):\n return \"\"\n # Strip <seg> tags.\n line = line.strip()\n if line.startswith(\"<seg\") and line.endswith(\"</seg>\"):\n i = line.index(\">\")\n return line[i + 1:-6] # Strip first <seg ...> and last </seg>.\n\n\ndef _clean_sentences(sentence_pairs):\n res_pairs = []\n for cleaned in cleaner_en_xx.clean_en_xx_pairs(sentence_pairs):\n res_pairs.append(cleaned)\n return res_pairs\n\n\ndef _tmx_to_source_target(tmx_file, source_resfile, target_resfile,\n do_cleaning=False):\n source_target_pairs = cleaner_en_xx.paracrawl_v3_pairs(tmx_file)\n if do_cleaning:\n source_target_pairs = cleaner_en_xx.clean_en_xx_pairs(source_target_pairs)\n for source, target in source_target_pairs:\n source_resfile.write(source)\n source_resfile.write(\"\\n\")\n target_resfile.write(target)\n target_resfile.write(\"\\n\")\n\n\ndef compile_data(tmp_dir, datasets, filename, datatypes_to_clean=None):\n \"\"\"Concatenates all `datasets` and saves to `filename`.\"\"\"\n datatypes_to_clean = datatypes_to_clean or []\n filename = os.path.join(tmp_dir, filename)\n lang1_fname = filename + \".lang1\"\n lang2_fname = filename + \".lang2\"\n if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname):\n tf.logging.info(\"Skipping compile data, found files:\\n%s\\n%s\", lang1_fname,\n lang2_fname)\n return filename\n with tf.gfile.GFile(lang1_fname, mode=\"w\") as lang1_resfile:\n with tf.gfile.GFile(lang2_fname, mode=\"w\") as lang2_resfile:\n for dataset in datasets:\n url = dataset[0]\n compressed_filename = 
os.path.basename(url)\n compressed_filepath = os.path.join(tmp_dir, compressed_filename)\n if url.startswith(\"http\"):\n generator_utils.maybe_download(tmp_dir, compressed_filename, url)\n if compressed_filename.endswith(\".zip\"):\n zipfile.ZipFile(os.path.join(compressed_filepath),\n \"r\").extractall(tmp_dir)\n\n if dataset[1][0] == \"tmx\":\n cleaning_requested = \"tmx\" in datatypes_to_clean\n tmx_filename = os.path.join(tmp_dir, dataset[1][1])\n if tmx_filename.endswith(\".gz\"):\n with gzip.open(tmx_filename, \"rb\") as tmx_file:\n _tmx_to_source_target(tmx_file, lang1_resfile, lang2_resfile,\n do_cleaning=cleaning_requested)\n else:\n with tf.gfile.Open(tmx_filename) as tmx_file:\n _tmx_to_source_target(tmx_file, lang1_resfile, lang2_resfile,\n do_cleaning=cleaning_requested)\n\n elif dataset[1][0] == \"tsv\":\n _, src_column, trg_column, glob_pattern = dataset[1]\n filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))\n if not filenames:\n # Capture *.tgz and *.tar.gz too.\n mode = \"r:gz\" if compressed_filepath.endswith(\"gz\") else \"r\"\n with tarfile.open(compressed_filepath, mode) as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))\n for tsv_filename in filenames:\n if tsv_filename.endswith(\".gz\"):\n new_filename = tsv_filename.strip(\".gz\")\n generator_utils.gunzip_file(tsv_filename, new_filename)\n tsv_filename = new_filename\n with tf.gfile.Open(tsv_filename) as tsv_file:\n for line in tsv_file:\n if line and \"\\t\" in line:\n parts = line.split(\"\\t\")\n source, target = parts[src_column], parts[trg_column]\n source, target = source.strip(), target.strip()\n clean_pairs = [(source, target)]\n if \"tsv\" in datatypes_to_clean:\n clean_pairs = cleaner_en_xx.clean_en_xx_pairs(clean_pairs)\n for source, target in clean_pairs:\n if source and target:\n lang1_resfile.write(source)\n lang1_resfile.write(\"\\n\")\n lang2_resfile.write(target)\n lang2_resfile.write(\"\\n\")\n\n else:\n lang1_filename, lang2_filename = dataset[1]\n lang1_filepath = os.path.join(tmp_dir, lang1_filename)\n lang2_filepath = os.path.join(tmp_dir, lang2_filename)\n is_sgm = (\n lang1_filename.endswith(\"sgm\") and lang2_filename.endswith(\"sgm\"))\n\n if not (tf.gfile.Exists(lang1_filepath) and\n tf.gfile.Exists(lang2_filepath)):\n # For .tar.gz and .tgz files, we read compressed.\n mode = \"r:gz\" if compressed_filepath.endswith(\"gz\") else \"r\"\n with tarfile.open(compressed_filepath, mode) as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n if lang1_filepath.endswith(\".gz\"):\n new_filepath = lang1_filepath.strip(\".gz\")\n generator_utils.gunzip_file(lang1_filepath, new_filepath)\n lang1_filepath = new_filepath\n if lang2_filepath.endswith(\".gz\"):\n new_filepath = lang2_filepath.strip(\".gz\")\n generator_utils.gunzip_file(lang2_filepath, new_filepath)\n lang2_filepath = new_filepath\n\n for example in text_problems.text2text_txt_iterator(\n lang1_filepath, lang2_filepath):\n line1res = _preprocess_sgm(example[\"inputs\"], is_sgm)\n line2res = _preprocess_sgm(example[\"targets\"], is_sgm)\n clean_pairs = [(line1res, line2res)]\n if \"txt\" in datatypes_to_clean:\n clean_pairs = cleaner_en_xx.clean_en_xx_pairs(clean_pairs)\n for line1res, line2res in clean_pairs:\n if line1res and line2res:\n lang1_resfile.write(line1res)\n lang1_resfile.write(\"\\n\")\n lang2_resfile.write(line2res)\n lang2_resfile.write(\"\\n\")\n\n return filename\n\n\nclass TranslateDistillProblem(TranslateProblem):\n \"\"\"Base class for translation 
problems.\"\"\"\n\n def is_generate_per_split(self):\n return True\n\n def example_reading_spec(self):\n data_fields = {\"dist_targets\": tf.VarLenFeature(tf.int64)}\n\n if self.has_inputs:\n data_fields[\"inputs\"] = tf.VarLenFeature(tf.int64)\n\n # hack: ignoring true targets and putting dist_targets in targets\n data_items_to_decoders = {\n \"inputs\": tf.contrib.slim.tfexample_decoder.Tensor(\"inputs\"),\n \"targets\": tf.contrib.slim.tfexample_decoder.Tensor(\"dist_targets\"),\n }\n\n return (data_fields, data_items_to_decoders)\n\n def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):\n \"\"\"Get vocab for distill problems.\"\"\"\n # We assume that vocab file is present in data_dir directory where the\n # data generated will be stored.\n vocab_filepath = os.path.join(data_dir, self.vocab_filename)\n encoder = text_encoder.SubwordTextEncoder(vocab_filepath)\n return encoder\n\n def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):\n generator = self.generate_samples(data_dir, tmp_dir, dataset_split)\n vocab = self.get_or_create_vocab(data_dir, tmp_dir)\n # For each example, encode the text and append EOS ID.\n for sample in generator:\n if self.has_inputs:\n sample[\"inputs\"] = vocab.encode(sample[\"inputs\"])\n sample[\"inputs\"].append(text_encoder.EOS_ID)\n sample[\"targets\"] = vocab.encode(sample[\"targets\"])\n sample[\"targets\"].append(text_encoder.EOS_ID)\n sample[\"dist_targets\"] = vocab.encode(sample[\"dist_targets\"])\n sample[\"dist_targets\"].append(text_encoder.EOS_ID)\n yield sample\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n data_path = self.source_data_files(dataset_split)\n assert tf.gfile.Exists(data_path)\n return text_problems.text2text_distill_iterator(data_path + \"inputs\",\n data_path + \"gold\",\n data_path + \"prediction\")\n", "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic models for testing simple tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_attention\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import common_video\nfrom tensor2tensor.layers import discretization\nfrom tensor2tensor.models.video import base\nfrom tensor2tensor.models.video import basic_deterministic_params # pylint: disable=unused-import\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\[email protected]_model\nclass NextFrameBasicDeterministic(base.NextFrameBase):\n \"\"\"Basic next-frame model, may take actions and predict rewards too.\"\"\"\n\n @property\n def is_recurrent_model(self):\n return False\n\n def inject_latent(self, layer, inputs, target, action):\n del inputs, target, action\n return layer, 0.0\n\n def middle_network(self, layer, internal_states):\n # Run a stack of convolutions.\n activation_fn = common_layers.belu\n if self.hparams.activation_fn == \"relu\":\n 
activation_fn = tf.nn.relu\n x = layer\n kernel1 = (3, 3)\n filters = common_layers.shape_list(x)[-1]\n for i in range(self.hparams.num_hidden_layers):\n with tf.variable_scope(\"layer%d\" % i):\n y = tf.nn.dropout(x, 1.0 - self.hparams.residual_dropout)\n y = tf.layers.conv2d(y, filters, kernel1, activation=activation_fn,\n strides=(1, 1), padding=\"SAME\")\n if i == 0:\n x = y\n else:\n x = common_layers.layer_norm(x + y)\n return x, internal_states\n\n def update_internal_states_early(self, internal_states, frames):\n \"\"\"Update the internal states early in the network if requested.\"\"\"\n del frames\n return internal_states\n\n def next_frame(self, frames, actions, rewards, target_frame,\n internal_states, video_extra):\n del rewards, video_extra\n\n hparams = self.hparams\n filters = hparams.hidden_size\n kernel2 = (4, 4)\n action = actions[-1]\n activation_fn = common_layers.belu\n if self.hparams.activation_fn == \"relu\":\n activation_fn = tf.nn.relu\n\n # Normalize frames.\n frames = [common_layers.standardize_images(f) for f in frames]\n\n # Stack the inputs.\n if internal_states is not None and hparams.concat_internal_states:\n # Use the first part of the first internal state if asked to concatenate.\n batch_size = common_layers.shape_list(frames[0])[0]\n internal_state = internal_states[0][0][:batch_size, :, :, :]\n stacked_frames = tf.concat(frames + [internal_state], axis=-1)\n else:\n stacked_frames = tf.concat(frames, axis=-1)\n inputs_shape = common_layers.shape_list(stacked_frames)\n\n # Update internal states early if requested.\n if hparams.concat_internal_states:\n internal_states = self.update_internal_states_early(\n internal_states, frames)\n\n # Using non-zero bias initializer below for edge cases of uniform inputs.\n x = tf.layers.dense(\n stacked_frames, filters, name=\"inputs_embed\",\n bias_initializer=tf.random_normal_initializer(stddev=0.01))\n x = common_attention.add_timing_signal_nd(x)\n\n # Down-stride.\n layer_inputs = [x]\n for i in range(hparams.num_compress_steps):\n with tf.variable_scope(\"downstride%d\" % i):\n layer_inputs.append(x)\n x = tf.nn.dropout(x, 1.0 - self.hparams.dropout)\n x = common_layers.make_even_size(x)\n if i < hparams.filter_double_steps:\n filters *= 2\n x = common_attention.add_timing_signal_nd(x)\n x = tf.layers.conv2d(x, filters, kernel2, activation=activation_fn,\n strides=(2, 2), padding=\"SAME\")\n x = common_layers.layer_norm(x)\n\n if self.has_actions:\n with tf.variable_scope(\"policy\"):\n x_flat = tf.layers.flatten(x)\n policy_pred = tf.layers.dense(x_flat, self.hparams.problem.num_actions)\n value_pred = tf.layers.dense(x_flat, 1)\n value_pred = tf.squeeze(value_pred, axis=-1)\n else:\n policy_pred, value_pred = None, None\n\n # Add embedded action if present.\n if self.has_actions:\n x = common_video.inject_additional_input(\n x, action, \"action_enc\", hparams.action_injection)\n\n # Inject latent if present. 
Only for stochastic models.\n norm_target_frame = common_layers.standardize_images(target_frame)\n x, extra_loss = self.inject_latent(x, frames, norm_target_frame, action)\n\n x_mid = tf.reduce_mean(x, axis=[1, 2], keepdims=True)\n x, internal_states = self.middle_network(x, internal_states)\n\n # Up-convolve.\n layer_inputs = list(reversed(layer_inputs))\n for i in range(hparams.num_compress_steps):\n with tf.variable_scope(\"upstride%d\" % i):\n x = tf.nn.dropout(x, 1.0 - self.hparams.dropout)\n if self.has_actions:\n x = common_video.inject_additional_input(\n x, action, \"action_enc\", hparams.action_injection)\n if i >= hparams.num_compress_steps - hparams.filter_double_steps:\n filters //= 2\n x = tf.layers.conv2d_transpose(\n x, filters, kernel2, activation=activation_fn,\n strides=(2, 2), padding=\"SAME\")\n y = layer_inputs[i]\n shape = common_layers.shape_list(y)\n x = x[:, :shape[1], :shape[2], :]\n x = common_layers.layer_norm(x + y)\n x = common_attention.add_timing_signal_nd(x)\n\n # Cut down to original size.\n x = x[:, :inputs_shape[1], :inputs_shape[2], :]\n x_fin = tf.reduce_mean(x, axis=[1, 2], keepdims=True)\n if hparams.do_autoregressive_rnn:\n # If enabled, we predict the target frame autoregregressively using rnns.\n # To this end, the current prediciton is flattened into one long sequence\n # of sub-pixels, and so is the target frame. Each sub-pixel (RGB value,\n # from 0 to 255) is predicted with an RNN. To avoid doing as many steps\n # as width * height * channels, we only use a number of pixels back,\n # as many as hparams.autoregressive_rnn_lookback.\n with tf.variable_scope(\"autoregressive_rnn\"):\n batch_size = common_layers.shape_list(frames[0])[0]\n # Height, width, channels and lookback are the constants we need.\n h, w = inputs_shape[1], inputs_shape[2] # 105, 80 on Atari games\n c = hparams.problem.num_channels\n lookback = hparams.autoregressive_rnn_lookback\n assert (h * w) % lookback == 0, \"Number of pixels must divide lookback.\"\n m = (h * w) // lookback # Batch size multiplier for the RNN.\n # These are logits that will be used as inputs to the RNN.\n rnn_inputs = tf.layers.dense(x, c * 64, name=\"rnn_inputs\")\n # They are of shape [batch_size, h, w, c, 64], reshaping now.\n rnn_inputs = tf.reshape(rnn_inputs, [batch_size * m, lookback * c, 64])\n # Same for the target frame.\n rnn_target = tf.reshape(target_frame, [batch_size * m, lookback * c])\n # Construct rnn starting state: flatten rnn_inputs, apply a relu layer.\n rnn_start_state = tf.nn.relu(tf.layers.dense(tf.nn.relu(\n tf.layers.flatten(rnn_inputs)), 256, name=\"rnn_start_state\"))\n # Our RNN function API is on bits, each subpixel has 8 bits.\n total_num_bits = lookback * c * 8\n # We need to provide RNN targets as bits (due to the API).\n rnn_target_bits = discretization.int_to_bit(rnn_target, 8)\n rnn_target_bits = tf.reshape(\n rnn_target_bits, [batch_size * m, total_num_bits])\n if self.is_training:\n # Run the RNN in training mode, add it's loss to the losses.\n rnn_predict, rnn_loss = discretization.predict_bits_with_lstm(\n rnn_start_state, 128, total_num_bits, target_bits=rnn_target_bits,\n extra_inputs=rnn_inputs)\n extra_loss += rnn_loss\n # We still use non-RNN predictions too in order to guide the network.\n x = tf.layers.dense(x, c * 256, name=\"logits\")\n x = tf.reshape(x, [batch_size, h, w, c, 256])\n rnn_predict = tf.reshape(rnn_predict, [batch_size, h, w, c, 256])\n # Mix non-RNN and RNN predictions so that after warmup the RNN is 90%.\n x = 
tf.reshape(tf.nn.log_softmax(x), [batch_size, h, w, c * 256])\n rnn_predict = tf.nn.log_softmax(rnn_predict)\n rnn_predict = tf.reshape(rnn_predict, [batch_size, h, w, c * 256])\n alpha = 0.9 * common_layers.inverse_lin_decay(\n hparams.autoregressive_rnn_warmup_steps)\n x = alpha * rnn_predict + (1.0 - alpha) * x\n else:\n # In prediction mode, run the RNN without any targets.\n bits, _ = discretization.predict_bits_with_lstm(\n rnn_start_state, 128, total_num_bits, extra_inputs=rnn_inputs,\n temperature=0.0) # No sampling from this RNN, just greedy.\n # The output is in bits, get back the predicted pixels.\n bits = tf.reshape(bits, [batch_size * m, lookback * c, 8])\n ints = discretization.bit_to_int(tf.maximum(bits, 0), 8)\n ints = tf.reshape(ints, [batch_size, h, w, c])\n x = tf.reshape(tf.one_hot(ints, 256), [batch_size, h, w, c * 256])\n elif self.is_per_pixel_softmax:\n x = tf.layers.dense(x, hparams.problem.num_channels * 256, name=\"logits\")\n else:\n x = tf.layers.dense(x, hparams.problem.num_channels, name=\"logits\")\n\n reward_pred = None\n if self.has_rewards:\n # Reward prediction based on middle and final logits.\n reward_pred = tf.concat([x_mid, x_fin], axis=-1)\n reward_pred = tf.nn.relu(tf.layers.dense(\n reward_pred, 128, name=\"reward_pred\"))\n reward_pred = tf.squeeze(reward_pred, axis=1) # Remove extra dims\n reward_pred = tf.squeeze(reward_pred, axis=1) # Remove extra dims\n\n return x, reward_pred, policy_pred, value_pred, extra_loss, internal_states\n" ]
[ [ "numpy.dot", "tensorflow.contrib.eager.run_test_in_graph_and_eager_modes", "tensorflow.zeros", "tensorflow.compat.v1.enable_eager_execution", "numpy.concatenate", "numpy.exp", "numpy.pad", "numpy.reshape", "tensorflow.gradients", "tensorflow.test.main", "tensorflow.to_float", "tensorflow.random_normal_initializer", "numpy.zeros", "tensorflow.global_variables_initializer", "numpy.random.rand", "numpy.transpose", "numpy.array", "numpy.sum", "tensorflow.constant", "tensorflow.range", "numpy.set_printoptions", "tensorflow.ones", "tensorflow.expand_dims", "numpy.prod", "tensorflow.random_uniform", "tensorflow.random_normal" ], [ "tensorflow.variable_scope", "tensorflow.to_int32", "tensorflow.reshape", "tensorflow.logging.info" ], [ "tensorflow.gfile.Open", "tensorflow.gfile.Exists", "tensorflow.gfile.GFile", "tensorflow.Summary.Value", "tensorflow.logging.info", "tensorflow.contrib.slim.tfexample_decoder.Tensor", "tensorflow.VarLenFeature" ], [ "tensorflow.layers.conv2d", "tensorflow.layers.flatten", "tensorflow.concat", "tensorflow.nn.log_softmax", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.squeeze", "tensorflow.layers.dense", "tensorflow.layers.conv2d_transpose", "tensorflow.one_hot", "tensorflow.variable_scope", "tensorflow.random_normal_initializer", "tensorflow.nn.dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
NCLPhD/FedML
[ "ffa15262ee963b9c856f34f0b2202f4dfeb3a76b", "ffa15262ee963b9c856f34f0b2202f4dfeb3a76b", "ffa15262ee963b9c856f34f0b2202f4dfeb3a76b", "ffa15262ee963b9c856f34f0b2202f4dfeb3a76b" ]
[ "python/fedml/cross_silo/hierarchical/trainer_dist_adapter.py", "python/fedml/simulation/mpi_p2p_mp/fedavg_robust/FedAvgRobustAggregator.py", "python/fedml/simulation/single_process/fednova/client.py", "python/fedml/simulation/mpi_p2p_mp/turboaggregate/mpc_function.py" ]
[ "from torch.nn.parallel import DistributedDataParallel as DDP\nimport torch.distributed as dist\n\nfrom .fedml_trainer import FedMLTrainer\nfrom .process_group_manager import ProcessGroupManager\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom .trainer.my_model_trainer_classification import MyModelTrainer as MyModelTrainerCLS\nfrom .trainer.my_model_trainer_nwp import MyModelTrainer as MyModelTrainerNWP\nfrom .trainer.my_model_trainer_tag_prediction import MyModelTrainer as MyModelTrainerTAG\nfrom ...utils.logging import logger\nfrom .fedml_trainer import FedMLTrainer\n# import torch\n# import time\n\n# from ...standalone.fedavg.my_model_trainer_classification import MyModelTrainer as MyModelTrainerCLS\n# from ...standalone.fedavg.my_model_trainer_nwp import MyModelTrainer as MyModelTrainerNWP\n# from ...standalone.fedavg.my_model_trainer_tag_prediction import MyModelTrainer as MyModelTrainerTAG\n# from .process_group_manager import ProcessGroupManager\n# from .utils import transform_list_to_tensor, post_complete_message_to_sweep_process\n# from .message_define import MyMessage\n# import logging\n# import os\n# import sys\n\n# sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), \"../../../\")))\n# sys.path.insert(0, os.path.abspath(\n# os.path.join(os.getcwd(), \"../../../../FedML\")))\n\n# try:\n# from fedml_core.distributed.client.client_manager import ClientManager\n# from fedml_core.distributed.communication.message import Message\n# from fedml_core.distributed.communication.utils import log_round_start, log_round_end\n# except ImportError:\n# from fedml_core.distributed.client.client_manager import ClientManager\n# from fedml_core.distributed.communication.message import Message\n# from fedml_core.distributed.communication.utils import log_round_start, log_round_end\n\n\nclass TrainerDistAdapter:\n def __init__(\n self,\n args,\n device,\n client_rank,\n model,\n train_data_num,\n train_data_local_num_dict,\n train_data_local_dict,\n test_data_local_dict,\n model_trainer=None,\n ):\n\n only_gpu = args.using_gpu\n\n self.process_group_manager = ProcessGroupManager(\n args.silo_proc_rank, args.silo_proc_num, args.pg_master_address, args.pg_master_port, only_gpu\n )\n\n # if not args.is_mobile:\n model.to(device)\n model = DDP(model, device_ids=[device] if only_gpu else None)\n\n\n client_index = client_rank - 1\n if model_trainer is None:\n model_trainer = self.get_model_trainer(model, args)\n model_trainer.set_id(client_index)\n logger.info(\"Initiating Trainer\")\n trainer = self.get_trainer(\n client_index,\n train_data_local_dict,\n train_data_local_num_dict,\n test_data_local_dict,\n train_data_num,\n device,\n args,\n model_trainer,\n )\n self.client_index = client_index\n self.client_rank = client_rank\n self.device = device\n self.trainer = trainer\n self.args = args\n\n def get_trainer(\n self,\n client_index,\n train_data_local_dict,\n train_data_local_num_dict,\n test_data_local_dict,\n train_data_num,\n device,\n args,\n model_trainer,\n ):\n return FedMLTrainer(\n client_index,\n train_data_local_dict,\n train_data_local_num_dict,\n test_data_local_dict,\n train_data_num,\n device,\n args,\n model_trainer,\n )\n\n def get_model_trainer(self, model, args):\n\n if args.dataset == \"stackoverflow_lr\":\n model_trainer = MyModelTrainerTAG(model, args, args.enable_cuda_rpc)\n elif args.dataset in [\"fed_shakespeare\", \"stackoverflow_nwp\"]:\n model_trainer = MyModelTrainerNWP(model, args, args.enable_cuda_rpc)\n else: # default model trainer is for 
classification problem\n model_trainer = MyModelTrainerCLS(model, args, args.enable_cuda_rpc)\n return model_trainer\n\n def train(self, round_idx):\n\n # log_round_start(self.client_rank, round_idx)\n\n dist.barrier()\n weights, local_sample_num = self.trainer.train(round_idx)\n return weights, local_sample_num\n\n def update_model(self, model_params):\n self.trainer.update_model(model_params)\n\n def update_dataset(self, client_index=None):\n _client_index = client_index or self.client_index\n self.trainer.update_dataset(int(_client_index))\n\n def cleanup_pg(self):\n logger.info(\n \"Cleaningup process group for client %s in silo %s\" % (\n self.args.silo_proc_rank, self.args.client_rank)\n )\n self.process_group_manager.cleanup()\n", "import copy\nimport logging\nimport time\n\nimport numpy as np\nimport torch\nimport wandb\nfrom torch import nn\n\nfrom .utils import transform_list_to_tensor\nfrom ....core.robustness.robust_aggregation import RobustAggregator, is_weight_param\n\nfrom ....utils.logging import logger\n\n\ndef test(\n model,\n device,\n test_loader,\n criterion,\n mode=\"raw-task\",\n dataset=\"cifar10\",\n poison_type=\"fashion\",\n):\n class_correct = list(0.0 for i in range(10))\n class_total = list(0.0 for i in range(10))\n\n if dataset in (\"mnist\", \"emnist\"):\n target_class = 7\n if mode == \"raw-task\":\n classes = [str(i) for i in range(10)]\n elif mode == \"targetted-task\":\n if poison_type == \"ardis\":\n classes = [str(i) for i in range(10)]\n else:\n classes = [\n \"T-shirt/top\",\n \"Trouser\",\n \"Pullover\",\n \"Dress\",\n \"Coat\",\n \"Sandal\",\n \"Shirt\",\n \"Sneaker\",\n \"Bag\",\n \"Ankle boot\",\n ]\n elif dataset == \"cifar10\":\n classes = (\n \"plane\",\n \"car\",\n \"bird\",\n \"cat\",\n \"deer\",\n \"dog\",\n \"frog\",\n \"horse\",\n \"ship\",\n \"truck\",\n )\n # target_class = 2 for greencar, 9 for southwest\n if poison_type in (\"howto\", \"greencar-neo\"):\n target_class = 2\n else:\n target_class = 9\n\n model.eval()\n test_loss = 0\n correct = 0\n backdoor_correct = 0\n backdoor_tot = 0\n final_acc = 0\n task_acc = None\n\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n _, predicted = torch.max(output, 1)\n c = (predicted == target).squeeze()\n\n # test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n test_loss += criterion(output, target).item()\n pred = output.argmax(\n dim=1, keepdim=True\n ) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n # check backdoor accuracy\n if poison_type == \"ardis\":\n backdoor_index = torch.where(target == target_class)\n target_backdoor = torch.ones_like(target[backdoor_index])\n predicted_backdoor = predicted[backdoor_index]\n backdoor_correct += (predicted_backdoor == target_backdoor).sum().item()\n backdoor_tot = backdoor_index[0].shape[0]\n # logger.info(\"Target: {}\".format(target_backdoor))\n # logger.info(\"Predicted: {}\".format(predicted_backdoor))\n\n # for image_index in range(test_batch_size):\n for image_index in range(len(target)):\n label = target[image_index]\n class_correct[label] += c[image_index].item()\n class_total[label] += 1\n test_loss /= len(test_loader.dataset)\n\n if mode == \"raw-task\":\n for i in range(10):\n logger.info(\n \"Accuracy of %5s : %.2f %%\"\n % (classes[i], 100 * class_correct[i] / class_total[i])\n )\n\n if i == target_class:\n task_acc = 100 * class_correct[i] / class_total[i]\n\n 
logger.info(\n \"\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n\".format(\n test_loss,\n correct,\n len(test_loader.dataset),\n 100.0 * correct / len(test_loader.dataset),\n )\n )\n final_acc = 100.0 * correct / len(test_loader.dataset)\n\n elif mode == \"targetted-task\":\n\n if dataset in (\"mnist\", \"emnist\"):\n for i in range(10):\n logger.info(\n \"Accuracy of %5s : %.2f %%\"\n % (classes[i], 100 * class_correct[i] / class_total[i])\n )\n if poison_type == \"ardis\":\n # ensure 7 is being classified as 1\n logger.info(\n \"Backdoor Accuracy of %.2f : %.2f %%\"\n % (target_class, 100 * backdoor_correct / backdoor_tot)\n )\n final_acc = 100 * backdoor_correct / backdoor_tot\n else:\n # trouser acc\n final_acc = 100 * class_correct[1] / class_total[1]\n\n elif dataset == \"cifar10\":\n logger.info(\n \"#### Targetted Accuracy of %5s : %.2f %%\"\n % (\n classes[target_class],\n 100 * class_correct[target_class] / class_total[target_class],\n )\n )\n final_acc = 100 * class_correct[target_class] / class_total[target_class]\n return final_acc, task_acc\n\n\nclass FedAvgRobustAggregator(object):\n def __init__(\n self,\n train_global,\n test_global,\n all_train_data_num,\n train_data_local_dict,\n test_data_local_dict,\n train_data_local_num_dict,\n worker_num,\n device,\n model,\n targetted_task_test_loader,\n num_dps_poisoned_dataset,\n args,\n ):\n self.train_global = train_global\n self.test_global = test_global\n self.all_train_data_num = all_train_data_num\n\n self.train_data_local_dict = train_data_local_dict\n self.test_data_local_dict = test_data_local_dict\n self.train_data_local_num_dict = train_data_local_num_dict\n\n self.worker_num = worker_num\n self.device = device\n self.args = args\n self.model_dict = dict()\n self.sample_num_dict = dict()\n self.flag_client_model_uploaded_dict = dict()\n\n self.robust_aggregator = RobustAggregator(args)\n\n self.targetted_task_test_loader = targetted_task_test_loader\n self.num_dps_poisoned_dataset = num_dps_poisoned_dataset\n\n self.adversary_fl_rounds = [\n i for i in range(1, args.comm_round + 1) if (i - 1) % args.attack_freq == 0\n ]\n\n for idx in range(self.worker_num):\n self.flag_client_model_uploaded_dict[idx] = False\n self.model, _ = self.init_model(model)\n\n def init_model(self, model):\n model_params = model.state_dict()\n # logging.info(model)\n return model, model_params\n\n def get_global_model_params(self):\n return self.model.state_dict()\n\n def add_local_trained_result(self, index, model_params, sample_num):\n logging.info(\"add_model. 
index = %d\" % index)\n self.model_dict[index] = model_params\n self.sample_num_dict[index] = sample_num\n self.flag_client_model_uploaded_dict[index] = True\n\n def check_whether_all_receive(self):\n for idx in range(self.worker_num):\n if not self.flag_client_model_uploaded_dict[idx]:\n return False\n for idx in range(self.worker_num):\n self.flag_client_model_uploaded_dict[idx] = False\n return True\n\n def aggregate(self):\n start_time = time.time()\n model_list = []\n training_num = 0\n\n for idx in range(self.worker_num):\n if self.args.is_mobile == 1:\n self.model_dict[idx] = transform_list_to_tensor(self.model_dict[idx])\n\n # conduct the defense here:\n local_sample_number, local_model_params = (\n self.sample_num_dict[idx],\n self.model_dict[idx],\n )\n\n if self.robust_aggregator.defense_type in (\"norm_diff_clipping\", \"weak_dp\"):\n clipped_local_state_dict = self.robust_aggregator.norm_diff_clipping(\n local_model_params, self.model.state_dict()\n )\n else:\n raise NotImplementedError(\"Non-supported Defense type ... \")\n model_list.append((local_sample_number, clipped_local_state_dict))\n\n training_num += self.sample_num_dict[idx]\n\n logging.info(\"len of self.model_dict[idx] = \" + str(len(self.model_dict)))\n\n # logging.info(\"################aggregate: %d\" % len(model_list))\n (num0, averaged_params) = model_list[0]\n\n for k in averaged_params.keys():\n for i in range(0, len(model_list)):\n local_sample_number, local_model_params = model_list[i]\n w = local_sample_number / training_num\n\n local_layer_update = local_model_params[k]\n\n if self.robust_aggregator.defense_type == \"weak_dp\":\n if is_weight_param(k):\n local_layer_update = self.robust_aggregator.add_noise(\n local_layer_update, self.device\n )\n\n if i == 0:\n averaged_params[k] = local_model_params[k] * w\n else:\n averaged_params[k] += local_model_params[k] * w\n\n # update the global model which is cached at the server side\n self.model.load_state_dict(averaged_params)\n\n end_time = time.time()\n logging.info(\"aggregate time cost: %d\" % (end_time - start_time))\n return averaged_params\n\n def client_sampling(self, round_idx, client_num_in_total, client_num_per_round):\n num_clients = min(client_num_per_round, client_num_in_total)\n np.random.seed(\n round_idx\n ) # make sure for each comparison, we are selecting the same clients each round\n if round_idx not in self.adversary_fl_rounds:\n client_indexes = np.random.choice(\n range(client_num_in_total), num_clients, replace=False\n )\n else:\n client_indexes = np.array(\n [1]\n + list(\n np.random.choice(\n range(client_num_in_total), num_clients, replace=False\n )\n )\n ) # we gaurantee that the attacker will participate in a certain frequency\n logging.info(\"client_indexes = %s\" % str(client_indexes))\n return client_indexes\n\n def test_on_all_clients(self, round_idx):\n if (\n round_idx % self.args.frequency_of_the_test == 0\n or round_idx == self.args.comm_round - 1\n ):\n logging.info(\n \"################local_test_on_all_clients : {}\".format(round_idx)\n )\n train_num_samples = []\n train_tot_corrects = []\n train_losses = []\n\n test_num_samples = []\n test_tot_corrects = []\n test_losses = []\n for client_idx in range(self.args.client_num_in_total):\n # train data\n train_tot_correct, train_num_sample, train_loss = self._infer(\n self.train_data_local_dict[client_idx]\n )\n train_tot_corrects.append(copy.deepcopy(train_tot_correct))\n train_num_samples.append(copy.deepcopy(train_num_sample))\n 
train_losses.append(copy.deepcopy(train_loss))\n\n # test data\n test_tot_correct, test_num_sample, test_loss = self._infer(\n self.test_data_local_dict[client_idx]\n )\n test_tot_corrects.append(copy.deepcopy(test_tot_correct))\n test_num_samples.append(copy.deepcopy(test_num_sample))\n test_losses.append(copy.deepcopy(test_loss))\n\n # test on training dataset\n train_acc = sum(train_tot_corrects) / sum(train_num_samples)\n train_loss = sum(train_losses) / sum(train_num_samples)\n wandb.log({\"Train/Acc\": train_acc, \"round\": round_idx})\n wandb.log({\"Train/Loss\": train_loss, \"round\": round_idx})\n stats = {\"training_acc\": train_acc, \"training_loss\": train_loss}\n logging.info(stats)\n\n # test on test dataset\n test_acc = sum(test_tot_corrects) / sum(test_num_samples)\n test_loss = sum(test_losses) / sum(test_num_samples)\n wandb.log({\"Test/Acc\": test_acc, \"round\": round_idx})\n wandb.log({\"Test/Loss\": test_loss, \"round\": round_idx})\n stats = {\"test_acc\": test_acc, \"test_loss\": test_loss}\n logging.info(stats)\n\n def test_target_accuracy(self, round_idx):\n test(\n self.model,\n self.device,\n self.targetted_task_test_loader,\n criterion=nn.CrossEntropyLoss().to(self.device),\n mode=\"targetted-task\",\n dataset=self.args.dataset,\n poison_type=self.args.poison_type,\n )\n\n def _infer(self, test_data):\n self.model.eval()\n self.model.to(self.device)\n\n test_loss = test_acc = test_total = 0.0\n criterion = nn.CrossEntropyLoss().to(self.device)\n with torch.no_grad():\n for batch_idx, (x, target) in enumerate(test_data):\n x = x.to(self.device)\n target = target.to(self.device)\n pred = self.model(x)\n loss = criterion(pred, target)\n _, predicted = torch.max(pred, -1)\n correct = predicted.eq(target).sum()\n\n test_acc += correct.item()\n test_loss += loss.item() * target.size(0)\n test_total += target.size(0)\n\n return test_acc, test_total, test_loss\n", "import logging\r\nimport copy\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom .fednova import FedNova\r\n\r\n\r\nclass Client:\r\n\r\n def __init__(self, client_idx, local_training_data, local_test_data, local_sample_number, args, device):\r\n self.client_idx = client_idx\r\n self.local_training_data = local_training_data\r\n self.local_test_data = local_test_data\r\n self.local_sample_number = local_sample_number\r\n logging.info(\"self.local_sample_number = \" + str(self.local_sample_number))\r\n\r\n self.args = args\r\n self.device = device\r\n\r\n '''\r\n stackoverflow_lr is the task of multi-label classification\r\n please refer to following links for detailed explainations on cross-entropy and corresponding implementation of tff research:\r\n https://towardsdatascience.com/cross-entropy-for-classification-d98e7f974451\r\n https://github.com/google-research/federated/blob/49a43456aa5eaee3e1749855eed89c0087983541/optimization/stackoverflow_lr/federated_stackoverflow_lr.py#L131\r\n '''\r\n if self.args.dataset == \"stackoverflow_lr\":\r\n self.criterion = nn.BCELoss(reduction = 'sum').to(device)\r\n else:\r\n self.criterion = nn.CrossEntropyLoss().to(device)\r\n\r\n def update_local_dataset(self, client_idx, local_training_data, local_test_data, local_sample_number):\r\n self.client_idx = client_idx\r\n self.local_training_data = local_training_data\r\n self.local_test_data = local_test_data\r\n self.local_sample_number = local_sample_number\r\n\r\n def get_sample_number(self):\r\n return self.local_sample_number\r\n \r\n def get_local_norm_grad(self, opt, cur_params, init_params, weight=0):\r\n if 
weight == 0:\r\n weight = opt.ratio\r\n grad_dict = {}\r\n for k in cur_params.keys():\r\n scale = 1.0/opt.local_normalizing_vec\r\n cum_grad = init_params[k] - cur_params[k] \r\n cum_grad.mul_(weight*scale)\r\n grad_dict[k] = cum_grad\r\n return grad_dict\r\n \r\n def get_local_tau_eff(self, opt):\r\n if opt.mu != 0:\r\n return opt.local_steps*opt.ratio\r\n else:\r\n return opt.local_normalizing_vec*opt.ratio\r\n \r\n def reset_fednova_optimizer(self, opt):\r\n opt.local_counter = 0\r\n opt.local_normalizing_vec = 0\r\n opt.local_steps = 0\r\n for group in opt.param_groups:\r\n for p in group['params']:\r\n param_state = opt.state[p]\r\n param_state['cum_grad'].zero_()\r\n # Reinitialize momentum buffer\r\n if 'momentum_buffer' in param_state:\r\n param_state['momentum_buffer'].zero_()\r\n \r\n def train(self, net, ratio):\r\n net.train()\r\n # train and update\r\n init_params = copy.deepcopy(net.state_dict())\r\n optimizer = FedNova(net.parameters(), \r\n lr=self.args.lr, \r\n gmf=self.args.gmf, \r\n mu=self.args.mu, \r\n ratio=ratio,\r\n momentum=self.args.momentum,\r\n dampening=self.args.dampening,\r\n weight_decay=self.args.wd, \r\n nesterov=self.args.nesterov)\r\n\r\n epoch_loss = []\r\n for epoch in range(self.args.epochs):\r\n batch_loss = []\r\n for batch_idx, (x, labels) in enumerate(self.local_training_data):\r\n x, labels = x.to(self.device), labels.to(self.device)\r\n net = net.to(self.device)\r\n net.zero_grad()\r\n log_probs = net(x)\r\n loss = self.criterion(log_probs, labels)\r\n loss.backward()\r\n\r\n # to avoid nan loss\r\n # torch.nn.utils.clip_grad_norm_(net.parameters(), 0.5)\r\n\r\n optimizer.step()\r\n # logging.info('Update Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\r\n # epoch, (batch_idx + 1) * self.args.batch_size, len(self.local_training_data) * self.args.batch_size,\r\n # 100. 
* (batch_idx + 1) / len(self.local_training_data), loss.item()))\r\n batch_loss.append(loss.item())\r\n epoch_loss.append(sum(batch_loss) / len(batch_loss))\r\n # logging.info('Client Index = {}\\tEpoch: {}\\tLoss: {:.6f}'.format(\r\n # self.client_idx, epoch, sum(epoch_loss) / len(epoch_loss)))\r\n norm_grad = self.get_local_norm_grad(optimizer, net.state_dict(), init_params)\r\n tau_eff = self.get_local_tau_eff(optimizer)\r\n # self.reset_fednova_optimizer(optimizer)\r\n return sum(epoch_loss) / len(epoch_loss), norm_grad, tau_eff\r\n \r\n\r\n def local_test(self, model_global, b_use_test_dataset=False):\r\n model_global.eval()\r\n model_global.to(self.device)\r\n metrics = { \r\n 'test_correct': 0, \r\n 'test_loss' : 0, \r\n 'test_precision': 0,\r\n 'test_recall': 0,\r\n 'test_total' : 0\r\n }\r\n if b_use_test_dataset:\r\n test_data = self.local_test_data\r\n else:\r\n test_data = self.local_training_data\r\n with torch.no_grad():\r\n for batch_idx, (x, target) in enumerate(test_data):\r\n x = x.to(self.device)\r\n target = target.to(self.device)\r\n pred = model_global(x)\r\n loss = self.criterion(pred, target)\r\n\r\n if self.args.dataset == \"stackoverflow_lr\":\r\n predicted = (pred > .5).int()\r\n correct = predicted.eq(target).sum(axis=-1).eq(target.size(1)).sum()\r\n true_positive = ((target * predicted) > .1).int().sum(axis=-1)\r\n precision = true_positive / (predicted.sum(axis=-1) + 1e-13)\r\n recall = true_positive / (target.sum(axis=-1) + 1e-13)\r\n metrics['test_precision'] += precision.sum().item()\r\n metrics['test_recall'] += recall.sum().item()\r\n else:\r\n _, predicted = torch.max(pred, -1)\r\n correct = predicted.eq(target).sum()\r\n\r\n metrics['test_correct'] += correct.item()\r\n metrics['test_loss'] += loss.item() * target.size(0)\r\n metrics['test_total'] += target.size(0)\r\n\r\n return metrics\r\n", "import numpy as np\n\n\ndef modular_inv(a, p):\n x, y, m = 1, 0, p\n while a > 1:\n q = a // m\n t = m\n\n m = np.mod(a, m)\n a = t\n t = y\n\n y, x = x - np.int64(q) * np.int64(y), t\n\n if x < 0:\n x = np.mod(x, p)\n return np.mod(x, p)\n\n\ndef divmod(_num, _den, _p):\n # compute num / den modulo prime p\n _num = np.mod(_num, _p)\n _den = np.mod(_den, _p)\n _inv = modular_inv(_den, _p)\n # print(_num,_den,_inv)\n return np.mod(np.int64(_num) * np.int64(_inv), _p)\n\n\ndef PI(vals, p): # upper-case PI -- product of inputs\n accum = 1\n\n for v in vals:\n tmp = np.mod(v, p)\n accum = np.mod(accum * tmp, p)\n return accum\n\n\ndef gen_Lagrange_coeffs(alpha_s, beta_s, p, is_K1=0):\n if is_K1 == 1:\n num_alpha = 1\n else:\n num_alpha = len(alpha_s)\n U = np.zeros((num_alpha, len(beta_s)), dtype=\"int64\")\n # U = [[0 for col in range(len(beta_s))] for row in range(len(alpha_s))]\n # print(alpha_s)\n # print(beta_s)\n for i in range(num_alpha):\n for j in range(len(beta_s)):\n cur_beta = beta_s[j]\n\n den = PI([cur_beta - o for o in beta_s if cur_beta != o], p)\n num = PI([alpha_s[i] - o for o in beta_s if cur_beta != o], p)\n U[i][j] = divmod(num, den, p)\n # for debugging\n # print(i,j,cur_beta,alpha_s[i])\n # print(test)\n # print(den,num)\n return U.astype(\"int64\")\n\n\ndef BGW_encoding(X, N, T, p):\n m = len(X)\n d = len(X[0])\n\n alpha_s = range(1, N + 1)\n alpha_s = np.int64(np.mod(alpha_s, p))\n X_BGW = np.zeros((N, m, d), dtype=\"int64\")\n R = np.random.randint(p, size=(T + 1, m, d))\n R[0, :, :] = np.mod(X, p)\n\n for i in range(N):\n for t in range(T + 1):\n X_BGW[i, :, :] = np.mod(X_BGW[i, :, :] + R[t, :, :] * (alpha_s[i] ** t), p)\n return 
X_BGW\n\n\ndef gen_BGW_lambda_s(alpha_s, p):\n lambda_s = np.zeros((1, len(alpha_s)), dtype=\"int64\")\n\n for i in range(len(alpha_s)):\n cur_alpha = alpha_s[i]\n\n den = PI([cur_alpha - o for o in alpha_s if cur_alpha != o], p)\n num = PI([0 - o for o in alpha_s if cur_alpha != o], p)\n lambda_s[0][i] = divmod(num, den, p)\n return lambda_s.astype(\"int64\")\n\n\ndef BGW_decoding(f_eval, worker_idx, p): # decode the output from T+1 evaluation points\n # f_eval : [RT X d ]\n # worker_idx : [ 1 X RT]\n # output : [ 1 X d ]\n\n # t0 = time.time()\n max = np.max(worker_idx) + 2\n alpha_s = range(1, max)\n alpha_s = np.int64(np.mod(alpha_s, p))\n alpha_s_eval = [alpha_s[i] for i in worker_idx]\n # t1 = time.time()\n # print(alpha_s_eval)\n lambda_s = gen_BGW_lambda_s(alpha_s_eval, p).astype(\"int64\")\n # t2 = time.time()\n # print(lambda_s.shape)\n f_recon = np.mod(np.dot(lambda_s, f_eval), p)\n # t3 = time.time()\n # print 'time info for BGW_dec', t1-t0, t2-t1, t3-t2\n return f_recon\n\n\ndef LCC_encoding(X, N, K, T, p):\n m = len(X)\n d = len(X[0])\n # print(m,d,m//K)\n X_sub = np.zeros((K + T, m // K, d), dtype=\"int64\")\n for i in range(K):\n X_sub[i] = X[i * m // K : (i + 1) * m // K :]\n for i in range(K, K + T):\n X_sub[i] = np.random.randint(p, size=(m // K, d))\n\n n_beta = K + T\n stt_b, stt_a = -int(np.floor(n_beta / 2)), -int(np.floor(N / 2))\n beta_s, alpha_s = range(stt_b, stt_b + n_beta), range(stt_a, stt_a + N)\n alpha_s = np.array(np.mod(alpha_s, p)).astype(\"int64\")\n beta_s = np.array(np.mod(beta_s, p)).astype(\"int64\")\n\n U = gen_Lagrange_coeffs(alpha_s, beta_s, p)\n # print U\n\n X_LCC = np.zeros((N, m // K, d), dtype=\"int64\")\n for i in range(N):\n for j in range(K + T):\n X_LCC[i, :, :] = np.mod(\n X_LCC[i, :, :] + np.mod(U[i][j] * X_sub[j, :, :], p), p\n )\n return X_LCC\n\n\ndef LCC_encoding_w_Random(X, R_, N, K, T, p):\n m = len(X)\n d = len(X[0])\n # print(m,d,m//K)\n X_sub = np.zeros((K + T, m // K, d), dtype=\"int64\")\n for i in range(K):\n X_sub[i] = X[i * m // K : (i + 1) * m // K :]\n for i in range(K, K + T):\n X_sub[i] = R_[i - K, :, :].astype(\"int64\")\n\n n_beta = K + T\n stt_b, stt_a = -int(np.floor(n_beta / 2)), -int(np.floor(N / 2))\n beta_s, alpha_s = range(stt_b, stt_b + n_beta), range(stt_a, stt_a + N)\n\n alpha_s = np.array(np.mod(alpha_s, p)).astype(\"int64\")\n beta_s = np.array(np.mod(beta_s, p)).astype(\"int64\")\n\n # alpha_s = np.int64(np.mod(alpha_s,p))\n # beta_s = np.int64(np.mod(beta_s,p))\n\n U = gen_Lagrange_coeffs(alpha_s, beta_s, p)\n # print U\n\n X_LCC = np.zeros((N, m // K, d), dtype=\"int64\")\n for i in range(N):\n for j in range(K + T):\n X_LCC[i, :, :] = np.mod(\n X_LCC[i, :, :] + np.mod(U[i][j] * X_sub[j, :, :], p), p\n )\n return X_LCC\n\n\ndef LCC_encoding_w_Random_partial(X, R_, N, K, T, p, worker_idx):\n m = len(X)\n d = len(X[0])\n # print(m,d,m//K)\n X_sub = np.zeros((K + T, m // K, d), dtype=\"int64\")\n for i in range(K):\n X_sub[i] = X[i * m // K : (i + 1) * m // K :]\n for i in range(K, K + T):\n X_sub[i] = R_[i - K, :, :].astype(\"int64\")\n\n n_beta = K + T\n stt_b, stt_a = -int(np.floor(n_beta / 2)), -int(np.floor(N / 2))\n beta_s, alpha_s = range(stt_b, stt_b + n_beta), range(stt_a, stt_a + N)\n alpha_s = np.array(np.mod(alpha_s, p)).astype(\"int64\")\n beta_s = np.array(np.mod(beta_s, p)).astype(\"int64\")\n alpha_s_eval = [alpha_s[i] for i in worker_idx]\n\n U = gen_Lagrange_coeffs(alpha_s_eval, beta_s, p)\n # print U\n\n N_out = U.shape[0]\n X_LCC = np.zeros((N_out, m // K, d), dtype=\"int64\")\n for i 
in range(N_out):\n for j in range(K + T):\n X_LCC[i, :, :] = np.mod(\n X_LCC[i, :, :] + np.mod(U[i][j] * X_sub[j, :, :], p), p\n )\n return X_LCC\n\n\ndef LCC_decoding(f_eval, f_deg, N, K, T, worker_idx, p):\n # RT_LCC = f_deg * (K + T - 1) + 1\n\n n_beta = K # +T\n stt_b, stt_a = -int(np.floor(n_beta / 2)), -int(np.floor(N / 2))\n beta_s, alpha_s = range(stt_b, stt_b + n_beta), range(stt_a, stt_a + N)\n alpha_s = np.array(np.mod(alpha_s, p)).astype(\"int64\")\n beta_s = np.array(np.mod(beta_s, p)).astype(\"int64\")\n alpha_s_eval = [alpha_s[i] for i in worker_idx]\n\n U_dec = gen_Lagrange_coeffs(beta_s, alpha_s_eval, p)\n\n # print U_dec\n\n f_recon = np.mod((U_dec).dot(f_eval), p)\n\n return f_recon.astype(\"int64\")\n\n\ndef Gen_Additive_SS(d, n_out, p):\n # x_model should be one dimension\n\n temp = np.random.randint(0, p, size=(n_out - 1, d))\n # print temp\n\n last_row = np.reshape(np.mod(-np.sum(temp, axis=0), p), (1, d))\n Additive_SS = np.concatenate((temp, last_row), axis=0)\n # print np.mod(np.sum(Additive_SS,axis=0),p)\n\n return Additive_SS\n\n\ndef LCC_encoding_with_points(X, alpha_s, beta_s, p):\n m, d = np.shape(X)\n\n # print alpha_s\n # print beta_s\n\n # for debugging LCC Enc & Dec\n # beta_s = np.concatenate((alpha_s, beta_s))\n # print beta_s\n\n U = gen_Lagrange_coeffs(beta_s, alpha_s, p).astype(\"int\")\n # print U\n\n X_LCC = np.zeros((len(beta_s), d), dtype=\"int\")\n for i in range(len(beta_s)):\n X_LCC[i, :] = np.dot(np.reshape(U[i, :], (1, len(alpha_s))), X)\n # print X\n # print np.mod(X_LCC, p)\n\n return np.mod(X_LCC, p)\n\n\ndef LCC_decoding_with_points(f_eval, eval_points, target_points, p):\n alpha_s_eval = eval_points\n beta_s = target_points\n\n U_dec = gen_Lagrange_coeffs(beta_s, alpha_s_eval, p)\n\n # print U_dec\n\n f_recon = np.mod((U_dec).dot(f_eval), p)\n # print f_recon\n\n return f_recon\n\n\ndef my_pk_gen(my_sk, p, g):\n # print 'my_pk_gen option: g=',g\n if g == 0:\n return my_sk\n else:\n return np.mod(g ** my_sk, p)\n\n\ndef my_key_agreement(my_sk, u_pk, p, g):\n if g == 0:\n return np.mod(my_sk * u_pk, p)\n else:\n return np.mod(u_pk ** my_sk, p)\n" ]
[ [ "torch.distributed.barrier", "torch.nn.parallel.DistributedDataParallel" ], [ "torch.nn.CrossEntropyLoss", "torch.max", "numpy.random.seed", "torch.no_grad", "torch.where", "torch.ones_like" ], [ "torch.nn.CrossEntropyLoss", "torch.no_grad", "torch.max", "torch.nn.BCELoss" ], [ "numpy.dot", "numpy.concatenate", "numpy.max", "numpy.int64", "numpy.shape", "numpy.floor", "numpy.mod", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexjuda2/z-quantum-core
[ "c258100dbd091f0b22495b77b36399426ae9abac" ]
[ "src/python/zquantum/core/circuits/conversions/qiskit_conversions.py" ]
[ "import hashlib\nfrom typing import Dict, Iterable, List, NamedTuple, Sequence, Tuple, Union\n\nimport numpy as np\nimport qiskit\nimport sympy\n\nfrom .. import _builtin_gates, _circuit, _gates\nfrom ..symbolic.qiskit_expressions import QISKIT_DIALECT, expression_from_qiskit\nfrom ..symbolic.sympy_expressions import SYMPY_DIALECT, expression_from_sympy\nfrom ..symbolic.translations import translate_expression\n\nQiskitOperation = Tuple[\n qiskit.circuit.Instruction, List[qiskit.circuit.Qubit], List[qiskit.circuit.Clbit]\n]\n\n\ndef qiskit_qubit(index: int, num_qubits_in_circuit: int) -> qiskit.circuit.Qubit:\n return qiskit.circuit.Qubit(\n qiskit.circuit.QuantumRegister(num_qubits_in_circuit, \"q\"), index\n )\n\n\ndef _import_qiskit_qubit(qubit: qiskit.circuit.Qubit) -> int:\n return qubit.index\n\n\ndef _qiskit_expr_from_zquantum(expr):\n intermediate = expression_from_sympy(expr)\n return translate_expression(intermediate, QISKIT_DIALECT)\n\n\ndef _zquantum_expr_from_qiskit(expr):\n intermediate = expression_from_qiskit(expr)\n return translate_expression(intermediate, SYMPY_DIALECT)\n\n\nZQUANTUM_QISKIT_GATE_MAP = {\n _builtin_gates.X: qiskit.circuit.library.XGate,\n _builtin_gates.Y: qiskit.circuit.library.YGate,\n _builtin_gates.Z: qiskit.circuit.library.ZGate,\n _builtin_gates.S: qiskit.circuit.library.SGate,\n _builtin_gates.T: qiskit.circuit.library.TGate,\n _builtin_gates.H: qiskit.circuit.library.HGate,\n _builtin_gates.I: qiskit.circuit.library.IGate,\n _builtin_gates.CNOT: qiskit.circuit.library.CXGate,\n _builtin_gates.CZ: qiskit.circuit.library.CZGate,\n _builtin_gates.SWAP: qiskit.circuit.library.SwapGate,\n _builtin_gates.ISWAP: qiskit.circuit.library.iSwapGate,\n _builtin_gates.RX: qiskit.circuit.library.RXGate,\n _builtin_gates.RY: qiskit.circuit.library.RYGate,\n _builtin_gates.RZ: qiskit.circuit.library.RZGate,\n _builtin_gates.PHASE: qiskit.circuit.library.PhaseGate,\n _builtin_gates.CPHASE: qiskit.circuit.library.CPhaseGate,\n _builtin_gates.XX: qiskit.circuit.library.RXXGate,\n _builtin_gates.YY: qiskit.circuit.library.RYYGate,\n _builtin_gates.ZZ: qiskit.circuit.library.RZZGate,\n _builtin_gates.U3: qiskit.circuit.library.U3Gate,\n}\n\n\ndef _make_gate_instance(gate_ref, gate_params) -> _gates.Gate:\n \"\"\"Returns a gate instance that's applicable to qubits.\n For non-parametric gate refs like X, returns just the `X`\n For parametric gate factories like `RX`, returns the produced gate, like `RX(0.2)`\n \"\"\"\n if _gates.gate_is_parametric(gate_ref, gate_params):\n return gate_ref(*gate_params)\n else:\n return gate_ref\n\n\ndef _make_controlled_gate_prototype(wrapped_gate_ref, num_control_qubits=1):\n def _factory(*gate_params):\n return _gates.ControlledGate(\n _make_gate_instance(wrapped_gate_ref, gate_params), num_control_qubits\n )\n\n return _factory\n\n\nQISKIT_ZQUANTUM_GATE_MAP = {\n **{q_cls: z_ref for z_ref, q_cls in ZQUANTUM_QISKIT_GATE_MAP.items()},\n qiskit.circuit.library.CSwapGate: _builtin_gates.SWAP.controlled(1),\n qiskit.circuit.library.CRXGate: _make_controlled_gate_prototype(_builtin_gates.RX),\n qiskit.circuit.library.CRYGate: _make_controlled_gate_prototype(_builtin_gates.RY),\n qiskit.circuit.library.CRZGate: _make_controlled_gate_prototype(_builtin_gates.RZ),\n}\n\n\ndef export_to_qiskit(circuit: _circuit.Circuit) -> qiskit.QuantumCircuit:\n q_circuit = qiskit.QuantumCircuit(circuit.n_qubits)\n custom_names = {\n gate_def.gate_name for gate_def in circuit.collect_custom_gate_definitions()\n }\n q_triplets = [\n 
_export_gate_to_qiskit(\n gate_op.gate,\n applied_qubit_indices=gate_op.qubit_indices,\n n_qubits_in_circuit=circuit.n_qubits,\n custom_names=custom_names,\n )\n for gate_op in circuit.operations\n ]\n for q_gate, q_qubits, q_clbits in q_triplets:\n q_circuit.append(q_gate, q_qubits, q_clbits)\n return q_circuit\n\n\ndef _export_gate_to_qiskit(\n gate, applied_qubit_indices, n_qubits_in_circuit, custom_names\n):\n try:\n return _export_gate_via_mapping(\n gate, applied_qubit_indices, n_qubits_in_circuit, custom_names\n )\n except ValueError:\n pass\n\n try:\n return _export_controlled_gate(\n gate, applied_qubit_indices, n_qubits_in_circuit, custom_names\n )\n except ValueError:\n pass\n\n try:\n return _export_custom_gate(\n gate, applied_qubit_indices, n_qubits_in_circuit, custom_names\n )\n except ValueError:\n pass\n\n raise NotImplementedError(f\"Exporting gate {gate} to Qiskit is unsupported\")\n\n\ndef _export_gate_via_mapping(\n gate, applied_qubit_indices, n_qubits_in_circuit, custom_names\n):\n try:\n qiskit_cls = ZQUANTUM_QISKIT_GATE_MAP[\n _builtin_gates.builtin_gate_by_name(gate.name)\n ]\n except KeyError:\n raise ValueError(f\"Can't export gate {gate} to Qiskit via mapping\")\n\n qiskit_params = [_qiskit_expr_from_zquantum(param) for param in gate.params]\n qiskit_qubits = [\n qiskit_qubit(qubit_i, n_qubits_in_circuit) for qubit_i in applied_qubit_indices\n ]\n\n return qiskit_cls(*qiskit_params), qiskit_qubits, []\n\n\ndef _export_controlled_gate(\n gate: _gates.ControlledGate,\n applied_qubit_indices,\n n_qubits_in_circuit,\n custom_names,\n):\n if not isinstance(gate, _gates.ControlledGate):\n # Raising an exception here is redundant to the type hint, but it allows us\n # to handle exporting all gates in the same way, regardless of type\n raise ValueError(f\"Can't export gate {gate} as a controlled gate\")\n\n target_indices = applied_qubit_indices[gate.num_control_qubits :]\n target_gate, _, _ = _export_gate_to_qiskit(\n gate.wrapped_gate,\n applied_qubit_indices=target_indices,\n n_qubits_in_circuit=n_qubits_in_circuit,\n custom_names=custom_names,\n )\n controlled_gate = target_gate.control(gate.num_control_qubits)\n qiskit_qubits = [\n qiskit_qubit(qubit_i, n_qubits_in_circuit) for qubit_i in applied_qubit_indices\n ]\n return controlled_gate, qiskit_qubits, []\n\n\ndef _export_custom_gate(\n gate: _gates.MatrixFactoryGate,\n applied_qubit_indices,\n n_qubits_in_circuit,\n custom_names,\n):\n if gate.name not in custom_names:\n raise ValueError(\n f\"Can't export gate {gate} as a custom gate, the circuit is missing its \"\n \"definition\"\n )\n\n if gate.params:\n raise ValueError(\n f\"Can't export parametrized gate {gate}, Qiskit doesn't support \"\n \"parametrized custom gates\"\n )\n # At that time of writing it Qiskit doesn't support parametrized gates defined with\n # a symbolic matrix.\n # See https://github.com/Qiskit/qiskit-terra/issues/4751 for more info.\n\n qiskit_qubits = [\n qiskit_qubit(qubit_i, n_qubits_in_circuit) for qubit_i in applied_qubit_indices\n ]\n qiskit_matrix = np.array(gate.matrix)\n return (\n qiskit.extensions.UnitaryGate(qiskit_matrix, label=gate.name),\n qiskit_qubits,\n [],\n )\n\n\nclass AnonGateOperation(NamedTuple):\n gate_name: str\n matrix: sympy.Matrix\n qubit_indices: Tuple[int, ...]\n\n\nImportedOperation = Union[_gates.GateOperation, AnonGateOperation]\n\n\ndef _apply_custom_gate(\n anon_op: AnonGateOperation, custom_defs_map: Dict[str, _gates.CustomGateDefinition]\n) -> _gates.GateOperation:\n gate_def = 
custom_defs_map[anon_op.gate_name]\n # Qiskit doesn't support custom gates with parametrized matrices\n # so we can assume empty params list.\n gate_params: Tuple[sympy.Symbol, ...] = tuple()\n gate = gate_def(*gate_params)\n\n return gate(*anon_op.qubit_indices)\n\n\ndef import_from_qiskit(circuit: qiskit.QuantumCircuit) -> _circuit.Circuit:\n q_ops = [_import_qiskit_triplet(triplet) for triplet in circuit.data]\n anon_ops = [op for op in q_ops if isinstance(op, AnonGateOperation)]\n\n # Qiskit doesn't support custom gates with parametrized matrices\n # so we can assume empty params list.\n params_ordering: Tuple[sympy.Symbol, ...] = tuple()\n custom_defs = {\n anon_op.gate_name: _gates.CustomGateDefinition(\n gate_name=anon_op.gate_name,\n matrix=anon_op.matrix,\n params_ordering=params_ordering,\n )\n for anon_op in anon_ops\n }\n imported_ops = [\n _apply_custom_gate(op, custom_defs) if isinstance(op, AnonGateOperation) else op\n for op in q_ops\n ]\n return _circuit.Circuit(\n operations=imported_ops,\n n_qubits=circuit.num_qubits,\n )\n\n\ndef _import_qiskit_triplet(qiskit_triplet: QiskitOperation) -> ImportedOperation:\n qiskit_op, qiskit_qubits, _ = qiskit_triplet\n\n return _import_qiskit_op(qiskit_op, qiskit_qubits)\n\n\ndef _import_qiskit_op(qiskit_op, qiskit_qubits) -> ImportedOperation:\n # We always wanna try importing via mapping to handle complex gate structures\n # represented by a single class, like CNOT (Control + X) or CSwap (Control + Swap).\n try:\n return _import_qiskit_op_via_mapping(qiskit_op, qiskit_qubits)\n except ValueError:\n pass\n\n try:\n return _import_controlled_qiskit_op(qiskit_op, qiskit_qubits)\n except ValueError:\n pass\n\n return _import_custom_qiskit_gate(qiskit_op, qiskit_qubits)\n\n\ndef _import_qiskit_op_via_mapping(\n qiskit_gate: qiskit.circuit.Instruction,\n qiskit_qubits: Iterable[qiskit.circuit.Qubit],\n) -> _gates.GateOperation:\n try:\n gate_ref = QISKIT_ZQUANTUM_GATE_MAP[type(qiskit_gate)]\n except KeyError:\n raise ValueError(f\"Conversion of {qiskit_gate} from Qiskit is unsupported.\")\n\n # values to consider:\n # - gate matrix parameters (only parametric gates)\n # - gate application indices (all gates)\n zquantum_params = [\n _zquantum_expr_from_qiskit(param) for param in qiskit_gate.params\n ]\n qubit_indices = [_import_qiskit_qubit(qubit) for qubit in qiskit_qubits]\n gate = _make_gate_instance(gate_ref, zquantum_params)\n return _gates.GateOperation(gate=gate, qubit_indices=tuple(qubit_indices))\n\n\ndef _import_controlled_qiskit_op(\n qiskit_gate: qiskit.circuit.ControlledGate,\n qiskit_qubits: Sequence[qiskit.circuit.Qubit],\n) -> _gates.GateOperation:\n if not isinstance(qiskit_gate, qiskit.circuit.ControlledGate):\n # Raising an exception here is redundant to the type hint, but it allows us\n # to handle exporting all gates in the same way, regardless of type\n raise ValueError(f\"Can't import gate {qiskit_gate} as a controlled gate\")\n\n wrapped_qubits = qiskit_qubits[qiskit_gate.num_ctrl_qubits :]\n wrapped_op = _import_qiskit_op(qiskit_gate.base_gate, wrapped_qubits)\n qubit_indices = map(_import_qiskit_qubit, qiskit_qubits)\n if isinstance(wrapped_op, _gates.GateOperation):\n return wrapped_op.gate.controlled(qiskit_gate.num_ctrl_qubits)(*qubit_indices)\n else:\n raise NotImplementedError(\n \"Importing of controlled anonymous gates not yet supported.\"\n )\n\n\ndef _hash_hex(bytes_):\n return hashlib.sha256(bytes_).hexdigest()\n\n\ndef _custom_qiskit_gate_name(gate_label: str, gate_name: str, matrix: np.ndarray):\n 
matrix_hash = _hash_hex(matrix.tobytes())\n target_name = gate_label or gate_name\n return f\"{target_name}.{matrix_hash}\"\n\n\ndef _import_custom_qiskit_gate(\n qiskit_op: qiskit.circuit.Gate, qiskit_qubits\n) -> AnonGateOperation:\n value_matrix = qiskit_op.to_matrix()\n return AnonGateOperation(\n gate_name=_custom_qiskit_gate_name(\n qiskit_op.label, qiskit_op.name, value_matrix\n ),\n matrix=sympy.Matrix(value_matrix),\n qubit_indices=tuple(_import_qiskit_qubit(qubit) for qubit in qiskit_qubits),\n )\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aalaprana995/Turtlebot_Navigation_Non_Holonomic_Constrains-
[ "9978467def69080fcd4da7c856e54b6ebda98248" ]
[ "code/final_rrl.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 28 00:33:10 2019\r\n\r\n@author: Aalap\r\n\"\"\"\r\n\r\n\r\n# -*- coding: utf-8 -*\r\n\"\"\"\r\nCreated on Thu Mar 28 18:47:25 2019\r\n\r\n@author: Aalap\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\nclass Node:\r\n def __init__(self, nodex, nodey,nodetheta, cost, parentnode,vx,vy,vt):\r\n self.nodex = nodex\r\n self.nodey = nodey\r\n self.nodetheta=nodetheta\r\n self.cost = cost\r\n self.parentnode = parentnode\r\n self.vx=vx\r\n self.vy=vy\r\n self.vt=vt\r\n def get_nodex(self):\r\n return self.nodex\r\n def get_nodey(self):\r\n return self.nodey\r\n def get_nodetheta(self):\r\n return self.nodetheta\r\n def get_vx(self):\r\n return vx\r\n def get_vy(self):\r\n return vy\r\n def get_vt(self):\r\n return vt\r\n\r\ndef motion(current_node,ur,ul,time):\r\n r=3.8\r\n l=23\r\n ur=0.104666667*ur\r\n ul=0.104666667*ul\r\n \r\n \r\n thetadot=(r/l)*(ur-ul)\r\n newnodetheta=thetadot*time+current_node.nodetheta\r\n xdot=(r/2)*(ur+ul)*(math.cos(current_node.nodetheta))\r\n ydot=(r/2)*(ur+ul)*(math.sin(current_node.nodetheta))\r\n d=math.sqrt((ydot)**2+(xdot)**2)\r\n #delta_x=d*math.cos(newnodetheta)\r\n #delta_y=d*math.sin(newnodetheta)\r\n cost=math.sqrt((xdot*time)**2+(ydot*time)**2)\r\n newcost=round(cost+current_node.cost)\r\n newnodex=round(xdot*time+current_node.nodex)\r\n newnodey=round(ydot*time+current_node.nodey)\r\n xvelocity=(ur)\r\n yvelocity=(ul)\r\n thetavelocity=thetadot\r\n newnodex,newnodey,newnodetheta,newcost,xvelocity,yvelocity,thetavelocity\r\n \r\n\r\n return newnodex,newnodey,newnodetheta,newcost,xvelocity,yvelocity,thetavelocity\r\n\r\n\r\n\r\n\r\n\r\ndef shortest_path(goalnode, visited, reso):\r\n #shortest path found until parent id is -1\r\n path_x = []#stroes path x coordinates\r\n path_y = []#stroes path x coordinates\r\n xvelocity = []\r\n yvelocity = []\r\n thetavelocity =[]\r\n path_x.append((goalnode.nodex))\r\n path_y.append((goalnode.nodey))\r\n xvelocity.append((goalnode.vx))\r\n yvelocity.append((goalnode.vy))\r\n thetavelocity.append((goalnode.vt))\r\n p = goalnode.parentnode\r\n \r\n print(p)\r\n while (p != -1):\r\n print('lll')\r\n tracknode = visited[p]\r\n path_x.append((tracknode.nodex))\r\n path_y.append((tracknode.nodey))\r\n xvelocity.append((tracknode.vx))\r\n yvelocity.append((tracknode.vy))\r\n thetavelocity.append((tracknode.vt))\r\n p = tracknode.parentnode\r\n return path_x, path_y,xvelocity,yvelocity,thetavelocity\r\n\r\ndef node_key(node):\r\n node_key = (node.nodex) * 250 + node.nodey#unique key generation by equation\r\n return node_key\r\n\r\ndef hd(node,goalnode):\r\n d=math.sqrt((node.nodex-goalnode.nodex)**2+(node.nodey-goalnode.nodey)**2)#cost to go\r\n return d \r\n\r\ndef check_node(node,obsmap,obs_x,obs_y):\r\n #check of node correctness\r\n if (node.nodex < (min(obs_x)) or node.nodex > (max(obs_x)) or node.nodey < (min(obs_y)) or node.nodey > (max(obs_y))):\r\n return False\r\n if (obsmap[node.nodex][node.nodey]):\r\n return False\r\n if (node.nodex < 0):\r\n return False\r\n if (node.nodex > 1110):\r\n return False\r\n if (node.nodey < 0):\r\n return False\r\n if (node.nodey > 1011):\r\n return False\r\n return True\r\n\r\ndef check_goal_node(node,goalnode):\r\n d=math.sqrt((node.nodex-goalnode.nodex)**2+(node.nodey-goalnode.nodey)**2)\r\n \r\n if(d<10):\r\n #check goalnode reached\r\n return True\r\n\r\ndef obstacle_map(obs_x, obs_y):\r\n max_x = round(max(obs_x))\r\n max_y = round(max(obs_y))\r\n min_x = round(min(obs_x))\r\n 
min_y = round(min(obs_y))\r\n\r\n obsmap = np.zeros((1111,1011))#make a world space which is all false \r\n for i in range(min_x,max_x):\r\n for j in range(min_y,max_y):\r\n obsmap[i][j]=False#make a obstacle space that is all false\r\n for index,i in enumerate(obs_x):\r\n obsmap[obs_x[index]][obs_y[index]] = True#update the obstacle space at points where there is obstacle to true\r\n return obsmap\r\n\r\ndef obstacle_space(r,c):\r\n points=[]#stores points of obstacle space\r\n obs_x=[]#stores x coordinates of obstacle space\r\n obs_y=[]#stores y coordinates of obstacle space\r\n e=r+c\r\n \r\n ##circular obstacle space\r\n print(\"computing circle1 obstacle\")\r\n k = 40.5 + (r) + c\r\n for i in range(e,(1111-e)):\r\n for j in range(e,(1011-e)):\r\n if (((i - 390) ** 2 + (j - 45) ** 2 - (k ** 2)) <= 0):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i,j])\r\n print(\"circle1 obstacle computed\")\r\n #print(\"c1x\",obs_x)\r\n #print(\"c1y\",obs_y) \r\n \r\n print(\"computing circle2 obstacle\")\r\n k = 40.5 + (r) + c\r\n for i in range(e,(1111-e)):\r\n for j in range(e,(1011-e)):\r\n if (((i - 438) ** 2 + (j - 274) ** 2 - (k ** 2)) <= 0):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i,j])\r\n print(\"circle2 obstacle computed\")\r\n #print(\"c2x\",obs_x)\r\n #print(\"c2y\",obs_y) \r\n \r\n print(\"computing circle3 obstacle\")\r\n k = 40.5 + (r) + c\r\n for i in range(e,(1111-e)):\r\n for j in range(e,(1011-e)):\r\n if (((i - 438) ** 2 + (j - 736) ** 2 - (k ** 2)) <= 0):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i,j])\r\n print(\"circle3 obstacle computed\")\r\n #print(\"c3x\",obs_x)\r\n #print(\"c3y\",obs_y) \r\n \r\n \r\n print(\"computing circle4 obstacle\")\r\n k = 40.5 + (r) + c\r\n for i in range(e,(1111-e)):\r\n for j in range(e,(1011-e)):\r\n if (((i - 390) ** 2 + (j - 965) ** 2 - (k ** 2)) <= 0):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i,j])\r\n print(\"circle4 obstacle computed\")\r\n #print(\"c4x\",obs_x)\r\n #print(\"c4y\",obs_y) \r\n \r\n print(\"computing rectangle1 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 1110-r-c <= 0) & (j - 35+r+c >= 0) & (j - 111-r-c <= 0) &(i -927+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle1 obstacle\")\r\n \r\n print(\"computing rectangle2 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 896-r-c <= 0) & (j - 35+r+c >= 0) & (j - 93-r-c <= 0) &(i -779+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle2 obstacle\")\r\n \r\n print(\"computing rectangle3 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 748-r-c <= 0) & (j - 35+r+c >= 0) & (j - 187-r-c <= 0) &(i -474+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle3 obstacle\")\r\n \r\n print(\"computing rectangle4 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 1110-r-c <= 0) & (j - 621+r+c >= 0) & (j - 697-r-c <= 0) &(i -744+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle4 obstacle\")\r\n \r\n print(\"computing rectangle5 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 1110-r-c <= 0) & (j - 448.5+r+c >= 0) & (j - 565.5-r-c <= 0) &(i -1052+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n 
print(\"computed rectangle5 obstacle\")\r\n \r\n print(\"computing rectangle6 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 1110-r-c <= 0) & (j - 362.5+r+c >= 0) & (j - 448.5-r-c <= 0) &(i -1019+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle6 obstacle\")\r\n \r\n print(\"computing rectangle7 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 1110-r-c <= 0) & (j - 178.25+r+c >= 0) & (j - 295.25-r-c <= 0) &(i -1052+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle7 obstacle\")\r\n \r\n print(\"computing rectangle8 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 529-r-c <= 0) & (j - 314.5+r+c >= 0) & (j - 497.5-r-c <= 0) &(i -438+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle8 obstacle\")\r\n \r\n print(\"computing rectangle9 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i - 712-r-c <= 0) & (j - 256+r+c >= 0) & (j - 332-r-c <= 0) &(i -529+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle9 obstacle\")\r\n \r\n print(\"computing rectangle10 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i -1026 -r-c <= 0) & (j -919+r+c >= 0) & (j - 1010-r-c <= 0) &(i -983+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle10 obstacle\")\r\n \r\n print(\"computing rectangle11 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i -918 -r-c <= 0) & (j -827+r+c >= 0) & (j - 1010-r-c <= 0) &(i -832+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle11 obstacle\")\r\n \r\n print(\"computing rectangle12 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i -1110 -r-c <= 0) & (j -0+r+c >= 0) & (j - 58-r-c <= 0) &(i -585+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle12 obstacle\")\r\n \r\n \r\n print(\"computing rectangle13 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i -936 -r-c <= 0) & (j -267+r+c >= 0) & (j - 384-r-c <= 0) &(i -784+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle13 obstacle\")\r\n \r\n \r\n \r\n \r\n print(\"computing rectangle14 obstacle\")\r\n for i in range(e,1111-e):\r\n for j in range(e,1011-e):\r\n if ((i -309 -r-c <= 0) & (j -750+r+c >= 0) & (j - 910-r-c <= 0) &(i -150+r+c >= 0)):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i, j])\r\n print(\"computed rectangle14 obstacle\")\r\n \r\n #semi circle\r\n print(\"computing semicircle5 obstacle\")\r\n k = 80 + (r) + c\r\n for i in range(e,(1111-e)):\r\n for j in range(e,(1011-e)):\r\n if (((i - 150) ** 2 + (j - 830) ** 2 - (k ** 2)) <= 0):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i,j])\r\n print(\"semicircle5 obstacle computed\")\r\n \r\n print(\"computing semicircle6 obstacle\")\r\n k = 80 + (r) + c\r\n for i in range(e,(1111-e)):\r\n for j in range(e,(1011-e)):\r\n if (((i - 310) ** 2 + (j - 830) ** 2 - (k ** 2)) <= 0):\r\n obs_x.append(i)\r\n obs_y.append(j)\r\n points.append([i,j])\r\n print(\"semicircle6 obstacle computed\")\r\n #boundary obstacle space\r\n print(\"computing 
boundary \")\r\n    if(r==0 and c==0):\r\n        for i in range(1111):\r\n            for j in range(1011):\r\n                if(i==0 or i==1110 or j==1010 or j==0):\r\n                    obs_x.append(i)\r\n                    obs_y.append(j)\r\n                    points.append([i,j])\r\n    else:\r\n        e=r+c\r\n        for i in range(e,1111-e):\r\n            for j in range(e,1011-e):\r\n                if(i==r+c or i==1110-r-c or j==1010-r-c or j==r+c):\r\n                    obs_x.append(i)\r\n                    obs_y.append(j)\r\n                    points.append([i,j])\r\n    print(\"boundary computed\")\r\n    # extents of the obstacle space, printed as a sanity check\r\n    print(min(obs_x))\r\n    print(max(obs_x))\r\n    print(min(obs_y))\r\n    print(max(obs_y))\r\n    return obs_x,obs_y\r\n\r\n\r\n
def a_algo(startx,starty,starttheta,goalx,goaly,goaltheta,reso,r,c,time):\r\n    show=True\r\n    lx = []  # x coordinates of all explored nodes\r\n    ly = []  # y coordinates of all explored nodes\r\n    flag=0\r\n    found=False  # set to True once the goal node is reached\r\n    unvisited=dict()  # dictionary of unvisited (open) nodes\r\n    visited=dict()  # dictionary of visited (closed) nodes, used for backtracking\r\n    moves = [[60, 0], [40, 0], [60, 40], [40, 60], [60, 60], [40, 40],\r\n             [0,60], [0, 40]]  # all allowed moves\r\n\r\n    startnode = Node(round(startx / reso), round(starty / reso), 0,0, -1,0,0,0)  # start node\r\n    goalnode = Node(round(goalx / reso), round(goaly / reso), 0,1000, 0,0,0,0)  # goal node\r\n    obs_x, obs_y = obstacle_space(r, c)  # obstacle space formed\r\n    # obstacle space in discretized form\r\n    obs_x = [round(x / reso) for x in obs_x]\r\n    obs_y = [round(y / reso) for y in obs_y]\r\n    # obstacle space converted to a boolean obstacle map\r\n    obsmap= obstacle_map(obs_x,obs_y)\r\n    # check that neither the start node nor the goal node is inside an obstacle or outside the world space\r\n    if not(startnode.nodex < min(obs_x) or startnode.nodex > max(obs_x) or startnode.nodey < min(obs_y) or startnode.nodey > max(obs_y)):\r\n        if not(goalnode.nodex < min(obs_x) or goalnode.nodex > max(obs_x) or goalnode.nodey < min(obs_y) or goalnode.nodey > max(obs_y)):\r\n            if not obsmap[startnode.nodex][startnode.nodey] and not obsmap[goalnode.nodex][goalnode.nodey]:\r\n                flag = 1\r\n\r\n    unvisited[node_key(startnode)] = startnode\r\n    while flag and unvisited:\r\n        current_node_id = min(unvisited, key=lambda o: unvisited[o].cost+hd(goalnode,unvisited[o]))  # node with minimum cost + heuristic\r\n        current_node = unvisited[current_node_id]  # make it the current node\r\n        visited[current_node_id] = current_node  # add the current node to the visited dictionary\r\n        del unvisited[current_node_id]  # remove the current node from the unvisited dictionary\r\n        for i, _ in enumerate(moves):  # node exploration\r\n            newnodex,newnodey,newnodetheta,newcost,xvelocity,yvelocity,thetavelocity = motion(current_node , moves[i][0], moves[i][1],time)\r\n            node=Node(newnodex,newnodey,newnodetheta,newcost,current_node_id,xvelocity,yvelocity,thetavelocity)\r\n            lx.append(Node.get_nodex(node))  # store the new node x for plotting\r\n            ly.append(Node.get_nodey(node))  # store the new node y for plotting\r\n\r\n            if (len(lx)%1000==0):\r\n                if(show):\r\n                    plt.plot(lx,ly,\".r\")\r\n                    plt.plot(obs_x, obs_y,\".k\")  # obstacle space\r\n                    plt.grid()\r\n                    plt.show()\r\n\r\n            if (check_goal_node(node, goalnode)):\r\n                goalnode.nodex=node.nodex\r\n                goalnode.parentnode=node.parentnode\r\n                goalnode.nodey=node.nodey\r\n                goalnode.cost=node.cost\r\n                goalnode.vt=node.vt\r\n                goalnode.vx=node.vx\r\n                goalnode.vy=node.vy\r\n                goalnode.nodetheta=node.nodetheta\r\n                print(\"goal reached; parent node id:\", node.parentnode)\r\n                found=True\r\n                flag=False\r\n                break\r\n            f = node_key(node)\r\n            if not check_node(node, obsmap,obs_x,obs_y):  # skip nodes inside an obstacle or outside the map\r\n                continue\r\n            if f in visited:  # skip nodes that have already been visited\r\n                continue\r\n            if f in unvisited:  # node already in unvisited: keep the cheaper cost and parent\r\n                if (unvisited[f].cost > node.cost):\r\n                    unvisited[f].cost = node.cost\r\n                    unvisited[f].parentnode = node.parentnode\r\n            else:\r\n                unvisited[f] = node  # add the new node to the unvisited dictionary\r\n    print(\"explored\", len(visited), \"nodes\")\r\n    a, b,xvelocity,yvelocity,thetavelocity = shortest_path(goalnode, visited, reso)  # back-track the shortest path\r\n\r\n    if(found):\r\n        print(\"shortest path found\")\r\n    else:\r\n        print(\"goal not reached\")\r\n    return a, b, obs_x, obs_y, lx,ly,xvelocity,yvelocity,thetavelocity\r\n\r\n\r\n
def main():\r\n    print(\"A* algorithm start!!\")\r\n    show=True  # flag used to display the result\r\n\r\n    startx = 50.0  # start x coordinate\r\n    starty = 50.0  # start y coordinate\r\n    starttheta=0\r\n    goalx = 250.0  # goal x coordinate\r\n    goaly = 250.0  # goal y coordinate\r\n    goaltheta=0\r\n    reso = 1  # resolution\r\n    r = 24  # robot radius\r\n    c= 0  # clearance\r\n    time=1\r\n    if show:\r\n        plt.plot(startx/reso, starty/reso, \"xc\")\r\n        plt.plot(goalx/reso, goaly/reso, \"xb\")\r\n    a,b, obs_x, obs_y, lx,ly,xvelocity,yvelocity,thetavelocity =a_algo(startx,starty,starttheta,goalx,goaly,goaltheta,reso,r,c,time)\r\n    print(a)\r\n    print(b)\r\n    print(xvelocity)\r\n    print(yvelocity)\r\n    print(thetavelocity)\r\n\r\n    if show:\r\n        # display the result; if the input or output is invalid, only the obstacles, start and goal are shown\r\n        print(\"final output for astar!!!!\")\r\n        plt.plot(lx,ly,\".g\")  # explored nodes\r\n        plt.plot(obs_x, obs_y,\".k\")  # obstacle space\r\n        plt.plot(a, b, \"-r\")  # shortest path\r\n        plt.grid()\r\n        plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\n" ]
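The code field above builds the inflated obstacle set with one hand-written nested loop per rectangle. The same idea can be expressed data-driven; the sketch below is illustrative only, and the helper name, the example rectangle list, and the default grid size are assumptions rather than code taken from the record.

# Hypothetical sketch: grow each axis-aligned rectangle by the robot radius plus
# clearance ("inflate") and collect every grid cell it covers, mirroring what the
# per-rectangle loops in the record do one rectangle at a time.
def inflated_rectangle_points(rects, inflate, width=1111, height=1011):
    points = []
    for xmin, xmax, ymin, ymax in rects:
        for i in range(int(inflate), width - int(inflate)):
            for j in range(int(inflate), height - int(inflate)):
                if (xmin - inflate <= i <= xmax + inflate and
                        ymin - inflate <= j <= ymax + inflate):
                    points.append((i, j))
    return points

# Example with the bounds of rectangle6 and rectangle7 above and r=24, c=0.
rects = [(1019, 1110, 362.5, 448.5), (1052, 1110, 178.25, 295.25)]
obstacle_points = inflated_rectangle_points(rects, inflate=24 + 0)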
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.grid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
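On every expansion, the a_algo routine above picks the next node with a linear min() over the unvisited dictionary. A binary-heap open set is a common alternative; the sketch below is generic, and its neighbors, hd, and node_key parameters are assumptions standing in for the record's move generation, heuristic, and key function rather than its actual API.

import heapq
import itertools

def astar_open_set_loop(start, goal, neighbors, hd, node_key):
    # Expand nodes in order of f = g + h until the goal key is popped.
    counter = itertools.count()          # tie-breaker so the heap never compares node objects
    open_heap = [(hd(goal, start), next(counter), start)]
    best_cost = {node_key(start): 0.0}   # g-values, analogous to Node.cost in the record
    visited = {}                         # closed set, analogous to the record's visited dict
    while open_heap:
        _, _, node = heapq.heappop(open_heap)
        key = node_key(node)
        if key in visited:
            continue                     # stale heap entry; a cheaper copy was already expanded
        visited[key] = node
        if key == node_key(goal):
            break                        # goal reached; back-tracking happens elsewhere
        for nbr, step_cost in neighbors(node):
            nkey = node_key(nbr)
            g = best_cost[key] + step_cost
            if g < best_cost.get(nkey, float("inf")):
                best_cost[nkey] = g
                heapq.heappush(open_heap, (g + hd(goal, nbr), next(counter), nbr))
    return visited

With this structure each expansion costs O(log n) for the heap pop instead of a full scan of the open set.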
plopresti/tensorflow
[ "8b0c84d30d957596cbb3bcac9245e114c3f0b65b", "8b0c84d30d957596cbb3bcac9245e114c3f0b65b", "8b0c84d30d957596cbb3bcac9245e114c3f0b65b", "8b0c84d30d957596cbb3bcac9245e114c3f0b65b", "8b0c84d30d957596cbb3bcac9245e114c3f0b65b", "8b0c84d30d957596cbb3bcac9245e114c3f0b65b" ]
[ "tensorflow/python/framework/func_graph.py", "tensorflow/python/eager/core_test.py", "tensorflow/python/keras/layers/gru_test.py", "tensorflow/python/ops/linalg/linear_operator_diag.py", "tensorflow/python/keras/utils/generic_utils_test.py", "tensorflow/python/framework/auto_control_deps.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"FuncGraph and related functionality.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections as py_collections\nimport itertools\nimport weakref\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager.graph_only_ops import graph_placeholder\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import memory\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util.lazy_loader import LazyLoader\n\n# This is to avoid a circular dependency:\n# function -> func_graph\nfunction = LazyLoader(\"function\", globals(),\n \"tensorflow.python.eager.function\")\ndef_function = LazyLoader(\n \"def_function\", globals(),\n \"tensorflow.python.eager.def_function\")\n\nWHITELIST_COLLECTIONS = [\n ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.LOCAL_VARIABLES,\n ops.GraphKeys.TRAINABLE_VARIABLES,\n variable_scope._VARSTORE_KEY, # pylint: disable=protected-access\n variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access\n]\n\n\nclass UnknownArgument(object):\n \"\"\"Signifies an argument which is not currently handled.\"\"\"\n pass\n\n\ndef convert_structure_to_signature(structure, arg_names=None):\n \"\"\"Convert a potentially nested structure to a signature.\n\n Args:\n structure: Structure to convert, where top level collection is a list or a\n tuple.\n arg_names: Optional list of arguments that has equal number of elements as\n `structure` and is used for naming corresponding TensorSpecs.\n\n Returns:\n Identical structure that has TensorSpec objects instead of Tensors and\n UknownArgument instead of any unsupported types.\n \"\"\"\n def encode_arg(arg, path):\n \"\"\"A representation for this argument, for converting into signatures.\"\"\"\n if isinstance(arg, ops.Tensor):\n user_specified_name = None\n try:\n user_specified_name = compat.as_str(\n arg.op.get_attr(\"_user_specified_name\"))\n except ValueError:\n pass\n\n if path and user_specified_name and user_specified_name != 
path[0]:\n # The user has explicitly named the argument differently than the name\n # of the function argument.\n name = user_specified_name\n else:\n name = \"/\".join([str(p) for p in path])\n return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)\n if isinstance(arg, composite_tensor.CompositeTensor):\n # TODO(b/133606651) Do we need to inject arg_name?\n return arg._type_spec # pylint: disable=protected-access\n if isinstance(arg, (\n int,\n float,\n bool,\n type(None),\n dtypes.DType,\n tensor_spec.TensorSpec,\n )):\n return arg\n return UnknownArgument()\n\n # We are using the flattened paths to name the TensorSpecs. We need an\n # explicit name for them downstream.\n flattened = nest.flatten_with_tuple_paths(structure)\n if arg_names:\n if len(arg_names) != len(structure):\n raise ValueError(\n \"Passed in arg_names don't match actual signature (%s).\" % arg_names)\n # Replace all top-level names with their actual arg_names. If a path before\n # was \"(2,'a',1)\", it will become \"(arg_names[2],'a',1)\".\n flattened = [\n ((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened\n ]\n\n mapped = [encode_arg(arg, path) for path, arg in flattened]\n return nest.pack_sequence_as(structure, mapped)\n\n\nclass FuncGraph(ops.Graph):\n \"\"\"Graph representing a function body.\n\n Attributes:\n name: The name of the function.\n inputs: Placeholder tensors representing the inputs to this function. The\n tensors are in this FuncGraph. This represents \"regular\" inputs as well as\n captured inputs (i.e. the values of self.captures), with the regular\n inputs coming first.\n outputs: Tensors that will be returned by this function. The tensors are in\n this FuncGraph.\n control_outputs: Operations that must be executed before the function\n represented by this graph can be said to have been executed.\n structured_input_signature: A tuple of (args, kwargs), which are both\n possibly-nested python objects that were received by this function. Note\n that these structures might contain Python `None`s.\n structured_outputs: A possibly-nested python object which will be returned\n by this function. The Tensors in this structure are the same as those of\n self.outputs. Note that this structure might contain Python `None`s.\n variables: Variables that should be watched during function execution.\n outer_graph: The graph this function is defined in. May be another FuncGraph\n or the global default Graph.\n captures: Maps external tensor -> internal tensor (i.e. input placeholder).\n The entries are in the order they were captured.\n deferred_captures: Maps arbitrary key -> (closure, nest of placeholders),\n where at function call time the value of closure() will be used to feed\n the nest of placeholders.\n control_captures: Set of external ops on which this graph has a control\n dependency.\n seed: The graph-level random seed.\n capture_by_value: If True, the func graph will capture Variables by value\n instead of reference.\n \"\"\"\n\n def __init__(self, name, collections=None, capture_by_value=None):\n \"\"\"Construct a new FuncGraph.\n\n The graph will inherit its graph key, collections, seed, and distribution\n strategy stack from the current context or graph.\n\n Args:\n name: the name of the function.\n collections: a dictionary of collections this FuncGraph should start\n with. 
If not specified (None), the FuncGraph will read (but not write\n to) the outer graph's collections that are not whitelisted, and both\n read and write to the outer graph's collections that are whitelisted.\n The current whitelisted collections are the global variables, the\n local variables, and the trainable variables.\n Defaults to None.\n capture_by_value: An optional boolean. If True, the func graph will\n capture Variables by value instead of reference. By default inherit\n from outer graphs, and failing that will default to False.\n \"\"\"\n super(FuncGraph, self).__init__()\n\n self.name = name\n self.inputs = []\n self.outputs = []\n self.control_outputs = []\n self.control_captures = set()\n self.structured_input_signature = None\n self.structured_outputs = None\n self._weak_variables = []\n self._watched_variables = weakref.WeakSet()\n self.outer_graph = ops.get_default_graph()\n self.captures = py_collections.OrderedDict()\n # If not None, records the names of output args of this function. Used to\n # preserve the output names in the signature of a serialized+deserialized\n # function. Private at the moment mostly because it's often out of date.\n self._output_names = None\n self.deferred_captures = py_collections.OrderedDict()\n # Inherit capture-by-value from outer graph.\n if capture_by_value is not None:\n self.capture_by_value = capture_by_value\n elif self.outer_graph is not None and isinstance(\n self.outer_graph, FuncGraph):\n self.capture_by_value = self.outer_graph.capture_by_value\n else:\n self.capture_by_value = False\n\n self._building_function = True\n # Map from resource tensor name to last op (in program order) which uses\n # this tensor. Used to enforce that execution order matches program order\n # for resource tensors.\n self._last_op_using_resource_tensor = {}\n\n graph = self.outer_graph\n\n if context.executing_eagerly():\n self.seed = context.global_seed()\n # [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of\n # any None op_seed for random_op in the function, in which case we end up\n # using function seed, which could be unintended behavior for the op.\n self._seed_used = False\n else:\n self.seed = graph.seed\n self._seed_used = False\n # TODO(allenl): Figure out if we can remove colocation stack\n # specialization (currently used in cond_v2), here and in the cache key.\n self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access\n\n if collections is None:\n for collection_name in graph.get_all_collection_keys():\n if collection_name not in WHITELIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection(\n collection_name)\n for collection_name in WHITELIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection_ref(\n collection_name)\n else:\n self._collections = collections\n\n def __str__(self):\n return \"FuncGraph(name=%s, id=%s)\" % (self.name, id(self))\n\n def watch_variable(self, v):\n \"\"\"Marks the variable v as accessed while building this graph.\"\"\"\n while self is not None and isinstance(self, FuncGraph):\n self._watched_variables.add(v)\n self = self.outer_graph\n\n def capture_call_time_value(self, closure, spec, key=None):\n \"\"\"Creates a placeholder which at call time has the value closure().\n\n Useful, for example, to respect TensorFlow context managers, which are often\n dynamically scoped.\n\n Args:\n closure: function which takes no arguments, to be evaluated at function\n call time, returning a nest of tensors compatible with 
`spec`.\n spec: nest of TypeSpec for the value to capture.\n key: optional. If not None, multiple calls to lazy_capture with the same\n key in the same graph will return the same placeholder, and the\n first closure will be used at function call time.\n\n Returns:\n Nest of placeholders which, at function call time, will be fed with the\n result of calling closure().\n\n Raises:\n ValueError: at function call time, if the return value of closure() is\n not compatible with `spec`.\n \"\"\"\n if key is None:\n key = object()\n if key not in self.deferred_captures:\n\n def convert_to_placeholder(s):\n if not isinstance(s, tensor_spec.TensorSpec):\n raise TypeError(\n \"Expected a nest of `TypeSpec` objects, found %s of type %s.\" %\n (s, type(s)))\n return array_ops.placeholder(dtype=s.dtype, shape=s.shape)\n\n placeholder = nest.map_structure(\n convert_to_placeholder, spec, expand_composites=True)\n\n def wrapped_closure():\n ret_nest = closure()\n nest.assert_same_structure(spec, ret_nest, expand_composites=True)\n # This uses the tensor dtype defined in `spec` when converting values\n # in `ret_nest` to tensors.\n # pylint: disable=protected-access\n y = nest.map_structure(lambda s, r: s._to_components(r), spec, ret_nest,\n expand_composites=False)\n # pylint: enable=protected-access\n return nest.flatten(y, expand_composites=True)\n\n self.deferred_captures[key] = (wrapped_closure, placeholder)\n return self.deferred_captures[key][1]\n\n def control_dependencies(self, control_inputs):\n \"\"\"Handles control dependencies.\n\n FuncGraph wraps Graph's control_dependencies logic by first filtering out\n any external tensors / operations and storing them in the graph's\n control_captures member. Any consumers of this function graph must then\n decide how to handle the control captures.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which\n must be executed or computed before running the operations\n defined in the context. Can also be `None` to clear the control\n dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n\n Raises:\n TypeError: If `control_inputs` is not a list of `Operation` or\n `Tensor` objects.\n \"\"\"\n if control_inputs is None:\n return super(FuncGraph, self).control_dependencies(control_inputs)\n\n filtered_control_inputs = []\n for c in control_inputs:\n # Check for _UnreadVariable\n if (isinstance(c, ops.IndexedSlices) or\n (hasattr(c, \"_handle\") and hasattr(c, \"op\"))):\n c = c.op\n graph_element = ops._as_graph_element(c) # pylint: disable=protected-access\n if graph_element is None:\n graph_element = c\n if graph_element is not None and getattr(\n graph_element, \"graph\", None) is not self:\n self.control_captures.add(graph_element)\n else:\n filtered_control_inputs.append(graph_element)\n return super(FuncGraph, self).control_dependencies(filtered_control_inputs)\n\n def as_default(self):\n outer_cm = super(FuncGraph, self).as_default()\n\n @tf_contextlib.contextmanager\n def inner_cm():\n \"\"\"Context manager for copying distribute.Strategy scope information.\"\"\"\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n # TODO(b/112906995, nareshmodi): distribution strategy depends on\n # inheriting this stack from the default graph even in eager mode. Maybe\n # it should be part of the eager context? 
This would also allow us to\n # remove a get_default_graph() call from the function cache lookup.\n old_strategy_stack = self._distribution_strategy_stack\n self._distribution_strategy_stack = list(\n graph._distribution_strategy_stack)\n # We ignore device placements from any outer scopes while tracing the\n # function when possible, to avoid hard-coding them in the function\n # graph. \"Default\" placements come from the PartitionedCallOp's placement,\n # so that the same trace of the Python function may be placed on several\n # different devices and saved functions may be placed on new devices when\n # restored.\n old_device_stack = self._device_function_stack\n if context.executing_eagerly():\n if self._distribution_strategy_stack:\n self._device_function_stack = self._device_function_stack.copy()\n self._add_device_to_stack(context.context().device_name)\n else:\n if (self._distribution_strategy_stack\n or device_stack_has_callable(graph._device_function_stack)):\n # Hard-code devices from device functions in the function body\n self._device_function_stack = graph._device_function_stack.copy()\n\n old_creator_stack = self._variable_creator_stack\n self._variable_creator_stack = graph._variable_creator_stack\n # Inherit the graph key, since this is used for matching variables in\n # optimizers.\n old_graph_key = self._graph_key\n self._graph_key = graph._graph_key\n # Inherit the auto_cast_variable_read_dtype, since this should not change\n # inside a function.\n old_auto_cast_var_read_dtype = self._auto_cast_variable_read_dtype\n self._auto_cast_variable_read_dtype = graph._auto_cast_variable_read_dtype\n # pylint: enable=protected-access\n\n with outer_cm as g:\n try:\n yield g\n finally:\n self._distribution_strategy_stack = old_strategy_stack\n self._device_function_stack = old_device_stack\n self._variable_creator_stack = old_creator_stack\n self._graph_key = old_graph_key\n self._auto_cast_variable_read_dtype = old_auto_cast_var_read_dtype\n return inner_cm()\n\n @property\n def output_types(self):\n return [t.dtype for t in self.outputs]\n\n @property\n def output_shapes(self):\n return [t.shape for t in self.outputs]\n\n @property\n def variables(self):\n \"\"\"A list of variables accessed by this FuncGraph.\n\n Note that functions keep only weak references to variables. Calling the\n function after a variable it accesses has been deleted is an error.\n\n Yields:\n Strong references to variables accessed by this FuncGraph.\n \"\"\"\n for weak_v in self._weak_variables:\n v = weak_v()\n if v is None:\n raise AssertionError(\n \"Called a function referencing variables which have been deleted. \"\n \"This likely means that function-local variables were created and \"\n \"not referenced elsewhere in the program. 
This is generally a \"\n \"mistake; consider storing variables in an object attribute on \"\n \"first call.\")\n yield v\n\n @variables.setter\n def variables(self, var_list):\n self._weak_variables = [weakref.ref(v) for v in var_list]\n\n def _capture_by_value(\n self,\n op_type,\n inputs,\n dtypes, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n # When capturing by value, do the read outside\n reverse_captures = dict((v, k) for k, v in self.captures.items())\n uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]\n with ops.init_scope():\n if context.executing_eagerly():\n attr_list = (\"dtype\", int(attrs[\"dtype\"].type))\n value, = execute.execute(\n compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,\n context.context())\n else:\n op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access\n op_type,\n uncaptured_inputs,\n dtypes,\n input_types,\n name,\n attrs,\n op_def,\n compute_device)\n value = op.outputs[0]\n captured_value = self.capture(value)\n return captured_value.op\n\n def create_op(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_shapes=True,\n compute_device=True):\n \"\"\"Like Graph.create_op, except handles external input tensors.\n\n This overload adds functionality to create_op to \"capture\" any external\n input tensors, i.e. tensors from the eager context or outer function graphs\n if this is a nested function. See `capture` for more information.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of\n the tensors that the operation consumes. By default, uses the base\n `DType` of each input in `inputs`. Operations that expect\n reference-typed inputs must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always\n computed).\n compute_device: (Optional.) If True, device functions will be executed\n to compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n del compute_shapes\n if self.capture_by_value and op_type in [\"ReadVariableOp\",\n \"ResourceGather\"]:\n return self._capture_by_value(op_type, inputs, dtypes, input_types, name,\n attrs, op_def, compute_device)\n\n # This capturing logic interacts poorly with control flow contexts which\n # want to replace inputs of ops far too late in the process. This can lead\n # the context to get confused and try to create an Enter for an Enter. 
We\n # can detect this here and skip the additional Enter which can confuse loop\n # validation logic.\n if op_type == \"Enter\" and inputs[0].op.type == \"Enter\":\n if inputs[0].op.get_attr(\"frame_name\") == attrs[\"frame_name\"].s:\n return inputs[0].op\n # Calling AddValue on the control flow contexts to force creation of the\n # backward accumulators in the original graph before we create placeholders\n # to capture the inputs.\n ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access\n for i, inp in enumerate(inputs):\n # TPU Estimator defines a control flow context with no AddValue method.\n if ctxt is not None and hasattr(ctxt, \"AddValue\"):\n inp = ctxt.AddValue(inp)\n inp = self.capture(inp)\n inputs[i] = inp\n return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access\n op_type, inputs, dtypes, input_types, name, attrs, op_def,\n compute_device)\n\n def capture(self, tensor, name=None):\n \"\"\"Captures `tensor` if it's external to this graph.\n\n If `tensor` is from a different graph, returns a placeholder for it.\n `tensor` and the placeholder will appear in self.captures, and the\n placeholder will appear in self.inputs. Multiple calls to this method with\n the same `tensor` argument will return the same placeholder. If `tensor` is\n from this graph, returns `tensor`.\n\n Args:\n tensor: Tensor. May be from this FuncGraph or a different graph.\n name: Optional name if a placeholder is created.\n\n Returns:\n Tensor from this FuncGraph.\n \"\"\"\n # Note: _forward_func_graph is currently only set when building the gradient\n # graph graph of a defun call. If the backwards graph tries to capture\n # tensors those will be captured first in the forward graph. This\n # makes sure that any tensor needed by a custom_gradient is correctly\n # captured.\n\n # TODO(b/134097853): figure out a better way to check distributed variables\n if hasattr(tensor, \"_distribute_strategy\") and hasattr(tensor, \"_values\"):\n # This checks if the 'tensor' is a DistributedVariable. When it is a\n # DistributedVariable, we do not want to check its \"graph\" attr as the\n # following if branch does, because \"graph\" is not an attr for the\n # container DistributedVariable object, and the underlying components may\n # not have been initialized yet.\n # The reason we do not use isinstance() is due to cyclic dependency issue.\n if name is None:\n name = str(\"distributed_variable\")\n return self._capture_helper(tensor, name)\n if (getattr(tensor, \"graph\", None) is not self and\n hasattr(self, \"_forward_func_graph\") and\n isinstance(self._forward_func_graph, FuncGraph)):\n tensor = self._forward_func_graph.capture(tensor)\n if isinstance(tensor, ops.EagerTensor):\n if name is None:\n name = str(ops.uid())\n return self._capture_helper(tensor, name)\n if tensor.graph is not self:\n if name is None:\n name = tensor.op.name\n inner_graph = tensor.graph\n while inner_graph is not None and isinstance(inner_graph, FuncGraph):\n if inner_graph is self:\n raise ValueError(\n \"Trying to capture a tensor from an inner function. This can be \"\n \"caused by accessing a tensor defined inside a loop or \"\n \"conditional body, or a subfunction, from a calling function, \"\n \"without going through the proper return value mechanism. \"\n \"Consider using TensorFlow mechanisms such as TensorArrays \"\n \"to return tensors from inner functions or loop / conditional \"\n \"bodies. 
Tensor: %s; tensor graph: %s; this graph: %s\"\n % (tensor, tensor.graph, self))\n inner_graph = inner_graph.outer_graph\n return self._capture_helper(tensor, name)\n return tensor\n\n def _capture_helper(self, tensor, name):\n captured_tensor = self.captures.get(tensor, None)\n if captured_tensor is None:\n captured_tensor = _create_substitute_placeholder(tensor, name=name,\n dtype=tensor.dtype)\n self.captures[tensor] = captured_tensor\n self.inputs.append(captured_tensor)\n tape.record_operation(\"captured_value\", [captured_tensor], [tensor],\n lambda x: [x])\n return captured_tensor\n\n @property\n def external_captures(self):\n \"\"\"External tensors captured by this function.\"\"\"\n return list(self.captures.keys())\n\n @property\n def internal_captures(self):\n \"\"\"Placeholders in this function corresponding captured tensors.\"\"\"\n return list(self.captures.values())\n\n\ndef func_graph_from_py_func(name,\n python_func,\n args,\n kwargs,\n signature=None,\n func_graph=None,\n autograph=False,\n autograph_options=None,\n add_control_dependencies=True,\n arg_names=None,\n op_return_value=None,\n collections=None,\n capture_by_value=None,\n override_flat_arg_shapes=None):\n \"\"\"Returns a `FuncGraph` generated from `python_func`.\n\n Args:\n name: an identifier for the function.\n python_func: the Python function to trace.\n args: the positional args with which the Python function should be called;\n ignored if a signature is provided.\n kwargs: the keyword args with which the Python function should be called;\n ignored if a signature is provided.\n signature: a possibly nested sequence of `TensorSpecs` specifying the shapes\n and dtypes of the arguments. When a signature is provided, `args` and\n `kwargs` are ignored, and `python_func` is traced with Tensors conforming\n to `signature`. If `None`, the shapes and dtypes are inferred from the\n inputs.\n func_graph: Optional. An instance of FuncGraph. If provided, we will use\n this graph else a new one is built and returned.\n autograph: whether to use autograph to compile `python_func`.\n See https://www.tensorflow.org/guide/autograph for more information.\n autograph_options: additional knobs to control when `autograph=True`.\n See https://www.tensorflow.org/guide/autograph for more information.\n add_control_dependencies: If True, automatically adds control dependencies\n to ensure program order matches execution order and stateful ops always\n execute.\n arg_names: Optional list of argument names, used to give input placeholders\n recognizable names.\n op_return_value: Optional. A Tensor. If set and `python_func` returns\n Operations, those return values will be replaced with this value. If not\n set, returning an Operation triggers an error.\n collections: a dictionary of collections this FuncGraph should start\n with. If not specified (None), the FuncGraph will read (but not write to)\n the outer graph's collections that are not whitelisted, and both\n read and write to the outer graph's collections that are whitelisted.\n The current whitelisted collections are the global variables, the\n local variables, and the trainable variables.\n Defaults to None.\n capture_by_value: An optional boolean. If True, the func graph will capture\n Variables by value instead of reference. By default inherit from outer\n graphs, and failing that will default to False.\n override_flat_arg_shapes: An optional list of instances that are either\n `None` or `TensorShape`. 
The length must match that of\n `nest.flatten((args, kwargs), expand_composites=True)`. The entries\n containing value `None` must match entries in flattened arguments\n containing non-tensors, while entries containing a `TensorShape` must\n match entries in the flattened arguments containing tensors.\n\n Returns:\n A FuncGraph.\n\n Raises:\n TypeError: If any of `python_func`'s return values is neither `None` nor a\n `Tensor`.\n ValueError: If both `signature` and `override_flat_arg_shapes` are\n passed in.\n \"\"\"\n if op_return_value is not None:\n assert isinstance(op_return_value, ops.Tensor), op_return_value\n if func_graph is None:\n func_graph = FuncGraph(name, collections=collections,\n capture_by_value=capture_by_value)\n assert isinstance(func_graph, FuncGraph)\n if add_control_dependencies:\n control_manager = AutomaticControlDependencies()\n else:\n control_manager = ops.NullContextmanager()\n with func_graph.as_default(), control_manager as a:\n current_scope = variable_scope.get_variable_scope()\n default_use_recource = current_scope.use_resource\n current_scope.set_use_resource(True)\n\n if signature is not None and override_flat_arg_shapes is not None:\n raise ValueError(\n \"Passed both signature and override_flat_arg_shapes: %s and %s.\"\n % (signature, override_flat_arg_shapes))\n\n if signature is not None:\n args = signature\n kwargs = {}\n\n # Creates and names placeholders for all arguments.\n if override_flat_arg_shapes is not None:\n flat_args = nest.flatten(args, expand_composites=True)\n arg_shapes = override_flat_arg_shapes[:len(flat_args)]\n kwarg_shapes = override_flat_arg_shapes[len(flat_args):]\n else:\n arg_shapes = None\n kwarg_shapes = None\n func_args = _get_defun_inputs_from_args(\n args, arg_names, flat_shapes=arg_shapes)\n func_kwargs = _get_defun_inputs_from_kwargs(\n kwargs, flat_shapes=kwarg_shapes)\n\n # Convert all Tensors into TensorSpecs before saving the structured inputs.\n # If storing pure concrete functions that are not called through polymorphic\n # functions, we don't have access to FunctionSpec, so we need to call the\n # TensorSpecs by their `arg_names` for later binding.\n func_graph.structured_input_signature = (\n convert_structure_to_signature(func_args, arg_names),\n convert_structure_to_signature(func_kwargs))\n\n flat_func_args = nest.flatten(func_args, expand_composites=True)\n flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)\n # Temporarily set inputs to allow graph building code to inspect\n # them. Reassigned below.\n func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs\n if isinstance(arg, ops.Tensor)]\n\n # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.\n # Variables to help check whether mutation happens in calling the function\n # Copy the recursive list, tuple and map structure, but not base objects\n func_args_before = nest.pack_sequence_as(func_args, flat_func_args,\n expand_composites=True)\n func_kwargs_before = nest.pack_sequence_as(\n func_kwargs, flat_func_kwargs, expand_composites=True)\n\n def convert(x):\n \"\"\"Converts a function output to a Tensor.\"\"\"\n if x is None:\n return None\n if op_return_value is not None and isinstance(x, ops.Operation):\n # TODO(b/79881896): we currently can't capture external control deps, so\n # this won't work if x needs to be captured (i.e. 
if python_func returns\n # captured Operations).\n with ops.control_dependencies([x]):\n x = array_ops.identity(op_return_value)\n elif not isinstance(x, tensor_array_ops.TensorArray):\n try:\n x = ops.convert_to_tensor_or_composite(x)\n except (ValueError, TypeError):\n raise TypeError(\n \"To be compatible with tf.contrib.eager.defun, Python functions \"\n \"must return zero or more Tensors; in compilation of %s, found \"\n \"return value of type %s, which is not a Tensor.\" %\n (str(python_func), type(x)))\n if add_control_dependencies:\n x = a.mark_as_return(x)\n return x\n\n try:\n if autograph:\n from tensorflow.python import autograph # pylint: disable=g-import-not-at-top\n _, original_func = tf_decorator.unwrap(python_func)\n\n def wrapper(*args, **kwargs):\n \"\"\"Calls a converted version of original_func.\"\"\"\n # TODO(mdan): Push this block higher in tf.function's call stack.\n try:\n return autograph.converted_call(\n original_func,\n autograph.ConversionOptions(\n recursive=True,\n optional_features=autograph_options,\n force_conversion=True,\n ), args, kwargs)\n except Exception as e: # pylint:disable=broad-except\n if hasattr(e, \"ag_error_metadata\"):\n raise e.ag_error_metadata.to_exception(type(e))\n else:\n raise\n\n # Wrapping around a decorator allows checks like tf_inspect.getargspec\n # to be accurate.\n converted_func = tf_decorator.make_decorator(original_func, wrapper)\n python_func = tf_decorator.rewrap(python_func, original_func,\n converted_func)\n\n func_outputs = python_func(*func_args, **func_kwargs)\n\n # invariant: `func_outputs` contains only Tensors, CompositeTensors,\n # TensorArrays and `None`s.\n func_outputs = nest.map_structure(convert, func_outputs,\n expand_composites=True)\n\n check_mutation(func_args_before, func_args)\n check_mutation(func_kwargs_before, func_kwargs)\n finally:\n current_scope.set_use_resource(default_use_recource)\n\n # Variables in `func_args`, `func_kwargs` should be explicit inputs\n # to the function, not captured inputs.\n graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access\n arg_variables = set()\n inputs = []\n for arg in (nest.flatten(func_args, expand_composites=True) +\n nest.flatten(func_kwargs, expand_composites=True)):\n if isinstance(arg, resource_variable_ops.BaseResourceVariable):\n # Even if an argument variable was not used in the function, we've\n # already manually captured the resource Tensor when creating argument\n # placeholders.\n resource_placeholder = func_graph.captures.pop(arg.handle, None)\n if resource_placeholder is None:\n continue\n arg_variables.add(arg)\n inputs.append(resource_placeholder)\n elif isinstance(arg, ops.Tensor):\n inputs.append(arg)\n variables = [v for v in graph_variables if v not in arg_variables]\n func_graph.inputs = (\n inputs +\n list(func_graph.captures.values()) +\n nest.flatten(\n [x[1] for x in func_graph.deferred_captures.values()],\n expand_composites=True))\n\n func_graph.structured_outputs = func_outputs\n # Returning a closed-over tensor does not trigger convert_to_tensor.\n func_graph.outputs.extend(\n func_graph.capture(x)\n for x in flatten(func_graph.structured_outputs)\n if x is not None)\n\n func_graph.variables = variables\n\n if add_control_dependencies:\n func_graph.control_outputs.extend(control_manager.ops_which_must_run)\n\n return func_graph\n\n\ndef maybe_captured(tensor):\n \"\"\"If t is a captured value placeholder, returns the original captured value.\n\n Args:\n tensor: Tensor.\n\n Returns:\n A tensor, 
potentially from a different Graph/FuncGraph.\n \"\"\"\n if (not isinstance(tensor, ops.EagerTensor) and\n tensor.op.graph.building_function and tensor.op.type == \"Placeholder\"):\n for input_t, placeholder_t in tensor.op.graph.captures.items():\n if tensor == placeholder_t:\n return maybe_captured(input_t)\n # pylint: enable=protected-access\n return tensor\n\n\ndef device_stack_has_callable(device_stack):\n \"\"\"Checks whether a device stack contains a callable.\"\"\"\n return any(callable(spec._device_name_or_function) # pylint: disable=protected-access\n for spec in device_stack.peek_objs())\n\n\ndef check_mutation(n1, n2):\n \"\"\"Check if two list of arguments are exactly the same.\"\"\"\n errmsg = (\"Function to be traced should not modify structure of input \"\n \"arguments. Check if your function has list and dictionary \"\n \"operations that alter input arguments, \"\n \"such as `list.pop`, `list.append`\")\n try:\n nest.assert_same_structure(n1, n2, expand_composites=True)\n except ValueError:\n raise ValueError(errmsg)\n\n for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),\n nest.flatten(n2, expand_composites=True)):\n if arg1 is not arg2:\n raise ValueError(errmsg)\n\n\n# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.\ndef flatten(sequence):\n \"\"\"Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.\n\n Args:\n sequence: A nested structure of Tensors, CompositeTensors, and\n TensorArrays.\n\n Returns:\n A list of tensors.\n \"\"\"\n flat_sequence = nest.flatten(sequence, expand_composites=True)\n return [\n item.flow if isinstance(item, tensor_array_ops.TensorArray) else item\n for item in flat_sequence]\n\n\n# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.\ndef pack_sequence_as(structure, flat_sequence):\n \"\"\"Like `nest.pack_sequence_as` but also builds TensorArrays from flows.\n\n Args:\n structure: The structure to pack into. 
May contain Tensors,\n CompositeTensors, or TensorArrays.\n flat_sequence: An iterable containing tensors.\n\n Returns:\n A nested structure.\n\n Raises:\n AssertionError if `structure` and `flat_sequence` are not compatible.\n \"\"\"\n flat_sequence = list(flat_sequence)\n flattened_structure = nest.flatten(structure, expand_composites=True)\n if len(flattened_structure) != len(flat_sequence):\n raise ValueError(\"Mismatch in element count\")\n for i in range(len(flat_sequence)):\n if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):\n flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(\n old_ta=flattened_structure[i], flow=flat_sequence[i])\n return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)\n\n\ndef _create_substitute_placeholder(value, name=None, dtype=None):\n \"\"\"Creates a placeholder for `value` and propagates shape info to it.\"\"\"\n # Note: setting ops.control_dependencies(None) ensures we always put\n # capturing placeholders outside of any control flow context.\n with ops.control_dependencies(None):\n placeholder = graph_placeholder(\n dtype=dtype or value.dtype, shape=value.shape, name=name)\n custom_gradient.copy_handle_data(value, placeholder)\n return placeholder\n\n\ndef _get_defun_inputs_from_args(args, names, flat_shapes=None):\n \"\"\"Maps Python function positional args to graph-construction inputs.\"\"\"\n return _get_defun_inputs(\n args, names, structure=args, flat_shapes=flat_shapes)\n\n\ndef _get_defun_inputs(args, names, structure, flat_shapes=None):\n \"\"\"Maps python function args to graph-construction inputs.\n\n Args:\n args: A flat list of user-specified arguments.\n names: A list of strings with user-specified argument names, same length as\n `args`. May be `None`, in which case a generic name is used.\n structure: The original argument list or dictionary.\n flat_shapes: A flat list of values that are either `None` or\n instances of `TensorShape`. If provided, then length must match\n that of `nest.flatten(args, expand_composites=True)`; and locations where\n `args` are instances of `Tensor` must have a corresponding `TensorShape`\n in `flat_shapes`. May be `None`, in which case exact shapes are read\n directly from the args.\n\n Returns:\n Placeholders with the same structure as `structure`.\n\n Raises:\n RuntimeError: if `flat_shapes` is provided, but\n `len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.\n RuntimeError: if a shape from `flat_shapes` is not None\n for an argument that is not a `Tensor`, `TensorSpec`,\n or `ResourceVariable`.\n \"\"\"\n func_graph = ops.get_default_graph()\n function_inputs = []\n if names is None:\n names = [None] * len(args)\n if flat_shapes is None:\n shapes_iter = itertools.repeat(None)\n else:\n len_flat_args = len(nest.flatten(args, expand_composites=True))\n if len_flat_args != len(flat_shapes):\n raise RuntimeError(\n \"Length of fully flat shapes (%d) must match that of \"\n \"flatten(args) (%d). 
args: %s, flat_shapes: %s\"\n % (len(flat_shapes),\n len_flat_args,\n args,\n flat_shapes))\n shapes_iter = iter(flat_shapes)\n for arg_value, name in zip(args, names):\n flattened = nest.flatten(arg_value, expand_composites=True)\n tensor_specs = [\n arg for arg in flattened if isinstance(arg, tensor_spec.TensorSpec)\n ]\n specified_names = [arg.name for arg in tensor_specs if arg.name]\n if specified_names and len(specified_names) < len(tensor_specs):\n raise ValueError(\"If specifying TensorSpec names for nested structures, \"\n \"either zero or all names have to be specified.\")\n\n for arg in flattened:\n # We have a shape entry for each arg, regadless of whether it's a real\n # Tensor or not. For non-tensor entries it should be None.\n shape = next(shapes_iter)\n if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):\n if isinstance(arg, tensor_spec.TensorSpec) and arg.name:\n requested_name = arg.name\n else:\n requested_name = name\n placeholder_shape = shape if shape is not None else arg.shape\n try:\n placeholder = graph_placeholder(\n arg.dtype, placeholder_shape,\n name=requested_name)\n except ValueError:\n # Sometimes parameter names are not valid op names, so fall back to\n # unnamed placeholders.\n placeholder = graph_placeholder(arg.dtype, placeholder_shape)\n if name is not None:\n # Record the requested/user-specified name in case it's different than\n # the uniquified name, for validation when exporting signatures.\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_user_specified_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))\n function_inputs.append(placeholder)\n elif isinstance(arg, resource_variable_ops.BaseResourceVariable):\n # Capture arg variables to create placeholders for them. These will be\n # removed as captures after the function is traced (since otherwise we'd\n # just add it back with a new placeholder when the variable was\n # referenced).\n placeholder = func_graph.capture(arg.handle, name=name)\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_user_specified_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(name)))\n function_inputs.append(arg)\n else:\n if shape is not None:\n raise RuntimeError(\n \"Expected provided shape override to be None for arg that isn't \"\n \"a Tensor, but saw arg: '%s', shape: '%s'. args: %s\"\n % (arg, shape, args))\n function_inputs.append(arg)\n return nest.pack_sequence_as(structure, function_inputs,\n expand_composites=True)\n\n\ndef _get_defun_inputs_from_kwargs(kwargs, flat_shapes):\n \"\"\"Maps Python function keyword args to graph-construction inputs.\"\"\"\n if kwargs:\n names, args = zip(*sorted(kwargs.items()))\n else:\n names = []\n args = []\n return _get_defun_inputs(\n args, names, structure=kwargs, flat_shapes=flat_shapes)\n\n\ndef dismantle_func_graph(func_graph):\n \"\"\"Removes reference cycles in `func_graph` FuncGraph.\n\n Helpful for making sure the garbage collector doesn't need to run when\n the FuncGraph goes out of scope, e.g. in tests using defun with\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).\n\n Args:\n func_graph: A `FuncGraph` object to destroy. 
`func_graph` is unusable\n after this function.\n \"\"\"\n # TODO(b/115366440): Delete this method when a custom OrderedDict is added.\n # Clearing captures using clear() leaves some cycles around.\n while func_graph.captures:\n func_graph.captures.popitem()\n memory.dismantle_ordered_dict(func_graph.captures)\n while func_graph.deferred_captures:\n func_graph.deferred_captures.popitem()\n memory.dismantle_ordered_dict(func_graph.deferred_captures)\n ops.dismantle_graph(func_graph)\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for core.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport pickle\nimport threading\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import core\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import execute as execute_lib\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_resource_variable_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import variables\n\n\ndef execute(op_name, num_outputs, inputs, attrs=None):\n return execute_lib.execute(\n op_name, num_outputs, inputs, attrs, context.context())\n\n\ndef truncated_normal(shape):\n return execute(\n b'TruncatedNormal',\n 1,\n inputs=[shape],\n attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',\n shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]\n\n\ndef current_device():\n return constant_op.constant(1.).device\n\n\ndef configure_virtual_cpus():\n cpus = config.list_physical_devices('CPU')\n # Set 2 virtual CPUs\n config.set_virtual_device_configuration(cpus[0], [\n context.VirtualDeviceConfiguration(),\n context.VirtualDeviceConfiguration()\n ])\n\n\nclass TFETest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n super(TFETest, self).setUp()\n configure_virtual_cpus()\n\n def _test_hashable(self, a, b, hashable):\n if hashable:\n self.assertIsInstance(b, collections.Hashable)\n self.assertLen(set([a, b]), 2)\n else:\n # TODO(gjn): Figure out how to make this work for tf.Tensor\n # self.assertNotIsInstance(b, collections.Hashable)\n with self.assertRaisesRegexp(TypeError, 'unhashable'):\n set([a, b])\n\n def testEquality(self):\n default = 
ops.Tensor._USE_EQUALITY\n\n def _v1_check(a, b):\n self.assertEqual(a, a)\n self.assertIs(a, a)\n self.assertNotEqual(a, 1.0)\n self.assertIsNot(a, 1.0)\n self.assertNotEqual(a, b)\n self.assertIsNot(a, b)\n\n def _v2_check(a, b):\n self.assertEqual(a, a)\n self.assertIs(a, a)\n self.assertEqual(a, 1.0)\n self.assertIsNot(a, 1.0)\n self.assertEqual(a, b)\n self.assertIsNot(a, b)\n\n constant_a = constant_op.constant(1.0)\n constant_b = constant_op.constant(1.0)\n\n ops.disable_tensor_equality()\n self._test_hashable(constant_a, constant_b, True)\n _v1_check(constant_a, constant_b)\n ops.enable_tensor_equality()\n _v2_check(constant_a, constant_b)\n self._test_hashable(constant_a, constant_b, False)\n\n variable_a = variables.Variable(1.0)\n variable_b = variables.Variable(1.0)\n\n ops.disable_tensor_equality()\n _v1_check(variable_a, variable_b)\n self._test_hashable(variable_a, variable_b, True)\n ops.enable_tensor_equality()\n _v2_check(variable_a, variable_b)\n self._test_hashable(variable_a, variable_b, True)\n\n if default:\n ops.enable_tensor_equality()\n else:\n ops.disable_tensor_equality()\n\n # We only test numpy behaviour in v2 mode since we'd like to match that.\n numpy_a = np.array(1.0)\n numpy_b = np.array(1.0)\n _v2_check(numpy_a, numpy_b)\n self._test_hashable(numpy_a, numpy_b, False)\n\n def testEqualityNan(self):\n default = ops.Tensor._USE_EQUALITY\n\n def _v1_check(a, b):\n self.assertEqual(a, a)\n self.assertIs(a, a)\n self.assertNotEqual(a, float('nan'))\n self.assertIsNot(a, float('nan'))\n self.assertNotEqual(a, b)\n self.assertIsNot(a, b)\n\n def _v2_check(a, b):\n self.assertNotEqual(a, a)\n self.assertIs(a, a)\n self.assertNotEqual(a, float('nan'))\n self.assertIsNot(a, float('nan'))\n self.assertNotEqual(a, b)\n self.assertIsNot(a, b)\n\n constant_a = constant_op.constant(float('nan'))\n constant_b = constant_op.constant(float('nan'))\n\n ops.disable_tensor_equality()\n self._test_hashable(constant_a, constant_b, True)\n _v1_check(constant_a, constant_b)\n ops.enable_tensor_equality()\n _v2_check(constant_a, constant_b)\n self._test_hashable(constant_a, constant_b, False)\n\n variable_a = variables.Variable(float('nan'))\n variable_b = variables.Variable(float('nan'))\n\n ops.disable_tensor_equality()\n _v1_check(variable_a, variable_b)\n self._test_hashable(variable_a, variable_b, True)\n ops.enable_tensor_equality()\n _v2_check(variable_a, variable_b)\n self._test_hashable(variable_a, variable_b, True)\n\n if default:\n ops.enable_tensor_equality()\n else:\n ops.disable_tensor_equality()\n\n numpy_a = np.array(float('nan'))\n numpy_b = np.array(float('nan'))\n _v2_check(numpy_a, numpy_b)\n self._test_hashable(numpy_a, numpy_b, False)\n\n def testContext(self):\n ctx = context.Context()\n self.assertTrue(ctx.executing_eagerly())\n\n self.assertEqual('', ctx.scope_name)\n ctx.scope_name = 'foo'\n self.assertEqual('foo', ctx.scope_name)\n\n self.assertEqual(context.SYNC, ctx.execution_mode)\n ctx.execution_mode = context.ASYNC\n self.assertEqual(context.ASYNC, ctx.execution_mode)\n ctx.execution_mode = context.SYNC\n self.assertEqual(context.SYNC, ctx.execution_mode)\n\n self.assertIsNone(ctx.summary_writer)\n ctx.summary_writer = 'mock'\n self.assertEqual('mock', ctx.summary_writer)\n self.assertIsNone(ctx.summary_recording)\n ctx.summary_recording = 'mock'\n self.assertEqual('mock', ctx.summary_recording)\n self.assertIsNone(ctx.summary_step)\n ctx.summary_step = 'mock'\n self.assertEqual('mock', ctx.summary_step)\n\n self.assertEqual('', ctx.device_name)\n 
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())\n with ctx.device('GPU:0'):\n self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',\n ctx.device_name)\n self.assertEqual(ctx.device_name, ctx.device_spec.to_string())\n with ctx.device(None):\n self.assertEqual('', ctx.device_name)\n self.assertEqual(ctx.device_name, ctx.device_spec.to_string())\n with ctx.device('CPU:0'):\n self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',\n ctx.device_name)\n self.assertEqual(ctx.device_name, ctx.device_spec.to_string())\n\n has_cpu_device = False\n for x in ctx.devices():\n has_cpu_device = has_cpu_device or 'CPU' in x\n self.assertTrue(has_cpu_device)\n del ctx\n\n def testAsyncBasic(self):\n ctx = context.Context(execution_mode=context.ASYNC)\n ctx.ensure_initialized()\n has_cpu_device = False\n for x in ctx.devices():\n has_cpu_device = has_cpu_device or 'CPU' in x\n self.assertTrue(has_cpu_device)\n del ctx\n\n def testRunMetadata(self):\n context.enable_run_metadata()\n t = constant_op.constant(1.0)\n _ = t + t # Runs an operation which will be in the RunMetadata\n run_metadata = context.export_run_metadata()\n context.disable_run_metadata()\n step_stats = run_metadata.step_stats\n self.assertGreater(len(step_stats.dev_stats), 0)\n cpu_stats = step_stats.dev_stats[0]\n self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',\n cpu_stats.device)\n self.assertGreaterEqual(len(cpu_stats.node_stats), 1)\n\n def testMultiCpuPlacement(self):\n with ops.device('cpu:1'):\n x = constant_op.constant(1.0)\n y = array_ops.identity(x)\n self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')\n self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')\n\n @test_util.run_gpu_only\n def testShouldCopy(self):\n with ops.device('gpu:0'):\n x = constant_op.constant(1.0)\n y = array_ops.identity(x)\n # The value we're testing y.device against will depend on what the behavior\n # of not explicitly specifying a device in the context is. 
This behavior is\n # subject to change (for example, in the future we may want to use GPUs, if\n # available, when no device is explicitly provided)\n self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')\n\n def testContextSwitchStackContainsEagerMode(self):\n # Eager execution has been enabled, and no other context switch has\n # occurred, so `context_switches` should contain exactly one entry.\n self.assertEqual(len(context.context().context_switches.stack), 1)\n switch = context.context().context_switches.stack[0]\n\n # The entry should log that eager mode was entered.\n self.assertIs(switch.enter_context_fn, context.eager_mode)\n\n # It is not possible to build a graph function when eager execution\n # is enabled; the stack entry should reflect this fact.\n self.assertFalse(switch.is_building_function)\n\n @test_util.run_gpu_only\n def testInt32GPU(self):\n with ops.device('gpu:0'):\n xent = nn_ops.sparse_softmax_cross_entropy_with_logits(\n logits=[[0.0, 0.0]], labels=[0])\n self.assertAllClose(xent, [0.69314718])\n\n def _runInThread(self, target, args):\n t = threading.Thread(target=target, args=args)\n try:\n t.start()\n t.join()\n except Exception as e:\n raise e\n\n # Test that different thread local values are initialized to the same values\n # in different threads.\n def testContextThreadLocalMembers(self):\n\n def get_context_values(ctx):\n return [\n ctx.executing_eagerly(),\n ctx.scope_name,\n ctx.summary_writer,\n ctx.summary_recording,\n ctx.summary_step,\n ctx.device_name,\n ctx.num_gpus()\n ]\n\n def get_values(ctx, values):\n values.extend(get_context_values(ctx))\n\n context_values = []\n ctx = context.Context()\n self._runInThread(get_values, (ctx, context_values))\n self.assertAllEqual(context_values, get_context_values(ctx))\n\n @test_util.run_gpu_only\n def testContextConfig(self):\n ctx = context.Context(config=config_pb2.ConfigProto(\n device_count={'GPU': 0}))\n self.assertEquals(0, ctx.num_gpus())\n\n def testPickle(self):\n tmp_dir = self.get_temp_dir()\n fname = os.path.join(tmp_dir, 't.pickle')\n with open(fname, 'wb') as f:\n t = constant_op.constant(10.0)\n pickle.dump(t, f)\n\n with open(fname, 'rb') as f:\n t = pickle.load(f)\n self.assertAllEqual(t.numpy(), 10.0)\n\n @test_util.run_gpu_only\n def testDevicePlacementEnforcesConsistency(self):\n cpu = context.device('cpu:0')\n gpu = context.device('gpu:0')\n cpu.__enter__()\n self.assertEndsWith(current_device(), 'CPU:0')\n gpu.__enter__()\n self.assertEndsWith(current_device(), 'GPU:0')\n with self.assertRaisesRegexp(\n RuntimeError, 'Exiting device scope without proper scope nesting'):\n cpu.__exit__()\n self.assertEndsWith(current_device(), 'GPU:0')\n gpu.__exit__()\n self.assertEndsWith(current_device(), 'CPU:0')\n\n @test_util.run_gpu_only\n def testReEntrant(self):\n cpu = context.device('cpu:0')\n gpu = context.device('gpu:0')\n with cpu:\n with gpu:\n with gpu:\n self.assertEndsWith(current_device(), 'GPU:0')\n self.assertEndsWith(current_device(), 'GPU:0')\n self.assertEndsWith(current_device(), 'CPU:0')\n with gpu:\n self.assertEndsWith(current_device(), 'GPU:0')\n\n @test_util.run_gpu_only\n def testTensorPlacement(self):\n x = constant_op.constant(1.).gpu()\n with context.device('gpu:0'):\n y = constant_op.constant(2.)\n # Add would fail if t2 were not on GPU\n result = execute(\n b'Add', 1, inputs=[x, y],\n attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()\n self.assertEqual(3, result)\n\n @test_util.run_gpu_only\n def testResourceTensorPlacement(self):\n with 
context.device('gpu:0'):\n v = resource_variable_ops.ResourceVariable(1.0)\n with context.device('cpu:0'):\n # Check that even though we specified the cpu device we'll run the read op\n # in the device where the handle is.\n self.assertAllEqual(\n gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)\n\n @test_util.run_gpu_only\n def testCopyBetweenDevices(self):\n x = constant_op.constant([[1., 2.], [3., 4.]])\n x = x.cpu()\n x = x.gpu()\n x = x.gpu()\n x = x.cpu()\n\n # Invalid device\n with self.assertRaises(RuntimeError):\n x.gpu(context.context().num_gpus() + 1)\n\n @test_util.run_gpu_only\n def testCopyBetweenDevicesAsync(self):\n with context.execution_mode(context.ASYNC):\n x = constant_op.constant([[1., 2.], [3., 4.]])\n x = x.cpu()\n x = x.gpu()\n x = x.gpu()\n x = x.cpu()\n context.async_wait()\n\n # Invalid device\n with self.assertRaises(RuntimeError):\n x.gpu(context.context().num_gpus() + 1)\n context.async_wait()\n context.async_clear_error()\n\n @test_util.run_gpu_only\n def testCopyScope(self):\n constant = constant_op.constant(1.0)\n with ops.device('gpu:0'):\n with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n c = constant + 1.0\n self.assertAllEqual(c, 2.0)\n\n def testPyFunctionNullContext(self):\n def simple_fn(unused_handle):\n return 1.\n\n @def_function.function\n def test_fn(v):\n script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)\n return 1.\n\n test_var = variables.Variable([2., 3.])\n self.assertAllEqual(test_fn(test_var), 1.0)\n\n @test_util.run_gpu_only\n def testNumpyForceCPU(self):\n cpu = constant_op.constant([[1., 2.], [3., 4.]])\n c2g = cpu.gpu()\n self.assertAllEqual(c2g, cpu.numpy())\n\n def testCopyFromCPUToCPU(self):\n ta = constant_op.constant([[1, 2], [3, 4]])\n tb = ta.cpu()\n\n self.assertNotEqual(id(ta), id(tb))\n self.assertAllEqual(ta, tb.numpy())\n\n def testRegisterExceptionClass(self):\n with self.assertRaises(TypeError):\n pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)\n pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access\n\n # TODO(agarwal): add tests passing incorrect typed values to attrs.\n def testExecuteBasic(self):\n three = constant_op.constant(3)\n five = constant_op.constant(5)\n product = execute(\n b'Mul',\n num_outputs=1,\n inputs=[three, five],\n attrs=('T', three.dtype.as_datatype_enum))[0]\n self.assertAllEqual(15, product)\n\n def testExecuteBasicAsync(self):\n with context.execution_mode(context.ASYNC):\n three = constant_op.constant(3)\n five = constant_op.constant(5)\n product = execute(\n b'Mul',\n num_outputs=1,\n inputs=[three, five],\n attrs=('T', three.dtype.as_datatype_enum))[0]\n self.assertAllEqual(15, product)\n # Error: Invalid arguments\n context.set_execution_mode(context.ASYNC)\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'MatMul',\n num_outputs=1,\n inputs=[three, five],\n attrs=('transpose_a', False, 'transpose_b', False, 'T',\n three.dtype.as_datatype_enum))\n context.async_wait()\n context.async_clear_error()\n context.context().execution_mode = context.SYNC\n\n def testExecuteTooManyNumOutputs(self):\n # num_outputs provided is 50, but only one output is produced.\n product = execute(\n b'Mul',\n num_outputs=50,\n inputs=[constant_op.constant(3),\n constant_op.constant(5)],\n attrs=('T', dtypes.int32.as_datatype_enum))[0]\n self.assertAllEqual(15, product)\n\n def testExecuteTooFewNumOutputs(self):\n # num_outputs provided is 0, but one output is produced.\n with 
self.assertRaises(errors.InvalidArgumentError):\n _ = execute(\n b'Mul',\n num_outputs=0,\n inputs=[constant_op.constant(3),\n constant_op.constant(5)],\n attrs=('T', dtypes.int32.as_datatype_enum))[0]\n\n @test_util.run_gpu_only\n def testMatMulGPU(self):\n three = constant_op.constant([[3.]]).gpu()\n five = constant_op.constant([[5.]]).gpu()\n product = execute(\n b'MatMul',\n num_outputs=1,\n inputs=[three, five],\n attrs=('transpose_a', False, 'transpose_b', False, 'T',\n three.dtype.as_datatype_enum))[0]\n self.assertAllEqual([[15.0]], product)\n\n def testExecuteStringAttr(self):\n checked_three = execute(\n b'CheckNumerics',\n num_outputs=1,\n inputs=[constant_op.constant(3.)],\n attrs=('message', 'just checking', 'T',\n dtypes.float32.as_datatype_enum))[0]\n self.assertEqual([[3]], checked_three.numpy())\n\n def testExecuteStringAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n _ = execute(\n b'CheckNumerics',\n num_outputs=1,\n inputs=[constant_op.constant(3.)],\n attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))\n\n def testExecuteFloatAttr(self):\n almost_equal = execute(\n b'ApproximateEqual',\n num_outputs=1,\n inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],\n attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]\n self.assertTrue(almost_equal)\n\n def testExecuteFloatAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n _ = execute(\n b'ApproximateEqual',\n num_outputs=1,\n inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],\n attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))\n\n def testExecuteIntAttr(self):\n total = execute(\n b'AddN',\n num_outputs=1,\n inputs=[constant_op.constant(3), constant_op.constant(4)],\n attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]\n self.assertAllEqual(7, total)\n\n def testExecuteIntAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n _ = execute(\n b'AddN',\n num_outputs=1,\n inputs=[constant_op.constant(3), constant_op.constant(4)],\n attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))\n\n # Looks like we don't have an existing op with list(bool) attrs.\n def testExecuteBoolAttr(self):\n product = execute(\n b'MatMul',\n num_outputs=1,\n inputs=[constant_op.constant([[3]]),\n constant_op.constant([[5]])],\n attrs=('transpose_a', True, 'transpose_b', False, 'T',\n dtypes.int32.as_datatype_enum))[0]\n self.assertAllEqual([[15]], product)\n\n def testExecuteShapeAttr(self):\n execute(\n b'VarHandleOp',\n num_outputs=1,\n inputs=[],\n attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,\n 'container', '', 'shared_name', ''))\n\n def testExecuteShapeAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'VarHandleOp',\n num_outputs=1,\n inputs=[],\n attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,\n 'container', '', 'shared_name', ''))\n\n def testExecuteListStringAttr(self):\n execute(\n b'TensorSummary',\n num_outputs=1,\n inputs=[constant_op.constant(3.0)],\n attrs=('T', dtypes.float32.as_datatype_enum, 'description',\n 'tensor_summary', 'labels', ['3',\n 'summary'], 'display_name', 'test'))\n\n def testExecuteListStringAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'TensorSummary',\n num_outputs=1,\n inputs=[constant_op.constant(3.0)],\n attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',\n 'labels', 3, 'display_name', 'test'))\n\n def testExecuteListStringAttrBadListValue(self):\n with 
self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'TensorSummary',\n num_outputs=1,\n inputs=[constant_op.constant(3.0)],\n attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',\n 'labels', [3], 'display_name', 'test'))\n\n def testExecuteListFloatAttr(self):\n b = execute(\n b'Bucketize',\n num_outputs=1,\n inputs=[constant_op.constant([3.0, 5.0, 7.0])],\n attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,\n 6.0]))[0]\n self.assertAllEqual([0, 1, 2], b)\n\n def testExecuteListFloatAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Bucketize',\n num_outputs=1,\n inputs=[constant_op.constant([3.0, 5.0, 7.0])],\n attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))\n\n def testExecuteListFloatAttrBadListValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Bucketize',\n num_outputs=1,\n inputs=[constant_op.constant([3.0, 5.0, 7.0])],\n attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',\n ['4.0', '6.0']))\n\n def testExecuteListIntAttr(self):\n b = execute(\n b'Squeeze',\n num_outputs=1,\n inputs=[constant_op.constant([[[3.0]]])],\n attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]\n self.assertAllEqual([3], b)\n\n def testExecuteListIntAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Squeeze',\n num_outputs=1,\n inputs=[constant_op.constant([[[3.0]]])],\n attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))\n\n def testExecuteListIntAttrBadListValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Squeeze',\n num_outputs=1,\n inputs=[constant_op.constant([[[3.0]]])],\n attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',\n ['0', '2']))\n\n def testExecuteListTypeListShapeAttr(self):\n execute(\n b'Barrier',\n num_outputs=1,\n inputs=[],\n attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',\n [[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))\n\n def testExecuteListTypeAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Barrier',\n num_outputs=1,\n inputs=[],\n attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',\n [[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))\n\n def testExecuteListTypeAttrBadListValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Barrier',\n num_outputs=1,\n inputs=[],\n attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,\n 'container', '', 'shared_name', ''))\n\n def testExecuteListShapeAttrBadValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Barrier',\n num_outputs=1,\n inputs=[],\n attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',\n [1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))\n\n def testExecuteListShapeAttrBadListValue(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Barrier',\n num_outputs=1,\n inputs=[],\n attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',\n [1], 'capacity', -1, 'container', '', 'shared_name', ''))\n\n def testExecuteMultipleOutputs(self):\n split_dim = 1\n value = [[0, 1, 2], [3, 4, 5]]\n x1, x2, x3 = execute(\n b'Split',\n num_outputs=3,\n inputs=[constant_op.constant(split_dim),\n constant_op.constant(value)],\n attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))\n self.assertAllEqual([[0], [3]], x1)\n self.assertAllEqual([[1], [4]], x2)\n 
self.assertAllEqual([[2], [5]], x3)\n\n def testExecuteBadNumOutputsArgument(self):\n with self.assertRaises(TypeError):\n execute(\n b'Relu', [],\n inputs=[constant_op.constant(3.0)],\n attrs=('T', dtypes.float32.as_datatype_enum))\n\n def testExecuteUnknownOp(self):\n with self.assertRaises(errors.NotFoundError):\n execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)\n\n def testExecuteUnknownAttr(self):\n with self.assertRaises(errors.InvalidArgumentError):\n execute(\n b'Identity',\n num_outputs=1,\n inputs=[constant_op.constant(3)],\n attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))\n\n def testComposition(self):\n\n def add(x, y):\n return execute(\n b'Add',\n num_outputs=1,\n inputs=[x, y],\n attrs=('T', dtypes.int32.as_datatype_enum))[0]\n\n x = constant_op.constant(1)\n three_x = add(add(x, x), x)\n self.assertEquals(dtypes.int32, three_x.dtype)\n self.assertAllEqual(3, three_x)\n\n @test_util.run_gpu_only\n def testOperationWithNoInputsRunsOnDevice(self):\n shape = constant_op.constant([], dtype=dtypes.int32)\n\n # x: Run the \"TruncatedNormal\" op CPU and copy result to GPU.\n x = truncated_normal(shape).gpu()\n # y: Explicitly run the \"TruncatedNormal\" op on GPU.\n with context.device('gpu:0'):\n y = truncated_normal(shape)\n # Add would fail if x and y were not on the same device.\n execute(\n b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))\n\n def testInvalidDevice(self):\n with self.assertRaises(ValueError):\n with context.device('pu:0'):\n _ = constant_op.constant(1)\n\n def testConvertMixedEagerTensors(self):\n array = np.zeros((), dtype=np.float32)\n tensor = constant_op.constant(0., dtype=dtypes.float32)\n types, tensors = execute_lib.convert_to_mixed_eager_tensors(\n [array, tensor], context.context())\n for typ, t in zip(types, tensors):\n self.assertEquals(typ, dtypes.float32)\n self.assertIsInstance(t, ops.EagerTensor)\n\n def testConvertMixedEagerTensorsWithVariables(self):\n var = resource_variable_ops.ResourceVariable(1.0)\n types, tensors = execute_lib.convert_to_mixed_eager_tensors(\n ['foo', var], context.context())\n self.assertAllEqual([dtypes.string, dtypes.float32], types)\n for t in tensors:\n self.assertIsInstance(t, ops.EagerTensor)\n\n # TODO(b/123637108): re-enable\n @test_util.run_gpu_only\n def disabled_testSmallIntegerOpsForcedToCPU(self):\n a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)\n b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)\n with context.device('gpu:0'):\n c = a + b\n\n # Op forced to CPU since all constants are integers and small.\n self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0')\n\n a = array_ops.zeros((8, 10), dtype=dtypes.int64)\n b = array_ops.ones((8, 10), dtype=dtypes.int64)\n\n with context.device('gpu:0'):\n c = a + b\n\n # Op not forced to CPU since the tensors are larger than 64 elements.\n self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')\n\n a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)\n b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)\n with context.device('gpu:0'):\n c = a + b\n\n # Op not forced to CPU since the constants are not integers.\n self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')\n\n def testExecutionModeIsStoredThreadLocal(self):\n cv = threading.Condition()\n count = [0]\n num_threads = 10\n\n def execution_mode_test(cond, count, num_threads, ctx, mode):\n cond.acquire()\n # Ensure that all threads set their mode 
simultaneously\n # Note that this is not a simple assignment, as the execution_mode is an\n # @property with a custom setter.\n ctx.execution_mode = mode\n count[0] = count[0] + 1\n if count[0] < num_threads:\n cond.wait()\n else:\n cond.notify_all()\n cond.release()\n self.assertEqual(ctx.execution_mode, mode)\n\n ctx = context.Context()\n threads = []\n for i in range(num_threads):\n t = threading.Thread(\n target=execution_mode_test,\n args=(cv, count, num_threads, ctx,\n context.SYNC if i % 2 == 0 else context.ASYNC))\n t.start()\n threads.append(t)\n\n for t in threads:\n t.join()\n\n\nclass SendRecvTest(test_util.TensorFlowTestCase):\n\n cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'\n\n def _send(self, tensor, tensor_name, to_device):\n return execute(\n b'_Send', num_outputs=0, inputs=[tensor],\n attrs=('T', tensor.dtype.as_datatype_enum,\n 'tensor_name', tensor_name,\n 'send_device', tensor.device,\n 'send_device_incarnation', 0,\n 'recv_device', to_device,\n 'client_terminated', True))\n\n def _recv(self, dtype, tensor_name, from_device):\n device_name = context.context().device_name\n if not device_name:\n device_name = self.cpu_device\n return execute(\n b'_Recv', num_outputs=1, inputs=[],\n attrs=('tensor_type', dtype.as_datatype_enum,\n 'tensor_name', tensor_name,\n 'send_device', from_device,\n 'send_device_incarnation', 0,\n 'recv_device', device_name,\n 'client_terminated', False))[0]\n\n def setUp(self):\n super(SendRecvTest, self).setUp()\n configure_virtual_cpus()\n\n def testBasic(self):\n t0 = constant_op.constant(1.0)\n t1 = constant_op.constant(2.0)\n self._send(t0, 't0', self.cpu_device)\n self._send(t1, 't1', self.cpu_device)\n self.assertAllEqual(\n self._recv(dtypes.float32, 't0', self.cpu_device),\n 1.0)\n self.assertAllEqual(\n self._recv(dtypes.float32, 't1', self.cpu_device),\n 2.0)\n\n @test_util.run_gpu_only\n def testLocalCrossDevice(self):\n gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'\n with ops.device('GPU:0'):\n t0 = constant_op.constant(1.0)\n self._send(t0, 't0', self.cpu_device)\n with ops.device('cpu:0'):\n self.assertAllEqual(\n self._recv(dtypes.float32, 't0', gpu_device_name),\n 1.0)\n self._send(constant_op.constant(2.0), 't1', gpu_device_name)\n with ops.device('GPU:0'):\n self.assertAllEqual(\n self._recv(dtypes.float32, 't1', self.cpu_device),\n 2.0)\n\n\nclass EagerTensorCacheTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n super(EagerTensorCacheTest, self).setUp()\n configure_virtual_cpus()\n\n def testCacheSkipsTensorsTooLarge(self):\n cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)\n cache.put('1', array_ops.zeros((2, 2)))\n self.assertEqual(cache.get('1'), None)\n\n cache.put('2', array_ops.zeros((2)))\n self.assertNotEqual(cache.get('2'), None)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for GRU layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import test_util as tf_test_util\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.platform import test\n\n\n@keras_parameterized.run_all_keras_modes\nclass GRULayerTest(keras_parameterized.TestCase):\n\n def test_return_sequences_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n testing_utils.layer_test(\n keras.layers.GRU,\n kwargs={'units': units,\n 'return_sequences': True},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n def test_dynamic_behavior_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))\n model = keras.models.Sequential()\n model.add(layer)\n model.compile(\n 'rmsprop',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n run_distributed=testing_utils.should_run_distributed())\n x = np.random.random((num_samples, timesteps, embedding_dim))\n y = np.random.random((num_samples, units))\n model.train_on_batch(x, y)\n\n def test_dropout_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n testing_utils.layer_test(\n keras.layers.GRU,\n kwargs={'units': units,\n 'dropout': 0.1,\n 'recurrent_dropout': 0.1},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n @parameterized.parameters([0, 1, 2])\n def test_implementation_mode_GRU(self, implementation_mode):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n testing_utils.layer_test(\n keras.layers.GRU,\n kwargs={'units': units,\n 'implementation': implementation_mode},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n def test_reset_after_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=num_samples,\n test_samples=0,\n input_shape=(timesteps, embedding_dim),\n num_classes=units)\n y_train = keras.utils.to_categorical(y_train, units)\n\n inputs = keras.layers.Input(shape=[timesteps, embedding_dim])\n gru_layer = keras.layers.GRU(units,\n reset_after=True)\n output = gru_layer(inputs)\n gru_model = keras.models.Model(inputs, output)\n gru_model.compile(\n 'rmsprop',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n run_distributed=testing_utils.should_run_distributed())\n gru_model.fit(x_train, y_train)\n gru_model.predict(x_train)\n\n def test_with_masking_layer_GRU(self):\n layer_class = keras.layers.GRU\n inputs = np.random.random((2, 3, 4))\n targets = 
np.abs(np.random.random((2, 3, 5)))\n targets /= targets.sum(axis=-1, keepdims=True)\n model = keras.models.Sequential()\n model.add(keras.layers.Masking(input_shape=(3, 4)))\n model.add(layer_class(units=5, return_sequences=True, unroll=False))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly(),\n run_distributed=testing_utils.should_run_distributed())\n model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)\n\n def test_statefulness_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n layer_class = keras.layers.GRU\n\n model = keras.models.Sequential()\n model.add(\n keras.layers.Embedding(\n 4,\n embedding_dim,\n mask_zero=True,\n input_length=timesteps,\n batch_input_shape=(num_samples, timesteps)))\n layer = layer_class(\n units, return_sequences=False, stateful=True, weights=None)\n model.add(layer)\n model.compile(\n optimizer='sgd',\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n run_distributed=testing_utils.should_run_distributed())\n out1 = model.predict(np.ones((num_samples, timesteps)))\n self.assertEqual(out1.shape, (num_samples, units))\n\n # train once so that the states change\n model.train_on_batch(\n np.ones((num_samples, timesteps)), np.ones((num_samples, units)))\n out2 = model.predict(np.ones((num_samples, timesteps)))\n\n # if the state is not reset, output should be different\n self.assertNotEqual(out1.max(), out2.max())\n\n # check that output changes after states are reset\n # (even though the model itself didn't change)\n layer.reset_states()\n out3 = model.predict(np.ones((num_samples, timesteps)))\n self.assertNotEqual(out2.max(), out3.max())\n\n # check that container-level reset_states() works\n model.reset_states()\n out4 = model.predict(np.ones((num_samples, timesteps)))\n np.testing.assert_allclose(out3, out4, atol=1e-5)\n\n # check that the call to `predict` updated the states\n out5 = model.predict(np.ones((num_samples, timesteps)))\n self.assertNotEqual(out4.max(), out5.max())\n\n # Check masking\n layer.reset_states()\n\n left_padded_input = np.ones((num_samples, timesteps))\n left_padded_input[0, :1] = 0\n left_padded_input[1, :2] = 0\n out6 = model.predict(left_padded_input)\n\n layer.reset_states()\n\n right_padded_input = np.ones((num_samples, timesteps))\n right_padded_input[0, -1:] = 0\n right_padded_input[1, -2:] = 0\n out7 = model.predict(right_padded_input)\n\n np.testing.assert_allclose(out7, out6, atol=1e-5)\n\n\n@tf_test_util.run_all_in_graph_and_eager_modes\nclass GRULayerGenericTest(test.TestCase):\n\n def test_constraints_GRU(self):\n embedding_dim = 4\n layer_class = keras.layers.GRU\n k_constraint = keras.constraints.max_norm(0.01)\n r_constraint = keras.constraints.max_norm(0.01)\n b_constraint = keras.constraints.max_norm(0.01)\n layer = layer_class(\n 5,\n return_sequences=False,\n weights=None,\n input_shape=(None, embedding_dim),\n kernel_constraint=k_constraint,\n recurrent_constraint=r_constraint,\n bias_constraint=b_constraint)\n layer.build((None, None, embedding_dim))\n self.assertEqual(layer.cell.kernel.constraint, k_constraint)\n self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)\n self.assertEqual(layer.cell.bias.constraint, b_constraint)\n\n def test_from_config_GRU(self):\n layer_class = keras.layers.GRU\n for stateful in (False, True):\n l1 = layer_class(units=1, stateful=stateful)\n l2 = layer_class.from_config(l1.get_config())\n assert l1.get_config() == l2.get_config()\n\n 
def test_regularizers_GRU(self):\n embedding_dim = 4\n layer_class = keras.layers.GRU\n layer = layer_class(\n 5,\n return_sequences=False,\n weights=None,\n input_shape=(None, embedding_dim),\n kernel_regularizer=keras.regularizers.l1(0.01),\n recurrent_regularizer=keras.regularizers.l1(0.01),\n bias_regularizer='l2',\n activity_regularizer='l1')\n layer.build((None, None, 2))\n self.assertEqual(len(layer.losses), 3)\n\n x = keras.backend.variable(np.ones((2, 3, 2)))\n layer(x)\n if context.executing_eagerly():\n self.assertEqual(len(layer.losses), 4)\n else:\n self.assertEqual(len(layer.get_losses_for(x)), 1)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"`LinearOperator` acting like a diagonal matrix.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.linalg import linalg_impl as linalg\nfrom tensorflow.python.ops.linalg import linear_operator\nfrom tensorflow.python.ops.linalg import linear_operator_util\nfrom tensorflow.python.util.tf_export import tf_export\n\n__all__ = [\"LinearOperatorDiag\",]\n\n\n@tf_export(\"linalg.LinearOperatorDiag\")\nclass LinearOperatorDiag(linear_operator.LinearOperator):\n \"\"\"`LinearOperator` acting like a [batch] square diagonal matrix.\n\n This operator acts like a [batch] diagonal matrix `A` with shape\n `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a\n batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is\n an `N x N` matrix. This matrix `A` is not materialized, but for\n purposes of broadcasting this shape will be relevant.\n\n `LinearOperatorDiag` is initialized with a (batch) vector.\n\n ```python\n # Create a 2 x 2 diagonal linear operator.\n diag = [1., -1.]\n operator = LinearOperatorDiag(diag)\n\n operator.to_dense()\n ==> [[1., 0.]\n [0., -1.]]\n\n operator.shape\n ==> [2, 2]\n\n operator.log_abs_determinant()\n ==> scalar Tensor\n\n x = ... Shape [2, 4] Tensor\n operator.matmul(x)\n ==> Shape [2, 4] Tensor\n\n # Create a [2, 3] batch of 4 x 4 linear operators.\n diag = tf.random.normal(shape=[2, 3, 4])\n operator = LinearOperatorDiag(diag)\n\n # Create a shape [2, 1, 4, 2] vector. 
Note that this shape is compatible\n # since the batch dimensions, [2, 1], are broadcast to\n # operator.batch_shape = [2, 3].\n y = tf.random.normal(shape=[2, 1, 4, 2])\n x = operator.solve(y)\n ==> operator.matmul(x) = y\n ```\n\n #### Shape compatibility\n\n This operator acts on [batch] matrix with compatible shape.\n `x` is a batch matrix with compatible shape for `matmul` and `solve` if\n\n ```\n operator.shape = [B1,...,Bb] + [N, N], with b >= 0\n x.shape = [C1,...,Cc] + [N, R],\n and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]\n ```\n\n #### Performance\n\n Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`,\n and `x.shape = [N, R]`. Then\n\n * `operator.matmul(x)` involves `N * R` multiplications.\n * `operator.solve(x)` involves `N` divisions and `N * R` multiplications.\n * `operator.determinant()` involves a size `N` `reduce_prod`.\n\n If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and\n `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n diag,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=\"LinearOperatorDiag\"):\n r\"\"\"Initialize a `LinearOperatorDiag`.\n\n Args:\n diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.\n The diagonal of the operator. Allowed dtypes: `float16`, `float32`,\n `float64`, `complex64`, `complex128`.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. If `diag.dtype` is real, this is auto-set to `True`.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. 
See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n\n Raises:\n TypeError: If `diag.dtype` is not an allowed type.\n ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.\n \"\"\"\n\n with ops.name_scope(name, values=[diag]):\n self._diag = linear_operator_util.convert_nonref_to_tensor(\n diag, name=\"diag\")\n self._check_diag(self._diag)\n\n # Check and auto-set hints.\n if not self._diag.dtype.is_complex:\n if is_self_adjoint is False:\n raise ValueError(\"A real diagonal operator is always self adjoint.\")\n else:\n is_self_adjoint = True\n\n if is_square is False:\n raise ValueError(\"Only square diagonal operators currently supported.\")\n is_square = True\n\n super(LinearOperatorDiag, self).__init__(\n dtype=self._diag.dtype,\n graph_parents=[self._diag],\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n def _check_diag(self, diag):\n \"\"\"Static check of diag.\"\"\"\n if diag.get_shape().ndims is not None and diag.get_shape().ndims < 1:\n raise ValueError(\"Argument diag must have at least 1 dimension. \"\n \"Found: %s\" % diag)\n\n def _shape(self):\n # If d_shape = [5, 3], we return [5, 3, 3].\n d_shape = self._diag.get_shape()\n return d_shape.concatenate(d_shape[-1:])\n\n def _shape_tensor(self):\n d_shape = array_ops.shape(self._diag)\n k = d_shape[-1]\n return array_ops.concat((d_shape, [k]), 0)\n\n def _assert_non_singular(self):\n return linear_operator_util.assert_no_entries_with_modulus_zero(\n self._diag,\n message=\"Singular operator: Diagonal contained zero values.\")\n\n def _assert_positive_definite(self):\n if self.dtype.is_complex:\n message = (\n \"Diagonal operator had diagonal entries with non-positive real part, \"\n \"thus was not positive definite.\")\n else:\n message = (\n \"Real diagonal operator had non-positive diagonal entries, \"\n \"thus was not positive definite.\")\n\n return check_ops.assert_positive(\n math_ops.real(self._diag),\n message=message)\n\n def _assert_self_adjoint(self):\n return linear_operator_util.assert_zero_imag_part(\n self._diag,\n message=(\n \"This diagonal operator contained non-zero imaginary values. \"\n \" Thus it was not self-adjoint.\"))\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n x = linalg.adjoint(x) if adjoint_arg else x\n diag_mat = array_ops.expand_dims(diag_term, -1)\n return diag_mat * x\n\n def _matvec(self, x, adjoint=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n return diag_term * x\n\n def _determinant(self):\n return math_ops.reduce_prod(self._diag, axis=[-1])\n\n def _log_abs_determinant(self):\n log_det = math_ops.reduce_sum(\n math_ops.log(math_ops.abs(self._diag)), axis=[-1])\n if self.dtype.is_complex:\n log_det = math_ops.cast(log_det, dtype=self.dtype)\n return log_det\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n rhs = linalg.adjoint(rhs) if adjoint_arg else rhs\n inv_diag_mat = array_ops.expand_dims(1. 
/ diag_term, -1)\n return rhs * inv_diag_mat\n\n def _to_dense(self):\n return array_ops.matrix_diag(self._diag)\n\n def _diag_part(self):\n return self.diag\n\n def _add_to_tensor(self, x):\n x_diag = array_ops.matrix_diag_part(x)\n new_diag = self._diag + x_diag\n return array_ops.matrix_set_diag(x, new_diag)\n\n @property\n def diag(self):\n return self._diag\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras generic Python utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.platform import test\n\n\nclass HasArgTest(test.TestCase):\n\n def test_has_arg(self):\n\n def f_x(x):\n return x\n\n def f_x_args(x, *args):\n _ = args\n return x\n\n def f_x_kwargs(x, **kwargs):\n _ = kwargs\n return x\n\n self.assertTrue(keras.utils.generic_utils.has_arg(\n f_x, 'x', accept_all=False))\n self.assertFalse(keras.utils.generic_utils.has_arg(\n f_x, 'y', accept_all=False))\n self.assertTrue(keras.utils.generic_utils.has_arg(\n f_x_args, 'x', accept_all=False))\n self.assertFalse(keras.utils.generic_utils.has_arg(\n f_x_args, 'y', accept_all=False))\n self.assertTrue(keras.utils.generic_utils.has_arg(\n f_x_kwargs, 'x', accept_all=False))\n self.assertFalse(keras.utils.generic_utils.has_arg(\n f_x_kwargs, 'y', accept_all=False))\n self.assertTrue(keras.utils.generic_utils.has_arg(\n f_x_kwargs, 'y', accept_all=True))\n\n\nclass TestCustomObjectScope(test.TestCase):\n\n def test_custom_object_scope(self):\n\n def custom_fn():\n pass\n\n class CustomClass(object):\n pass\n\n with keras.utils.generic_utils.custom_object_scope(\n {'CustomClass': CustomClass, 'custom_fn': custom_fn}):\n act = keras.activations.get('custom_fn')\n self.assertEqual(act, custom_fn)\n cl = keras.regularizers.get('CustomClass')\n self.assertEqual(cl.__class__, CustomClass)\n\n\nclass SerializeKerasObjectTest(test.TestCase):\n\n def test_serialize_none(self):\n serialized = keras.utils.generic_utils.serialize_keras_object(None)\n self.assertEqual(serialized, None)\n deserialized = keras.utils.generic_utils.deserialize_keras_object(\n serialized)\n self.assertEqual(deserialized, None)\n\n\nclass SliceArraysTest(test.TestCase):\n\n def test_slice_arrays(self):\n input_a = list([1, 2, 3])\n self.assertEqual(\n keras.utils.generic_utils.slice_arrays(input_a, start=0),\n [None, None, None])\n self.assertEqual(\n keras.utils.generic_utils.slice_arrays(input_a, stop=3),\n [None, None, None])\n self.assertEqual(\n keras.utils.generic_utils.slice_arrays(input_a, start=0, stop=1),\n [None, None, None])\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"AutomaticControlDependencies and related functionality.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes as dtypes_module\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_decorator\n\n# LINT.IfChange\n# Op types that should not run in program order, e.g. because they need to run\n# asynchronously to avoid deadlock.\nASYNC_STATEFUL_OPS = [\n \"CollectiveGather\",\n \"CollectiveReduce\",\n \"CollectiveBcastSend\",\n \"CollectiveBcastRecv\",\n \"NcclAllReduce\",\n]\n\nLEGACY_RANDOM_OPS = [\n # These may be used in variable initializers -- thus their execution should\n # not be dependent on other stateful operations. This is because although\n # according to program order, tf.Variables may be created in sequence,\n # their initialization happens outside of the program order (specifically,\n # in graph mode their initialization happens by calling a grouped\n # initializer operation or in eager mode, where initialization is lifted\n # out of the tf.function and executed the first time the function is\n # executed).\n #\n # Unless there is a specific dependency between the initializers\n # themselves (e.g. one initializer depends on a Variable whose value depends\n # on another initializer), the initialization can happen in any order so\n # long as it's before the associated Variable read operations.\n #\n # Note that in general the randomness of legacy random operations is only\n # guaranteed by providing a graph-level and op-level seed (and ordering of\n # the same op across multiple iterations of a while_loop is specifically not\n # guaranteed; see the discussion below).\n #\n # There is a possible race condition inside while_loop where the same\n # random OpKernel instantiation is reused across multiple steps\n # of the loop. 
Since legacy Random OpKernels have an internal rng state,\n # automatic dependency tracking across loop steps would likely\n # fix this race; and for that case this blacklist is problematic.\n # However, since automatic dependency tracking inside while loops is not\n # currently supported, and there are no other examples of OpKernel reuse\n # (each OpKernel is associated with a unique op in graph mode),\n # this blacklist has no effect on the aforementioned behavior.\n #\n # TODO(ebrevdo,skyewm): Modify the check against this blacklist to\n # only occur when the op is inside a \"variable initialization scope\"; and\n # add proper autodeps inside while_loops that respects this updated check.\n \"RandomUniform\",\n \"RandomUniformInt\",\n \"RandomStandardNormal\",\n \"ParameterizedTruncatedNormal\",\n \"TruncatedNormal\",\n \"RandomShuffle\",\n \"Multinomial\",\n \"RandomGamma\",\n \"RandomGammaGrad\",\n \"RandomPoisson\",\n \"RandomPoissonV2\",\n]\n# LINT.ThenChange(//tensorflow/core/grappler/optimizers/function_optimizer.cc)\n\n_ALL_BLACKLISTED_OPS = set(ASYNC_STATEFUL_OPS) | set(LEGACY_RANDOM_OPS)\n\n\ndef op_is_stateful(op):\n # pylint: disable=protected-access\n return op._is_stateful and op.type not in _ALL_BLACKLISTED_OPS\n\n\nclass AutomaticControlDependencies(object):\n \"\"\"Context manager to automatically add control dependencies.\n\n Code under this context manager will act as if a sensible set of control\n dependencies were present. More specifically:\n 1. All stateful ops in the scope will execute (with the exception of ops in\n ASYNC_STATEFUL_OPS and LEGACY_RANDOM_OPS)\n 2. Stateful ops which modify the same resource will execute in program order\n\n Note: creating variables in an automatic control dependencies context is not\n supported (the value of the variables will never change as they will keep\n getting reinitialized).\n\n NOT THREAD SAFE\n \"\"\"\n\n def __init__(self):\n self._returned_tensors = object_identity.ObjectIdentitySet()\n self.ops_which_must_run = set()\n\n def mark_as_return(self, tensor):\n \"\"\"Acts like identity but marks the `Tensor` as a return value.\n\n This will possibly return a copy of the `Tensor`. Usage:\n\n ```\n with AutomaticControlDependencies() as a:\n ...\n t = a.mark_as_return(t)\n _ = ...(t...) # i.e. 
it's safe to use t here\n ```\n\n Args:\n tensor: the `Tensor` to be marked\n\n Returns:\n a copy of the `Tensor`.\n \"\"\"\n if isinstance(tensor, ops.IndexedSlices):\n values = array_ops.identity(tensor.values)\n indices = array_ops.identity(tensor.indices)\n self._returned_tensors.add(indices)\n self._returned_tensors.add(values)\n return ops.IndexedSlices(values, indices, dense_shape=tensor.dense_shape)\n elif isinstance(tensor, sparse_tensor.SparseTensor):\n values = array_ops.identity(tensor.values)\n indices = array_ops.identity(tensor.indices)\n self._returned_tensors.add(indices)\n self._returned_tensors.add(values)\n return sparse_tensor.SparseTensor(\n indices, values, dense_shape=tensor.dense_shape)\n elif isinstance(tensor, tensor_array_ops.TensorArray):\n flow = array_ops.identity(tensor.flow)\n self._returned_tensors.add(flow)\n return tensor_array_ops.build_ta_with_new_flow(tensor, flow)\n # We want to make the return values depend on the stateful operations, but\n # we don't want to introduce a cycle, so we make the return value the result\n # of a new identity operation that the stateful operations definitely don't\n # depend on.\n tensor = array_ops.identity(tensor)\n self._returned_tensors.add(tensor)\n return tensor\n\n def __enter__(self):\n if context.executing_eagerly():\n return self\n # This code assumes no other thread is adding ops to the graph while\n # we're adding ops to the graph.\n # TODO(apassos): Fix this by locking the graph or using a temporary\n # graph (but that would mess up devices and collections at least,\n # probably other things as well).\n self._graph = ops.get_default_graph()\n self._graph._add_control_dependencies = True # pylint: disable=protected-access\n self._n_operations = len(self._graph.get_operations())\n return self\n\n def _process_switch(self, switch_op, ops_which_must_run,\n last_op_using_resource_tensor, merge_for_resource):\n \"\"\"Processes a switch node for a resource input.\n\n When tensorflow creates a cond, it creates a control flow context for each\n branch of the cond. Each external tensor accessed by that branch is routed\n through a switch op, which gets created in the graph _after_ the op which\n uses that tensor get created.\n\n If the resource comes from another switch op we process that one first.\n\n _process_switch creates a corresponding merge node for the switch node. This\n merge node is added to the outer control flow context of the switch\n node. We also ensure that:\n\n 1. The switch node executes after the previous op which used the resource\n tensor\n\n 2. Any op which uses a resource output of the switch node executes before\n the merge for the switch node.\n\n 3. The next op which uses the input resource to the switch node (which\n might be another switch node for the other branch of the conditional)\n will execute after the merge node is done.\n\n 4. 
The merge node is marked as must_run so it will run even if no\n subsequent operation uses the resource.\n\n Args:\n switch_op: the switch op to be processed\n ops_which_must_run: the set of ops which must run\n last_op_using_resource_tensor: map from resource tensor to last op using\n it\n merge_for_resource: map from resource tensor to merge which must follow\n all usages of it.\n \"\"\"\n inp = switch_op.inputs[0]\n if inp.dtype == dtypes_module.resource and inp.op.type == \"Switch\":\n self._process_switch(inp.op, ops_which_must_run,\n last_op_using_resource_tensor, merge_for_resource)\n if switch_op.outputs[0] in merge_for_resource:\n return\n new_merge = control_flow_ops.merge(switch_op.outputs,\n name=\"artificial_merge\")\n new_merge[0].op._control_flow_context = ( # pylint: disable=protected-access\n switch_op._control_flow_context.outer_context) # pylint: disable=protected-access\n # Ensures the merge always runs\n ops_which_must_run.add(new_merge[0].op)\n if inp in last_op_using_resource_tensor:\n # Ensures the switch executes after the previous op using the resource.\n switch_op._add_control_input(last_op_using_resource_tensor[inp]) # pylint: disable=protected-access\n # Ensure the next op outside the cond happens after the merge.\n last_op_using_resource_tensor[inp] = new_merge[0].op\n if inp in merge_for_resource:\n merge_for_resource[inp]._add_control_input(new_merge[0].op) # pylint: disable=protected-access\n for o in switch_op.outputs:\n # Ensures the merge will execute after all ops inside the cond\n merge_for_resource[o] = new_merge[0].op\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n if context.executing_eagerly():\n return\n\n if self._graph is not ops.get_default_graph():\n raise RuntimeError(\n \"Graph changed while trying to add control dependencies.\")\n\n # pylint: disable=protected-access\n if hasattr(self._graph, \"outer_graph\"):\n outer_val = self._graph.outer_graph._add_control_dependencies\n self._graph._add_control_dependencies = outer_val\n else:\n self._graph._add_control_dependencies = False\n # pylint: enable=protected-access\n\n # map from resource tensor to the last op which used it\n last_op_using_resource_tensor = {}\n # set of conditional and loop exits\n ops_which_must_run = set()\n # merge which must depend on ops which use this resource\n merge_for_resource = {}\n\n new_operations = self._graph.get_operations()[self._n_operations:]\n\n # Ensures that uses of resource tensors get serialized properly and all\n # execute. This is done by keeping a map from resource tensor to the last op\n # in graph-construction order which used it (last_op_using_resource_tensor).\n #\n # Conditionals are written in TensorFlow such that every external tensor\n # accessed in the conditional goes through a switch op and every return\n # tensor (it's guaranteed that there will be at least one) goes through a\n # merge op.\n #\n # To handle conditionals, switches are handled in a special way (see\n # comments for _process_switch). 
Merge nodes created by TF's conditional\n # logic (as opposed to by _process_switch) are forced to run and also get a\n # control dependency added to them to ensure all stateful ops inside their\n # control flow context run.\n #\n # We also ensure that if an op is using a resource output by a switch node\n # (that is, a resource tensor for which there's a value in\n # merge_for_resource) this op will run before the merge for that resource.\n #\n # We try to add control inputs to nodes respecting their control flow\n # contexts to avoid dead nodes propagating everywhere and leading to\n # \"retval[0] doesn't have value\" errors. If a node gets a control dependency\n # on a dead node (i.e. a note from an untaken control flow branch) that node\n # will be marked as dead unless it's a merge node.\n #\n # TODO(apassos): serialize non-resource-taking stateful ops as well, and\n # test that it works. Support while loops. Support init_scope escaping from\n # this.\n for op in new_operations:\n # TODO(apassos) make this code safely support while loops.\n if control_flow_util.IsInWhileLoop(op):\n continue\n control_inputs = set()\n # Ensure stateful ops run\n if (op.type not in self._graph._registered_ops # pylint: disable=protected-access\n or op_is_stateful(op)):\n ops_which_must_run.add(op)\n # Ignore switches (they're handled separately)\n if op.type == \"Switch\" and op.inputs[0].dtype == dtypes_module.resource:\n continue\n # Make merges trigger all other computation which must run\n if op.type == \"Merge\":\n for o in ops_which_must_run:\n op._add_control_input(o) # pylint: disable=protected-access\n for inp in o.inputs:\n if inp in last_op_using_resource_tensor:\n last_op_using_resource_tensor[inp] = op\n ops_which_must_run = set([op])\n continue\n\n resource_inputs = set()\n # Check for any resource inputs. 
If we find any, we update control_inputs\n # and last_op_using_resource_tensor.\n for inp in op.inputs:\n if inp.dtype != dtypes_module.resource:\n continue\n\n # If the op receives the same resource tensor twice as an input, we skip\n # to avoid the op getting a control dependency on itself.\n if id(inp) in resource_inputs:\n continue\n\n resource_inputs.add(id(inp))\n # Deal with switches, finally.\n if inp.op.type == \"Switch\":\n self._process_switch(inp.op, ops_which_must_run,\n last_op_using_resource_tensor,\n merge_for_resource)\n # Ensure uses of resources are serialized\n if inp in last_op_using_resource_tensor:\n if (last_op_using_resource_tensor[inp]._control_flow_context # pylint: disable=protected-access\n is op._control_flow_context): # pylint: disable=protected-access\n control_inputs.add(last_op_using_resource_tensor[inp])\n # Ensure merges happen after the closing of a cond block\n if inp in merge_for_resource:\n merge_for_resource[inp]._add_control_input(op) # pylint: disable=protected-access\n last_op_using_resource_tensor[inp] = op\n\n if (op_is_stateful(op) and not resource_inputs\n and op._control_flow_context is None): # pylint: disable=protected-access\n if None in last_op_using_resource_tensor:\n op._add_control_input(last_op_using_resource_tensor[None]) # pylint: disable=protected-access\n last_op_using_resource_tensor[None] = op\n control_inputs = [c for c in control_inputs\n if c._control_flow_context is op._control_flow_context] # pylint: disable=protected-access\n op._add_control_inputs(control_inputs) # pylint: disable=protected-access\n\n # Ensure all ops which must run do run\n self.ops_which_must_run.update(ops_which_must_run)\n for r in nest.flatten(list(self._returned_tensors), expand_composites=True):\n if self.ops_which_must_run:\n r.op._add_control_inputs( # pylint: disable=protected-access\n [o for o in self.ops_which_must_run\n if o._control_flow_context is r.op._control_flow_context]) # pylint: disable=protected-access\n\n\ndef automatic_control_dependencies(f):\n \"\"\"Wraps f to automatically insert control dependencies.\n\n The inserted dependencies ensure that:\n 1. All stateful ops in f run when the result of f runs\n 2. Updates to the same resources happen in order.\n\n Args:\n f: the function to be wrapped.\n\n Returns:\n The wrapped function.\n \"\"\"\n\n def wrapper(*args, **kwargs):\n with AutomaticControlDependencies() as a:\n result = f(*args, **kwargs)\n result_flat = [a.mark_as_return(t) for t in nest.flatten(result)]\n return nest.pack_sequence_as(result, result_flat)\n\n return tf_decorator.make_decorator(f, wrapper)\n" ]
[ [ "tensorflow.python.eager.tape.record_operation", "tensorflow.python.framework.ops._as_graph_element", "tensorflow.python.util.tf_decorator.rewrap", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.util.tf_decorator.make_decorator", "tensorflow.python.eager.context.context", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.eager.graph_only_ops.graph_placeholder", "tensorflow.python.framework.auto_control_deps.AutomaticControlDependencies", "tensorflow.python.util.nest.flatten_with_tuple_paths", "tensorflow.python.eager.context.global_seed", "tensorflow.python.framework.ops.NullContextmanager", "tensorflow.python.util.nest.map_structure", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.dismantle_graph", "tensorflow.python.util.tf_decorator.unwrap", "tensorflow.python.framework.ops.convert_to_tensor_or_composite", "tensorflow.python.ops.custom_gradient.copy_handle_data", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.ops.tensor_array_ops.build_ta_with_new_flow", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.framework.ops.uid", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.util.memory.dismantle_ordered_dict", "tensorflow.python.autograph.ConversionOptions", "tensorflow.python.util.nest.flatten" ], [ "tensorflow.python.framework.ops.enable_tensor_equality", "tensorflow.python.eager.context.enable_run_metadata", "tensorflow.python.framework.config.list_physical_devices", "tensorflow.python.eager.context._EagerTensorCache", "tensorflow.python.eager.context.async_clear_error", "tensorflow.python.pywrap_tensorflow.TFE_Py_RegisterExceptionClass", "tensorflow.python.eager.context.async_wait", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.eager.context.Context", "tensorflow.python.framework.ops.device", "tensorflow.python.eager.context.context", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.eager.context.VirtualDeviceConfiguration", "tensorflow.python.eager.context.device", "tensorflow.python.ops.resource_variable_ops.ResourceVariable", "tensorflow.python.eager.context.execution_mode", "tensorflow.python.ops.script_ops.eager_py_func", "tensorflow.python.ops.array_ops.ones", "numpy.zeros", "tensorflow.python.eager.test.main", "tensorflow.python.eager.context.device_policy", "tensorflow.python.eager.context.export_run_metadata", "tensorflow.python.eager.context.disable_run_metadata", "numpy.array", "tensorflow.python.ops.gen_resource_variable_ops.read_variable_op", "tensorflow.python.eager.context.set_execution_mode", "tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits", "tensorflow.python.framework.ops.disable_tensor_equality", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.keras.testing_utils.get_test_data", "tensorflow.python.keras.testing_utils.should_run_eagerly", "numpy.random.random", "tensorflow.python.keras.layers.Embedding", "tensorflow.python.keras.utils.to_categorical", "tensorflow.python.keras.regularizers.l1", "tensorflow.python.keras.layers.Masking", 
"tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.testing_utils.layer_test", "numpy.ones", "tensorflow.python.keras.testing_utils.should_run_distributed", "tensorflow.python.keras.models.Model", "tensorflow.python.platform.test.main", "numpy.testing.assert_allclose", "tensorflow.python.keras.layers.GRU", "tensorflow.python.keras.layers.Input", "tensorflow.python.keras.constraints.max_norm" ], [ "tensorflow.python.ops.array_ops.matrix_set_diag", "tensorflow.python.ops.math_ops.conj", "tensorflow.python.ops.linalg.linalg_impl.adjoint", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.linalg.linear_operator_util.assert_zero_imag_part", "tensorflow.python.ops.linalg.linear_operator_util.convert_nonref_to_tensor", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.linalg.linear_operator_util.assert_no_entries_with_modulus_zero", "tensorflow.python.ops.math_ops.reduce_prod", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.matrix_diag", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.math_ops.real", "tensorflow.python.ops.array_ops.matrix_diag_part" ], [ "tensorflow.python.keras.utils.generic_utils.slice_arrays", "tensorflow.python.keras.utils.generic_utils.custom_object_scope", "tensorflow.python.keras.activations.get", "tensorflow.python.keras.regularizers.get", "tensorflow.python.keras.utils.generic_utils.deserialize_keras_object", "tensorflow.python.platform.test.main", "tensorflow.python.keras.utils.generic_utils.has_arg", "tensorflow.python.keras.utils.generic_utils.serialize_keras_object" ], [ "tensorflow.python.ops.array_ops.identity", "tensorflow.python.util.nest.flatten", "tensorflow.python.util.object_identity.ObjectIdentitySet", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.util.tf_decorator.make_decorator", "tensorflow.python.ops.control_flow_util.IsInWhileLoop", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.ops.control_flow_ops.merge", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.tensor_array_ops.build_ta_with_new_flow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.3", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] } ]
hanhou/map-ephys
[ "4262e1ba68671b342a77f386e4ebabcce138c453" ]
[ "pipeline/plot/unit_characteristic_plot.py" ]
[ "import numpy as np\nimport datajoint as dj\nfrom PIL import ImageColor\nfrom collections import Counter\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport itertools\nimport pandas as pd\n\nfrom pipeline import experiment, ephys, psth, lab, histology, ccf, psth_foraging\n\nfrom pipeline.plot.util import (_plot_with_sem, _extract_one_stim_dur,\n _plot_stacked_psth_diff, _plot_avg_psth, _jointplot_w_hue)\nfrom pipeline.plot import unit_psth\nfrom pipeline.util import (_get_units_hemisphere, _get_trial_event_times,\n _get_stim_onset_time, _get_clustering_method)\n\nfrom . import PhotostimError\n\n_plt_xmin = -3\n_plt_xmax = 2\n\n\ndef plot_clustering_quality(probe_insertion, clustering_method=None, axs=None):\n probe_insertion = probe_insertion.proj()\n\n if clustering_method is None:\n try:\n clustering_method = _get_clustering_method(probe_insertion)\n except ValueError as e:\n raise ValueError(str(e) + '\\nPlease specify one with the kwarg \"clustering_method\"')\n\n amp, snr, spk_rate, isi_violation = (ephys.Unit * ephys.UnitStat * ephys.ProbeInsertion.InsertionLocation\n & probe_insertion & {'clustering_method': clustering_method}).fetch(\n 'unit_amp', 'unit_snr', 'avg_firing_rate', 'isi_violation')\n\n metrics = {'amp': amp,\n 'snr': snr,\n 'isi': np.array(isi_violation) * 100, # to percentage\n 'rate': np.array(spk_rate)}\n label_mapper = {'amp': 'Amplitude',\n 'snr': 'Signal to noise ratio (SNR)',\n 'isi': 'ISI violation (%)',\n 'rate': 'Firing rate (spike/s)'}\n\n fig = None\n if axs is None:\n fig, axs = plt.subplots(2, 3, figsize = (12, 8))\n fig.subplots_adjust(wspace=0.4)\n\n assert axs.size == 6\n\n for (m1, m2), ax in zip(itertools.combinations(list(metrics.keys()), 2), axs.flatten()):\n ax.plot(metrics[m1], metrics[m2], '.k')\n ax.set_xlabel(label_mapper[m1])\n ax.set_ylabel(label_mapper[m2])\n\n # cosmetic\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n return fig\n\n\ndef plot_unit_characteristic(probe_insertion, clustering_method=None, axs=None):\n probe_insertion = probe_insertion.proj()\n\n if clustering_method is None:\n try:\n clustering_method = _get_clustering_method(probe_insertion)\n except ValueError as e:\n raise ValueError(str(e) + '\\nPlease specify one with the kwarg \"clustering_method\"')\n\n if clustering_method in ('kilosort2'):\n q_unit = (ephys.Unit * ephys.ProbeInsertion.InsertionLocation.proj('depth') * ephys.UnitStat\n * lab.ElectrodeConfig.Electrode.proj() * lab.ProbeType.Electrode.proj('x_coord', 'y_coord')\n & probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != \"all\"').proj(\n ..., x='x_coord', y='y_coord')\n else:\n q_unit = (ephys.Unit * ephys.ProbeInsertion.InsertionLocation.proj('depth') * ephys.UnitStat\n & probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != \"all\"').proj(\n ..., x='unit_posx', y='unit_posy')\n\n amp, snr, spk_rate, x, y, insertion_depth = q_unit.fetch(\n 'unit_amp', 'unit_snr', 'avg_firing_rate', 'x', 'y', 'depth')\n\n metrics = pd.DataFrame(list(zip(*(amp/amp.max(), snr/snr.max(), spk_rate/spk_rate.max(),\n x, insertion_depth.astype(float) + y))))\n metrics.columns = ['amp', 'snr', 'rate', 'x', 'y']\n\n # --- prepare for plotting\n shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,\n shank_count='count(distinct shank)').fetch1('shank_count')\n m_scale = get_m_scale(shank_count)\n\n ymin = metrics.y.min() - 100\n ymax = 
metrics.y.max() + 200\n xmax = 1.3 * metrics.x.max()\n xmin = -1/6*xmax\n cosmetic = {'legend': None,\n 'linewidth': 1.75,\n 'alpha': 0.9,\n 'facecolor': 'none', 'edgecolor': 'k'}\n\n # --- plot\n fig = None\n if axs is None:\n fig, axs = plt.subplots(1, 3, figsize=(10, 8))\n fig.subplots_adjust(wspace=0.6)\n\n assert axs.size == 3\n\n sns.scatterplot(data=metrics, x='x', y='y', s=metrics.amp*m_scale, ax=axs[0], **cosmetic)\n sns.scatterplot(data=metrics, x='x', y='y', s=metrics.snr*m_scale, ax=axs[1], **cosmetic)\n sns.scatterplot(data=metrics, x='x', y='y', s=metrics.rate*m_scale, ax=axs[2], **cosmetic)\n\n # manually draw the legend\n lg_ypos = ymax\n data = pd.DataFrame({'x': [0.1*xmax, 0.4*xmax, 0.75*xmax], 'y': [lg_ypos, lg_ypos, lg_ypos],\n 'size_ratio': np.array([0.2, 0.5, 0.8])})\n for ax, ax_maxval in zip(axs.flatten(), (amp.max(), snr.max(), spk_rate.max())):\n sns.scatterplot(data=data, x='x', y='y', s=data.size_ratio*m_scale, ax=ax, **dict(cosmetic, facecolor='k'))\n for _, r in data.iterrows():\n ax.text(r['x']-4, r['y']+70, (r['size_ratio']*ax_maxval).astype(int))\n\n # cosmetic\n for title, ax in zip(('Amplitude', 'SNR', 'Firing rate'), axs.flatten()):\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_title(title)\n ax.set_xlim((xmin, xmax))\n ax.plot([0.5*xmin, xmax], [lg_ypos-80, lg_ypos-80], '-k')\n ax.set_ylim((ymin, ymax + 150))\n\n return fig\n\n\ndef plot_unit_selectivity(probe_insertion, clustering_method=None, axs=None):\n probe_insertion = probe_insertion.proj()\n\n if clustering_method is None:\n try:\n clustering_method = _get_clustering_method(probe_insertion)\n except ValueError as e:\n raise ValueError(str(e) + '\\nPlease specify one with the kwarg \"clustering_method\"')\n\n if clustering_method in ('kilosort2'):\n q_unit = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation\n * lab.ElectrodeConfig.Electrode.proj() * lab.ProbeType.Electrode.proj('x_coord', 'y_coord')\n * experiment.Period & probe_insertion & {'clustering_method': clustering_method}\n & 'period_selectivity != \"non-selective\"').proj(..., x='unit_posx', y='unit_posy').proj(\n ..., x='x_coord', y='y_coord')\n else:\n q_unit = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation\n * experiment.Period & probe_insertion & {'clustering_method': clustering_method}\n & 'period_selectivity != \"non-selective\"').proj(..., x='unit_posx', y='unit_posy')\n\n attr_names = ['unit', 'period', 'period_selectivity', 'contra_firing_rate',\n 'ipsi_firing_rate', 'x', 'y', 'depth']\n selective_units = q_unit.fetch(*attr_names)\n selective_units = pd.DataFrame(selective_units).T\n selective_units.columns = attr_names\n selective_units.period_selectivity.astype('category')\n\n # --- account for insertion depth (manipulator depth)\n selective_units.y = selective_units.depth.values.astype(float) + selective_units.y\n\n # --- get ipsi vs. 
contra firing rate difference\n f_rate_diff = np.abs(selective_units.ipsi_firing_rate - selective_units.contra_firing_rate)\n selective_units['f_rate_diff'] = f_rate_diff / f_rate_diff.max()\n\n # --- prepare for plotting\n shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,\n shank_count='count(distinct shank)').fetch1('shank_count')\n m_scale = get_m_scale(shank_count)\n\n cosmetic = {'legend': None,\n 'linewidth': 0.0001}\n ymin = selective_units.y.min() - 100\n ymax = selective_units.y.max() + 100\n xmax = 1.3 * selective_units.x.max()\n xmin = -1/6*xmax\n\n # a bit of hack to get the 'open circle'\n pts = np.linspace(0, np.pi * 2, 24)\n circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]\n vert = np.r_[circ, circ[::-1] * .7]\n\n open_circle = mpl.path.Path(vert)\n\n # --- plot\n fig = None\n if axs is None:\n fig, axs = plt.subplots(1, 3, figsize=(10, 8))\n fig.subplots_adjust(wspace=0.6)\n\n assert axs.size == 3\n\n for (title, df), ax in zip(((p, selective_units[selective_units.period == p])\n for p in ('sample', 'delay', 'response')), axs):\n sns.scatterplot(data=df, x='x', y='y',\n s=df.f_rate_diff.values.astype(float)*m_scale,\n hue='period_selectivity', marker=open_circle,\n palette={'contra-selective': 'b', 'ipsi-selective': 'r'},\n ax=ax, **cosmetic)\n contra_p = (df.period_selectivity == 'contra-selective').sum() / len(df) * 100\n # cosmetic\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_title(f'{title}\\n% contra: {contra_p:.2f}\\n% ipsi: {100-contra_p:.2f}')\n ax.set_xlim((xmin, xmax))\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_ylim((ymin, ymax))\n\n return fig\n\n\ndef plot_unit_bilateral_photostim_effect(probe_insertion, clustering_method=None, axs=None):\n probe_insertion = probe_insertion.proj()\n\n if not (psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim') & probe_insertion):\n raise PhotostimError('No Bilateral ALM Photo-stimulation present')\n\n if clustering_method is None:\n try:\n clustering_method = _get_clustering_method(probe_insertion)\n except ValueError as e:\n raise ValueError(str(e) + '\\nPlease specify one with the kwarg \"clustering_method\"')\n\n dv_loc = (ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch1('depth')\n\n no_stim_cond = (psth.TrialCondition\n & {'trial_condition_name':\n 'all_noearlylick_nostim'}).fetch1('KEY')\n\n bi_stim_cond = (psth.TrialCondition\n & {'trial_condition_name':\n 'all_noearlylick_both_alm_stim'}).fetch1('KEY')\n\n units = ephys.Unit & probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != \"all\"'\n\n metrics = pd.DataFrame(columns=['unit', 'x', 'y', 'frate_change'])\n\n # get photostim onset and duration\n stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent\n * psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')\n & probe_insertion).fetch('duration'))\n stim_dur = _extract_one_stim_dur(stim_durs)\n stim_time = _get_stim_onset_time(units, 'all_noearlylick_both_alm_stim')\n\n # XXX: could be done with 1x fetch+join\n for u_idx, unit in enumerate(units.fetch('KEY', order_by='unit')):\n if clustering_method in ('kilosort2'):\n x, y = (ephys.Unit * lab.ElectrodeConfig.Electrode.proj()\n * lab.ProbeType.Electrode.proj('x_coord', 'y_coord') & unit).fetch1('x_coord', 'y_coord')\n else:\n x, y = (ephys.Unit & unit).fetch1('unit_posx', 'unit_posy')\n\n # obtain unit psth per trial, for all nostim and bistim trials\n nostim_trials = 
ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(no_stim_cond['trial_condition_name'])\n bistim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(bi_stim_cond['trial_condition_name'])\n\n nostim_psths, nostim_edge = psth.compute_unit_psth(unit, nostim_trials.fetch('KEY'), per_trial=True)\n bistim_psths, bistim_edge = psth.compute_unit_psth(unit, bistim_trials.fetch('KEY'), per_trial=True)\n\n # compute the firing rate difference between contra vs. ipsi within the stimulation time window\n ctrl_frate = np.array([nostim_psth[np.logical_and(nostim_edge >= stim_time,\n nostim_edge <= stim_time + stim_dur)].mean()\n for nostim_psth in nostim_psths])\n stim_frate = np.array([bistim_psth[np.logical_and(bistim_edge >= stim_time,\n bistim_edge <= stim_time + stim_dur)].mean()\n for bistim_psth in bistim_psths])\n\n frate_change = (stim_frate.mean() - ctrl_frate.mean()) / ctrl_frate.mean()\n frate_change = abs(frate_change) if frate_change < 0 else 0.0001\n\n metrics.loc[u_idx] = (int(unit['unit']), x, float(dv_loc) + y, frate_change)\n\n metrics.frate_change = metrics.frate_change / metrics.frate_change.max()\n\n # --- prepare for plotting\n shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,\n shank_count='count(distinct shank)').fetch1('shank_count')\n m_scale = get_m_scale(shank_count)\n\n fig = None\n if axs is None:\n fig, axs = plt.subplots(1, 1, figsize=(4, 8))\n\n xmax = 1.3 * metrics.x.max()\n xmin = -1/6*xmax\n\n cosmetic = {'legend': None,\n 'linewidth': 1.75,\n 'alpha': 0.9,\n 'facecolor': 'none', 'edgecolor': 'k'}\n\n sns.scatterplot(data=metrics, x='x', y='y', s=metrics.frate_change*m_scale,\n ax=axs, **cosmetic)\n\n axs.spines['right'].set_visible(False)\n axs.spines['top'].set_visible(False)\n axs.set_title('% change')\n axs.set_xlim((xmin, xmax))\n\n return fig\n\n\ndef plot_pseudocoronal_slice(probe_insertion, shank_no=1):\n # ---- Electrode sites ----\n annotated_electrodes = (lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode\n * ephys.ProbeInsertion\n * histology.ElectrodeCCFPosition.ElectrodePosition\n & probe_insertion & {'shank': shank_no})\n\n electrode_coords = np.array(list(zip(*annotated_electrodes.fetch(\n 'ccf_z', 'ccf_y', 'ccf_x', order_by='ccf_y')))) # (AP, DV, ML)\n probe_track_coords = np.array(list(zip(*(histology.LabeledProbeTrack.Point\n & probe_insertion & {'shank': shank_no}).fetch(\n 'ccf_z', 'ccf_y', 'ccf_x', order_by='ccf_y'))))\n\n voxel_res = ccf.CCFLabel.CCF_R3_20UM_RESOLUTION\n lr_max, dv_max, _ = ccf.get_ccf_xyz_max()\n\n pseudocoronal_points, shank_ccfs = histology.retrieve_pseudocoronal_slice(probe_insertion, shank_no)\n\n dv_pts, lr_pts, ap_pts, color_codes = pseudocoronal_points.T\n dv_pts = dv_pts.astype(int)\n lr_pts = lr_pts.astype(int)\n color_codes = color_codes.astype(str)\n\n # ---- paint annotation color code ----\n coronal_slice = np.full((dv_max + 1, lr_max + 1, 3), np.nan)\n for color in set(color_codes):\n matched_ind = np.where(color_codes == color)[0]\n dv_ind = dv_pts[matched_ind] # rows\n lr_ind = lr_pts[matched_ind] # cols\n try:\n c_rgb = ImageColor.getcolor(\"#\" + color, \"RGB\")\n except ValueError as e:\n print(str(e))\n continue\n coronal_slice[dv_ind, lr_ind, :] = np.full((len(matched_ind), 3), c_rgb)\n\n # ---- paint the interpolated track of this probe/shank in gray ----\n in_probe_range = np.logical_and(shank_ccfs[:, 1] >= probe_track_coords[:, 1].min(),\n shank_ccfs[:, 1] <= probe_track_coords[:, 1].max())\n 
in_electrode_range = np.logical_and(shank_ccfs[:, 1] >= electrode_coords[:, 1].min(),\n shank_ccfs[:, 1] <= electrode_coords[:, 1].max())\n\n tracks_coords = shank_ccfs[np.logical_and(in_probe_range, ~in_electrode_range), :]\n coronal_slice[tracks_coords[:, 1], tracks_coords[:, 0], :] = np.full(\n (tracks_coords.shape[0], 3), ImageColor.getcolor(\"#FFFFFF\", \"RGB\"))\n\n # ---- paint electrode sites on this probe/shank in black ----\n coronal_slice[electrode_coords[:, 1], electrode_coords[:, 2], :] = np.full(\n (electrode_coords.shape[0], 3), ImageColor.getcolor(\"#080808\", \"RGB\"))\n\n # ---- downsample the 2D slice to the voxel resolution ----\n coronal_slice = coronal_slice[::voxel_res, ::voxel_res, :]\n\n # paint outside region white\n nan_r, nan_c = np.where(np.nansum(coronal_slice, axis=2) == 0)\n coronal_slice[nan_r, nan_c, :] = np.full((len(nan_r), 3), ImageColor.getcolor(\"#FFFFFF\", \"RGB\"))\n\n # ---- plot ----\n fig, ax = plt.subplots(1, 1)\n ax.imshow(coronal_slice.astype(np.uint8), extent=[0, lr_max, dv_max, 0])\n\n ax.invert_xaxis()\n ax.set_xticks([])\n ax.set_yticks([])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n\n return fig\n\n\ndef plot_driftmap(probe_insertion, clustering_method=None, shank_no=1):\n probe_insertion = probe_insertion.proj()\n\n assert histology.InterpolatedShankTrack & probe_insertion\n\n if clustering_method is None:\n try:\n clustering_method = _get_clustering_method(probe_insertion)\n except ValueError as e:\n raise ValueError(str(e) + '\\nPlease specify one with the kwarg \"clustering_method\"')\n\n units = (ephys.Unit * lab.ElectrodeConfig.Electrode\n & probe_insertion & {'clustering_method': clustering_method}\n & 'unit_quality != \"all\"')\n units = (units.proj('spike_times', 'spike_depths', 'unit_posy')\n * ephys.ProbeInsertion.proj()\n * lab.ProbeType.Electrode.proj('shank') & {'shank': shank_no})\n\n # ---- ccf region ----\n annotated_electrodes = (lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode\n * ephys.ProbeInsertion\n * histology.ElectrodeCCFPosition.ElectrodePosition\n * ccf.CCFAnnotation * ccf.CCFBrainRegion.proj(..., annotation='region_name')\n & probe_insertion & {'shank': shank_no})\n pos_y, ccf_y, color_code = annotated_electrodes.fetch(\n 'y_coord', 'ccf_y', 'color_code', order_by='y_coord DESC')\n\n # CCF position of most ventral recording site\n last_electrode_site = np.array((histology.InterpolatedShankTrack.DeepestElectrodePoint\n & probe_insertion & {'shank': shank_no}).fetch1(\n 'ccf_x', 'ccf_y', 'ccf_z'))\n # CCF position of the brain surface where this shank crosses\n brain_surface_site = np.array((histology.InterpolatedShankTrack.BrainSurfacePoint\n & probe_insertion & {'shank': shank_no}).fetch1(\n 'ccf_x', 'ccf_y', 'ccf_z'))\n\n # CCF position of most ventral recording site, with respect to the brain surface\n y_ref = -np.linalg.norm(last_electrode_site - brain_surface_site)\n\n # ---- spikes ----brain_surface_site\n spike_times, spike_depths = units.fetch('spike_times', 'spike_depths', order_by='unit')\n\n spike_times = np.hstack(spike_times)\n spike_depths = np.hstack(spike_depths)\n\n # histogram\n # time_res = 10 # time resolution: 1sec\n # depth_res = 10 # depth resolution: 10um\n #\n # spike_bins = np.arange(0, spike_times.max() + time_res, time_res)\n # depth_bins = np.arange(spike_depths.min() - depth_res, spike_depths.max() + depth_res, depth_res)\n\n # time-depth 2D histogram\n 
time_bin_count = 1000\n depth_bin_count = 200\n\n spike_bins = np.linspace(0, spike_times.max(), time_bin_count)\n depth_bins = np.linspace(0, np.nanmax(spike_depths), depth_bin_count)\n\n spk_count, spk_edges, depth_edges = np.histogram2d(spike_times, spike_depths, bins=[spike_bins, depth_bins])\n spk_rates = spk_count / np.mean(np.diff(spike_bins))\n spk_edges = spk_edges[:-1]\n depth_edges = depth_edges[:-1]\n\n # region colorcode, by depths\n binned_hexcodes = []\n\n y_spacing = np.abs(np.nanmedian(np.where(np.diff(pos_y)==0, np.nan, np.diff(pos_y))))\n anno_depth_bins = np.arange(0, depth_bins[-1], y_spacing)\n for s, e in zip(anno_depth_bins[:-1], anno_depth_bins[1:]):\n hexcodes = color_code[np.logical_and(pos_y > s, pos_y <= e)]\n if len(hexcodes):\n binned_hexcodes.append(Counter(hexcodes).most_common()[0][0])\n else:\n binned_hexcodes.append('FFFFFF')\n\n region_rgba = np.array([list(ImageColor.getcolor(\"#\" + chex, \"RGBA\")) for chex in binned_hexcodes])\n region_rgba = np.repeat(region_rgba[:, np.newaxis, :], 10, axis=1)\n\n # canvas setup\n fig = plt.figure(figsize=(16, 8))\n grid = plt.GridSpec(12, 12)\n\n ax_main = plt.subplot(grid[1:, 0:9])\n ax_cbar = plt.subplot(grid[0, 0:9])\n ax_spkcount = plt.subplot(grid[1:, 9:11])\n ax_anno = plt.subplot(grid[1:, 11:])\n\n # -- plot main --\n im = ax_main.imshow(spk_rates.T, aspect='auto', cmap='gray_r',\n extent=[spike_bins[0], spike_bins[-1], depth_bins[-1], depth_bins[0]])\n # cosmetic\n ax_main.invert_yaxis()\n ax_main.set_xlabel('Time (sec)')\n ax_main.set_ylabel('Distance from tip sites (um)')\n ax_main.set_ylim(depth_edges[0], depth_edges[-1])\n ax_main.spines['right'].set_visible(False)\n ax_main.spines['top'].set_visible(False)\n\n cb = fig.colorbar(im, cax=ax_cbar, orientation='horizontal')\n cb.outline.set_visible(False)\n cb.ax.xaxis.tick_top()\n cb.set_label('Firing rate (Hz)')\n cb.ax.xaxis.set_label_position('top')\n\n # -- plot spikecount --\n ax_spkcount.plot(spk_count.sum(axis=0) / 10e3, depth_edges, 'k')\n ax_spkcount.set_xlabel('Spike count (x$10^3$)')\n ax_spkcount.set_yticks([])\n ax_spkcount.set_ylim(depth_edges[0], depth_edges[-1])\n\n ax_spkcount.spines['right'].set_visible(False)\n ax_spkcount.spines['top'].set_visible(False)\n ax_spkcount.spines['bottom'].set_visible(False)\n ax_spkcount.spines['left'].set_visible(False)\n\n # -- plot colored region annotation\n ax_anno.imshow(region_rgba, aspect='auto',\n extent=[0, 10, (anno_depth_bins[-1] + y_ref) / 1000, (anno_depth_bins[0] + y_ref) / 1000])\n\n ax_anno.invert_yaxis()\n\n ax_anno.spines['right'].set_visible(False)\n ax_anno.spines['top'].set_visible(False)\n ax_anno.spines['bottom'].set_visible(False)\n ax_anno.spines['left'].set_visible(False)\n\n ax_anno.set_xticks([])\n ax_anno.yaxis.tick_right()\n ax_anno.set_ylabel('Depth in the brain (mm)')\n ax_anno.yaxis.set_label_position('right')\n\n return fig\n\n\ndef plot_stacked_contra_ipsi_psth(units, axs=None):\n units = units.proj()\n\n # get event start times: sample, delay, response\n period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')\n\n hemi = _get_units_hemisphere(units)\n\n conds_i = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')\n\n conds_c = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')\n\n sel_i = (ephys.Unit * psth.UnitSelectivity\n 
& 'unit_selectivity = \"ipsi-selective\"' & units)\n\n sel_c = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"contra-selective\"' & units)\n\n # ipsi selective ipsi trials\n psth_is_it = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')\n\n # ipsi selective contra trials\n psth_is_ct = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')\n\n # contra selective contra trials\n psth_cs_ct = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')\n\n # contra selective ipsi trials\n psth_cs_it = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')\n\n fig = None\n if axs is None:\n fig, axs = plt.subplots(1, 2, figsize=(20, 20))\n assert axs.size == 2\n\n _plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0], vlines=period_starts, flip=True)\n\n axs[0].set_title('Contra-selective Units')\n axs[0].set_ylabel('Unit (by depth)')\n axs[0].set_xlabel('Time to go (s)')\n\n _plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1], vlines=period_starts)\n\n axs[1].set_title('Ipsi-selective Units')\n axs[1].set_ylabel('Unit (by depth)')\n axs[1].set_xlabel('Time to go (s)')\n\n return fig\n\n\ndef plot_avg_contra_ipsi_psth(units, axs=None):\n units = units.proj()\n\n # get event start times: sample, delay, response\n period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')\n\n hemi = _get_units_hemisphere(units)\n\n good_unit = ephys.Unit & 'unit_quality != \"all\"'\n\n conds_i = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch('KEY')\n\n conds_c = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch('KEY')\n\n sel_i = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"ipsi-selective\"' & units)\n\n sel_c = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"contra-selective\"' & units)\n\n psth_is_it = (((psth.UnitPsth & conds_i)\n * ephys.Unit.proj('unit_posy'))\n & good_unit.proj() & sel_i.proj()).fetch(\n 'unit_psth', order_by='unit_posy desc')\n\n psth_is_ct = (((psth.UnitPsth & conds_c)\n * ephys.Unit.proj('unit_posy'))\n & good_unit.proj() & sel_i.proj()).fetch(\n 'unit_psth', order_by='unit_posy desc')\n\n psth_cs_ct = (((psth.UnitPsth & conds_c)\n * ephys.Unit.proj('unit_posy'))\n & good_unit.proj() & sel_c.proj()).fetch(\n 'unit_psth', order_by='unit_posy desc')\n\n psth_cs_it = (((psth.UnitPsth & conds_i)\n * ephys.Unit.proj('unit_posy'))\n & good_unit.proj() & sel_c.proj()).fetch(\n 'unit_psth', order_by='unit_posy desc')\n\n fig = None\n if axs is None:\n fig, axs = plt.subplots(1, 2, figsize=(16, 6))\n assert axs.size == 2\n\n _plot_avg_psth(psth_cs_it, psth_cs_ct, period_starts, axs[0],\n 'Contra-selective')\n _plot_avg_psth(psth_is_it, psth_is_ct, period_starts, axs[1],\n 'Ipsi-selective')\n\n ymax = max([ax.get_ylim()[1] for ax in axs])\n for ax in axs:\n ax.set_ylim((0, ymax))\n\n return fig\n\n\ndef plot_psth_photostim_effect(units, condition_name_kw=['both_alm'], axs=None):\n \"\"\"\n For the specified `units`, plot PSTH comparison between stim vs. 
no-stim with left/right trial instruction\n The stim location (or other appropriate search keywords) can be specified in `condition_name_kw` (default: both ALM)\n \"\"\"\n units = units.proj()\n\n fig = None\n if axs is None:\n fig, axs = plt.subplots(1, 2, figsize=(16, 6))\n assert axs.size == 2\n\n hemi = _get_units_hemisphere(units)\n\n # no photostim:\n psth_n_l = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_left'])[0]\n psth_n_r = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_right'])[0]\n\n psth_n_l = (psth.UnitPsth * psth.TrialCondition & units\n & {'trial_condition_name': psth_n_l} & 'unit_psth is not NULL').fetch('unit_psth')\n psth_n_r = (psth.UnitPsth * psth.TrialCondition & units\n & {'trial_condition_name': psth_n_r} & 'unit_psth is not NULL').fetch('unit_psth')\n\n # with photostim\n psth_s_l = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_left'])[0]\n psth_s_r = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_right'])[0]\n\n psth_s_l = (psth.UnitPsth * psth.TrialCondition & units\n & {'trial_condition_name': psth_s_l} & 'unit_psth is not NULL').fetch('unit_psth')\n psth_s_r = (psth.UnitPsth * psth.TrialCondition & units\n & {'trial_condition_name': psth_s_r} & 'unit_psth is not NULL').fetch('unit_psth')\n\n # get event start times: sample, delay, response\n period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')\n\n # get photostim onset and duration\n stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]\n stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent\n * psth.TrialCondition().get_trials(stim_trial_cond_name)\n & units).fetch('duration'))\n stim_dur = _extract_one_stim_dur(stim_durs)\n stim_time = _get_stim_onset_time(units, stim_trial_cond_name)\n\n if hemi == 'left':\n psth_s_i = psth_s_l\n psth_n_i = psth_n_l\n psth_s_c = psth_s_r\n psth_n_c = psth_n_r\n else:\n psth_s_i = psth_s_r\n psth_n_i = psth_n_r\n psth_s_c = psth_s_l\n psth_n_c = psth_n_l\n\n _plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],\n 'Control')\n _plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],\n 'Photostim')\n\n # cosmetic\n ymax = max([ax.get_ylim()[1] for ax in axs])\n for ax in axs:\n ax.set_ylim((0, ymax))\n ax.set_xlim([_plt_xmin, _plt_xmax])\n\n # add shaded bar for photostim\n axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')\n\n return fig\n\n\ndef plot_coding_direction(units, time_period=None, label=None, axs=None):\n # get event start times: sample, delay, response\n period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')\n\n _, proj_contra_trial, proj_ipsi_trial, time_stamps, _ = psth.compute_CD_projected_psth(\n units.fetch('KEY'), time_period=time_period)\n\n fig = None\n if axs is None:\n fig, axs = plt.subplots(1, 1, figsize=(8, 6))\n\n # plot\n _plot_with_sem(proj_contra_trial, time_stamps, ax=axs, c='b')\n _plot_with_sem(proj_ipsi_trial, time_stamps, ax=axs, c='r')\n\n for x in period_starts:\n axs.axvline(x=x, linestyle = '--', color = 'k')\n # cosmetic\n axs.spines['right'].set_visible(False)\n axs.spines['top'].set_visible(False)\n axs.set_ylabel('CD projection (a.u.)')\n axs.set_xlabel('Time (s)')\n if label:\n axs.set_title(label)\n\n return fig\n\n\ndef plot_paired_coding_direction(unit_g1, unit_g2, labels=None, time_period=None):\n \"\"\"\n Plot trial-to-trial 
CD-endpoint correlation between CD-projected trial-psth from two unit-groups (e.g. two brain regions)\n Note: coding direction is calculated on selective units, contra vs. ipsi, within the specified time_period\n \"\"\"\n _, proj_contra_trial_g1, proj_ipsi_trial_g1, time_stamps, unit_g1_hemi = psth.compute_CD_projected_psth(\n unit_g1.fetch('KEY'), time_period=time_period)\n _, proj_contra_trial_g2, proj_ipsi_trial_g2, time_stamps, unit_g2_hemi = psth.compute_CD_projected_psth(\n unit_g2.fetch('KEY'), time_period=time_period)\n\n # get event start times: sample, delay, response\n period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], unit_g1, 'good_noearlylick_hit')\n\n if labels:\n assert len(labels) == 2\n else:\n labels = ('unit group 1', 'unit group 2')\n\n # plot projected trial-psth\n fig, axs = plt.subplots(1, 2, figsize=(16, 6))\n\n _plot_with_sem(proj_contra_trial_g1, time_stamps, ax=axs[0], c='b')\n _plot_with_sem(proj_ipsi_trial_g1, time_stamps, ax=axs[0], c='r')\n _plot_with_sem(proj_contra_trial_g2, time_stamps, ax=axs[1], c='b')\n _plot_with_sem(proj_ipsi_trial_g2, time_stamps, ax=axs[1], c='r')\n\n # cosmetic\n for ax, label in zip(axs, labels):\n for x in period_starts:\n ax.axvline(x=x, linestyle = '--', color = 'k')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('CD projection (a.u.)')\n ax.set_xlabel('Time (s)')\n ax.set_title(label)\n\n # plot trial CD-endpoint correlation - if 2 unit-groups are from 2 hemispheres,\n # then contra-ipsi definition is based on the first group\n p_start, p_end = time_period\n contra_cdend_1 = proj_contra_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n ipsi_cdend_1 = proj_ipsi_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n if unit_g1_hemi == unit_g1_hemi:\n contra_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n ipsi_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n else:\n contra_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n ipsi_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n\n c_df = pd.DataFrame([contra_cdend_1, contra_cdend_2]).T\n c_df.columns = labels\n c_df['trial-type'] = 'contra'\n i_df = pd.DataFrame([ipsi_cdend_1, ipsi_cdend_2]).T\n i_df.columns = labels\n i_df['trial-type'] = 'ipsi'\n df = c_df.append(i_df)\n\n jplot = _jointplot_w_hue(data=df, x=labels[0], y=labels[1], hue= 'trial-type', colormap=['b', 'r'],\n figsize=(8, 6), fig=None, scatter_kws=None)\n jplot['fig'].show()\n\n return fig\n\n\n# ========== Foraging task ==========\ndef plot_unit_period_fit(linear_model='Q_rel + Q_tot + rpe'):\n#%%\n # linear_model='Q_c + Q_i + rpe'\n q_unit = ((ephys.Unit * ephys.ClusterMetric * ephys.UnitStat * ephys.MAPClusterMetric.DriftMetric)\n & 'presence_ratio > 0.95'\n & 'amplitude_cutoff < 0.1'\n & 'isi_violation < 0.5' \n & 'unit_amp > 70'\n # & 'drift_metric < 0.1'\n )\n\n q_hist = (q_unit * histology.ElectrodeCCFPosition.ElectrodePosition) * ccf.CCFAnnotation\n q_unit_n = dj.U('annotation').aggr(q_hist, area_num_units='count(*)')\n q_hist *= q_unit_n\n\n lvs = (psth_foraging.LinearModel.X & {'multi_linear_model': linear_model}).fetch('var_name')\n q_all = ((psth_foraging.UnitPeriodLinearFit\n * psth_foraging.UnitPeriodLinearFit.Param\n * 
q_hist)\n & {'multi_linear_model': linear_model}) \n\n#%%\n # -- Heatmap ---\n # for lv in zip(lvs):\n # fig, ax = plt.subplots(1, 1, figsize=(13, 15))\n\n # df = pd.DataFrame((q_all & {'var_name': lv}).proj('beta', 'p', 't', 'area_num_units').fetch())\n # df = df.pivot(index=['subject_id', 'session', 'insertion_number',\n # 'clustering_method', 'unit', 'annotation', 'area_num_units'], \n # columns='period', values='t')\n # df.sort_values(by=['area_num_units', 'iti_all'], ascending=False , inplace=True)\n # df = df.reset_index().drop(['subject_id', 'session', 'insertion_number', \n # 'clustering_method', 'unit', 'area_num_units'], axis=1)\n # df = df.set_index('annotation')\n # sns.heatmap(df, ax=ax, cmap='coolwarm')\n # ax.set_position([0.5, 0.1, 0.2, 0.8])\n\n#%%\n # -- t distribution --\n epochs = ['delay', 'go_to_end', 'iti_first_2', 'iti_last_2']\n fig, axs = plt.subplots(len(lvs), len(epochs), figsize=(4*len(epochs), 4*len(lvs)))\n areas = q_unit_n.fetch(order_by='area_num_units desc', format='frame')\n\n # Areas that have most number of neurons\n areas = list(areas.index[:10])\n\n for i, lv in enumerate(lvs):\n for j, ep in enumerate(epochs):\n ax = axs[i, j]\n ax.axhline(y=0.95, color='k', linestyle=':')\n ax.axvline(x=1.96, color='k', linestyle=':')\n\n for area in areas:\n this_ts = (q_all & {'var_name': lv, 'period': ep, 'annotation': area}).fetch('t')\n values, bin = np.histogram(np.abs(this_ts), 100)\n ax.plot(bin[:-1], np.cumsum(values)/len(this_ts), label=f'{area}, n = {len(this_ts)}')\n\n ax.set(xlim=(0, 10))\n ax.label_outer()\n\n if i == 0:\n ax.set_title(ep)\n if j == 0:\n ax.set_ylabel(lv)\n if i == len(lvs) - 1 and j == 0:\n ax.set_xlabel('|t value|')\n \n ax.legend(bbox_to_anchor=(-1,3), loc='upper left')\n\n#%%\n # -- ipsi and contra action value weights --\n fig, axs = plt.subplots(1,2, figsize=(8,4))\n if linear_model == 'Q_c + Q_i + rpe':\n lvs = ['ipsi_action_value', 'contra_action_value'] \n elif linear_model == 'Q_l + Q_r + rpe':\n lvs = ['left_action_value', 'right_action_value']\n else:\n lvs = ['relative_action_value_ic', 'total_action_value']\n\n for j, ep in enumerate(['go_to_end', 'iti_all']):\n ax = axs[j]\n\n for area in areas:\n # if not 'thalamus' in area:\n # continue\n\n ax.axhline(y=-2, color='k', ls='--', lw=.5)\n ax.axhline(y=2, color='k', ls='--', lw=.5)\n ax.axvline(x=-2, color='k', ls='--', lw=.5)\n ax.axvline(x=2, color='k', ls='--', lw=.5)\n\n df = pd.DataFrame((q_all\n & {'annotation': area}\n & {'period': ep}).proj('beta', 'p', 'area_num_units', 't').fetch())\n\n betas = df.pivot(index=['subject_id', 'session', 'insertion_number',\n 'clustering_method', 'unit', 'annotation', 'area_num_units'], \n columns='var_name', values='t')\n ps = df.pivot(index=['subject_id', 'session', 'insertion_number',\n 'clustering_method', 'unit', 'annotation', 'area_num_units'], \n columns='var_name', values='p')\n sizes = 2 + 2 * np.sum(ps.values < 0.05, axis=1)\n ax.scatter(x=betas[lvs[0]], y=betas[lvs[1]], s=sizes)\n ax.set_xlim([-20, 20])\n ax.set_ylim([-20, 20])\n ax.set_xlabel(lvs[0])\n ax.set_ylabel(lvs[1])\n ax.set_title(ep)\n ax.label_outer()\n\n # sns.scatterplot(data=betas, x='ipsi_action_value', y='contra_action_value',\n # hue='annotation', sizes=sizes, legend=False)\n\n\n#%%\n\ndef plot_example_cells(sort_lv = 'relative_action_value_ic', \n sort_ep = 'iti_all',\n best_n = 10, linear_model='Q_rel + Q_tot + rpe'):\n \n#%%\n q_unit = ((ephys.Unit * ephys.ClusterMetric * ephys.UnitStat * ephys.MAPClusterMetric.DriftMetric)\n & 'presence_ratio > 
0.95'\n & 'amplitude_cutoff < 0.1'\n & 'isi_violation < 0.5' \n & 'unit_amp > 100'\n # & 'drift_metric < 0.1'\n )\n\n q_hist = (q_unit * histology.ElectrodeCCFPosition.ElectrodePosition) * ccf.CCFAnnotation\n q_unit_n = dj.U('annotation').aggr(q_hist, area_num_units='count(*)')\n q_hist *= q_unit_n\n\n lvs = (psth_foraging.LinearModel.X & {'multi_linear_model': linear_model}).fetch('var_name')\n q_all = ((psth_foraging.UnitPeriodLinearFit\n * psth_foraging.UnitPeriodLinearFit.Param\n * q_hist)\n & {'multi_linear_model': linear_model}) \n\n # Best n (absolute value)\n best_models = (q_all & f'var_name = \"{sort_lv}\"' & f'period = \"{sort_ep}\"').proj(\n 'actual_behavior_model', abs_t='abs(t)').fetch(order_by='abs_t desc', limit=best_n, format='frame')\n\n for unit_key in best_models.reset_index().to_dict('records'):\n unit_psth.plot_unit_psth_choice_outcome(unit_key)\n unit_psth.plot_unit_psth_latent_variable_quantile(unit_key, \n model_id=unit_key['actual_behavior_model'])\n unit_psth.plot_unit_period_tuning(unit_key)\n\n\n#%%\n# =========== HELPER ==============\n\ndef get_m_scale(shank_count):\n return 1350 - 150*shank_count\n" ]
[ [ "numpy.nanmax", "numpy.linspace", "numpy.cumsum", "pandas.DataFrame", "numpy.where", "numpy.hstack", "numpy.arange", "numpy.full", "numpy.sin", "matplotlib.pyplot.subplot", "numpy.nansum", "numpy.diff", "numpy.repeat", "matplotlib.pyplot.figure", "matplotlib.path.Path", "matplotlib.pyplot.GridSpec", "numpy.array", "numpy.logical_and", "numpy.sum", "numpy.histogram2d", "numpy.abs", "matplotlib.pyplot.subplots", "numpy.linalg.norm", "numpy.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Azarattum/Jumpcutter
[ "f8439776689c7baecfc68516b2ef3afe743284db" ]
[ "jumpcutter.py" ]
[ "from contextlib import closing\nfrom PIL import Image\nimport subprocess\nfrom audiotsm import phasevocoder\nfrom audiotsm.io.wav import WavReader, WavWriter\nfrom scipy.io import wavfile\nimport numpy as np\nimport re\nimport math\nfrom shutil import copyfile, rmtree\nimport os\nimport argparse\nfrom pytube import YouTube\n\ndef downloadFile(url):\n name = YouTube(url).streams.first().download()\n newname = name.replace(' ','_')\n os.rename(name,newname)\n return newname\n\ndef getMaxVolume(s):\n maxv = float(np.max(s))\n minv = float(np.min(s))\n return max(maxv,-minv)\n\ndef copyFrame(inputFrame,outputFrame):\n src = TEMP_FOLDER+\"/frame{:06d}\".format(inputFrame+1)+\".jpg\"\n dst = TEMP_FOLDER+\"/newFrame{:06d}\".format(outputFrame+1)+\".jpg\"\n if not os.path.isfile(src):\n return False\n copyfile(src, dst)\n if outputFrame%20 == 19:\n print(str(outputFrame+1)+\" time-altered frames saved.\")\n return True\n\ndef inputToOutputFilename(filename):\n dotIndex = filename.rfind(\".\")\n return filename[:dotIndex]+\"_ALTERED\"+filename[dotIndex:]\n\ndef createPath(s):\n #assert (not os.path.exists(s)), \"The filepath \"+s+\" already exists. Don't want to overwrite it. Aborting.\"\n\n try: \n os.mkdir(s)\n except OSError: \n assert False, \"Creation of the directory %s failed. (The TEMP folder may already exist. Delete or rename it, and try again.)\"\n\ndef deletePath(s): # Dangerous! Watch out!\n try: \n rmtree(s,ignore_errors=False)\n except OSError: \n print (\"Deletion of the directory %s failed\" % s)\n print(OSError)\n\ndef writeELD(start, end, number):\n startFrame = int(start % frameRate)\n startSecond = int((start / frameRate) % 60)\n startMinute = int((start / frameRate / 60) % 60)\n startHour = int((start / frameRate / 60 / 60))\n\n endFrame = int(end % frameRate)\n endSecond = int((end / frameRate) % 60)\n endMinute = int((end / frameRate / 60) % 60)\n endHour = int((end / frameRate / 60 / 60))\n\n eld_file = open(OUTPUT_FILE, \"a\")\n eld_file.write(\"{0} 001 V C {4}:{3}:{2}:{1} {8}:{7}:{6}:{5} {4}:{3}:{2}:{1} {8}:{7}:{6}:{5}\\r\\n\".format(\n str(number).zfill(3),\n str(startFrame).zfill(2),\n str(startSecond).zfill(2),\n str(startMinute).zfill(2),\n str(startHour).zfill(2),\n str(endFrame).zfill(2),\n str(endSecond).zfill(2),\n str(endMinute).zfill(2),\n str(endHour).zfill(2)\n ))\n eld_file.close()\n\n\nparser = argparse.ArgumentParser(description='Modifies a video file to play at different speeds when there is sound vs. silence.')\nparser.add_argument('--input_file', type=str, help='the video file you want modified')\nparser.add_argument('--edl', type=bool, help='EDL export option. (Supports only cuts off)')\nparser.add_argument('--url', type=str, help='A youtube url to download and process')\nparser.add_argument('--output_file', type=str, default=\"\", help=\"the output file. (optional. if not included, it'll just modify the input file name)\")\nparser.add_argument('--silent_threshold', type=float, default=0.03, help=\"the volume amount that frames' audio needs to surpass to be consider \\\"sounded\\\". It ranges from 0 (silence) to 1 (max volume)\")\nparser.add_argument('--sounded_speed', type=float, default=1.00, help=\"the speed that sounded (spoken) frames should be played at. Typically 1.\")\nparser.add_argument('--silent_speed', type=float, default=5.00, help=\"the speed that silent frames should be played at. 
999999 for jumpcutting.\")\nparser.add_argument('--frame_margin', type=float, default=1, help=\"some silent frames adjacent to sounded frames are included to provide context. How many frames on either the side of speech should be included? That's this variable.\")\nparser.add_argument('--sample_rate', type=float, default=44100, help=\"sample rate of the input and output videos\")\nparser.add_argument('--frame_rate', type=float, default=30, help=\"frame rate of the input and output videos. optional... I try to find it out myself, but it doesn't always work.\")\nparser.add_argument('--frame_quality', type=int, default=3, help=\"quality of frames to be extracted from input video. 1 is highest, 31 is lowest, 3 is the default.\")\n\nargs = parser.parse_args()\n\n\n\nframeRate = args.frame_rate\nSAMPLE_RATE = args.sample_rate\nSILENT_THRESHOLD = args.silent_threshold\nFRAME_SPREADAGE = args.frame_margin\nNEW_SPEED = [args.silent_speed, args.sounded_speed]\nif args.url != None:\n INPUT_FILE = downloadFile(args.url)\nelse:\n INPUT_FILE = args.input_file\nURL = args.url\nFRAME_QUALITY = args.frame_quality\nEDL = args.edl\n\nassert INPUT_FILE != None , \"why u put no input file, that dum\"\n \nOUTPUT_FILE = inputToOutputFilename(INPUT_FILE)\nif len(args.output_file) >= 1:\n OUTPUT_FILE = args.output_file\nelse:\n OUTPUT_FILE = inputToOutputFilename(INPUT_FILE)\n\nTEMP_FOLDER = \"TEMP\"\nAUDIO_FADE_ENVELOPE_SIZE = 400 # smooth out transitiion's audio by quickly fading in/out (arbitrary magic number whatever)\n \ncreatePath(TEMP_FOLDER)\n\nif not EDL:\n command = \"ffmpeg -i \"+INPUT_FILE+\" -qscale:v \"+str(FRAME_QUALITY)+\" \"+TEMP_FOLDER+\"/frame%06d.jpg -hide_banner\"\n subprocess.call(command, shell=True)\n\ncommand = \"ffmpeg -i \"+INPUT_FILE+\" -ab 160k -ac 2 -ar \"+str(SAMPLE_RATE)+\" -vn \"+TEMP_FOLDER+\"/audio.wav\"\n\nsubprocess.call(command, shell=True)\n\ncommand = \"ffmpeg -i \"+TEMP_FOLDER+\"/input.mp4 2>&1\"\nf = open(TEMP_FOLDER+\"/params.txt\", \"w\")\nsubprocess.call(command, shell=True, stdout=f)\n\n\n\nsampleRate, audioData = wavfile.read(TEMP_FOLDER+\"/audio.wav\")\naudioSampleCount = audioData.shape[0]\nmaxAudioVolume = getMaxVolume(audioData)\n\nf = open(TEMP_FOLDER+\"/params.txt\", 'r+')\npre_params = f.read()\nf.close()\nparams = pre_params.split('\\n')\nfor line in params:\n m = re.search('Stream #.*Video.* ([0-9]*) fps',line)\n if m is not None:\n frameRate = float(m.group(1))\n\nsamplesPerFrame = sampleRate/frameRate\n\naudioFrameCount = int(math.ceil(audioSampleCount/samplesPerFrame))\n\nhasLoudAudio = np.zeros((audioFrameCount))\n\n\n\nfor i in range(audioFrameCount):\n start = int(i*samplesPerFrame)\n end = min(int((i+1)*samplesPerFrame),audioSampleCount)\n audiochunks = audioData[start:end]\n maxchunksVolume = float(getMaxVolume(audiochunks))/maxAudioVolume\n if maxchunksVolume >= SILENT_THRESHOLD:\n hasLoudAudio[i] = 1\n\nchunks = [[0,0,0]]\nshouldIncludeFrame = np.zeros((audioFrameCount))\nfor i in range(audioFrameCount):\n start = int(max(0,i-FRAME_SPREADAGE))\n end = int(min(audioFrameCount,i+1+FRAME_SPREADAGE))\n shouldIncludeFrame[i] = np.max(hasLoudAudio[start:end])\n if (i >= 1 and shouldIncludeFrame[i] != shouldIncludeFrame[i-1]): # Did we flip?\n chunks.append([chunks[-1][1],i,shouldIncludeFrame[i-1]])\n\nchunks.append([chunks[-1][1],audioFrameCount,shouldIncludeFrame[i-1]])\nchunks = chunks[1:]\n\noutputAudioData = np.zeros((0,audioData.shape[1]))\noutputPointer = 0\n\nlastExistingFrame = None\nedlFrameNumber = 0\nif EDL and os.path.isfile(OUTPUT_FILE):\n 
os.remove(OUTPUT_FILE)\nfor chunk in chunks:\n if EDL:\n if (chunk[2] == True):\n edlFrameNumber += 1\n writeELD(chunk[0], chunk[1], edlFrameNumber)\n continue\n \n audioChunk = audioData[int(chunk[0]*samplesPerFrame):int(chunk[1]*samplesPerFrame)]\n \n sFile = TEMP_FOLDER+\"/tempStart.wav\"\n eFile = TEMP_FOLDER+\"/tempEnd.wav\"\n wavfile.write(sFile,SAMPLE_RATE,audioChunk)\n with WavReader(sFile) as reader:\n with WavWriter(eFile, reader.channels, reader.samplerate) as writer:\n tsm = phasevocoder(reader.channels, speed=NEW_SPEED[int(chunk[2])])\n tsm.run(reader, writer)\n _, alteredAudioData = wavfile.read(eFile)\n leng = alteredAudioData.shape[0]\n endPointer = outputPointer+leng\n outputAudioData = np.concatenate((outputAudioData,alteredAudioData/maxAudioVolume))\n\n #outputAudioData[outputPointer:endPointer] = alteredAudioData/maxAudioVolume\n\n # smooth out transitiion's audio by quickly fading in/out\n \n if leng < AUDIO_FADE_ENVELOPE_SIZE:\n outputAudioData[outputPointer:endPointer] = 0 # audio is less than 0.01 sec, let's just remove it.\n else:\n premask = np.arange(AUDIO_FADE_ENVELOPE_SIZE)/AUDIO_FADE_ENVELOPE_SIZE\n mask = np.repeat(premask[:, np.newaxis],2,axis=1) # make the fade-envelope mask stereo\n outputAudioData[outputPointer:outputPointer+AUDIO_FADE_ENVELOPE_SIZE] *= mask\n outputAudioData[endPointer-AUDIO_FADE_ENVELOPE_SIZE:endPointer] *= 1-mask\n\n startOutputFrame = int(math.ceil(outputPointer/samplesPerFrame))\n endOutputFrame = int(math.ceil(endPointer/samplesPerFrame))\n for outputFrame in range(startOutputFrame, endOutputFrame):\n inputFrame = int(chunk[0]+NEW_SPEED[int(chunk[2])]*(outputFrame-startOutputFrame))\n didItWork = copyFrame(inputFrame,outputFrame)\n if didItWork:\n lastExistingFrame = inputFrame\n else:\n copyFrame(lastExistingFrame,outputFrame)\n\n outputPointer = endPointer\n\nif not EDL:\n wavfile.write(TEMP_FOLDER+\"/audioNew.wav\",SAMPLE_RATE,outputAudioData)\n\n'''\noutputFrame = math.ceil(outputPointer/samplesPerFrame)\nfor endGap in range(outputFrame,audioFrameCount):\n copyFrame(int(audioSampleCount/samplesPerFrame)-1,endGap)\n'''\n\nif not EDL:\n command = \"ffmpeg -framerate \"+str(frameRate)+\" -i \"+TEMP_FOLDER+\"/newFrame%06d.jpg -i \"+TEMP_FOLDER+\"/audioNew.wav -strict -2 \"+OUTPUT_FILE\n subprocess.call(command, shell=True)\n\ndeletePath(TEMP_FOLDER)\n\n" ]
[ [ "scipy.io.wavfile.write", "numpy.min", "numpy.arange", "numpy.concatenate", "numpy.max", "numpy.repeat", "numpy.zeros", "scipy.io.wavfile.read" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
nagachika/probability
[ "2a5609ceec01a388ec03b583b4f8e813cfbad981", "2a5609ceec01a388ec03b583b4f8e813cfbad981", "2a5609ceec01a388ec03b583b4f8e813cfbad981", "2a5609ceec01a388ec03b583b4f8e813cfbad981", "2a5609ceec01a388ec03b583b4f8e813cfbad981", "2a5609ceec01a388ec03b583b4f8e813cfbad981" ]
[ "tensorflow_probability/python/distributions/zipf.py", "tensorflow_probability/python/distributions/batch_reshape_test.py", "experimental/fun_mcmc/fun_mcmc_lib.py", "tensorflow_probability/python/distributions/vector_sinh_arcsinh_diag.py", "tensorflow_probability/python/distributions/quantized_distribution.py", "tensorflow_probability/python/internal/backend/numpy/dtype.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Zipf distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions.seed_stream import SeedStream\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\n\n\n__all__ = [\n \"Zipf\",\n]\n\n\nclass Zipf(distribution.Distribution):\n \"\"\"Zipf distribution.\n\n The Zipf distribution is parameterized by a `power` parameter.\n\n #### Mathematical Details\n\n The probability mass function (pmf) is,\n\n ```none\n pmf(k; alpha, k >= 0) = (k^(-alpha)) / Z\n Z = zeta(alpha).\n ```\n\n where `power = alpha` and Z is the normalization constant.\n `zeta` is the [Riemann zeta function](\n https://en.wikipedia.org/wiki/Riemann_zeta_function).\n\n Note that gradients with respect to the `power` parameter are not\n supported in the current implementation.\n \"\"\"\n\n def __init__(self,\n power,\n dtype=tf.int32,\n interpolate_nondiscrete=True,\n sample_maximum_iterations=100,\n validate_args=False,\n allow_nan_stats=False,\n name=\"Zipf\"):\n \"\"\"Initialize a batch of Zipf distributions.\n\n Args:\n power: `Float` like `Tensor` representing the power parameter. Must be\n strictly greater than `1`.\n dtype: The `dtype` of `Tensor` returned by `sample`.\n Default value: `tf.int32`.\n interpolate_nondiscrete: Python `bool`. When `False`, `log_prob` returns\n `-inf` (and `prob` returns `0`) for non-integer inputs. When `True`,\n `log_prob` evaluates the continuous function `-power log(k) -\n log(zeta(power))` , which matches the Zipf pmf at integer arguments `k`\n (note that this function is not itself a normalized probability\n log-density).\n Default value: `True`.\n sample_maximum_iterations: Maximum number of iterations of allowable\n iterations in `sample`. When `validate_args=True`, samples which fail to\n reach convergence (subject to this cap) are masked out with\n `self.dtype.min` or `nan` depending on `self.dtype.is_integer`.\n Default value: `100`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. 
When `False`, an exception is raised if one or more\n of the statistic's batch members are undefined.\n Default value: `False`.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: `'Zipf'`.\n\n Raises:\n TypeError: if `power` is not `float` like.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n power = tf.convert_to_tensor(\n value=power,\n name=\"power\",\n dtype=dtype_util.common_dtype([power], preferred_dtype=tf.float32))\n if (not dtype_util.is_floating(power.dtype) or\n dtype_util.base_equal(power.dtype, tf.float16)):\n raise TypeError(\n \"power.dtype ({}) is not a supported `float` type.\".format(\n dtype_util.name(power.dtype)))\n runtime_assertions = []\n if validate_args:\n runtime_assertions.append(assert_util.assert_greater(\n power, np.ones([], power.dtype.as_numpy_dtype)))\n with tf.control_dependencies(runtime_assertions):\n self._power = tf.identity(power, name=\"power\")\n\n self._interpolate_nondiscrete = interpolate_nondiscrete\n self._sample_maximum_iterations = sample_maximum_iterations\n super(Zipf, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._power],\n name=name)\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(power=0)\n\n @property\n def power(self):\n \"\"\"Exponent parameter.\"\"\"\n return self._power\n\n @property\n def interpolate_nondiscrete(self):\n \"\"\"Interpolate (log) probs on non-integer inputs.\"\"\"\n return self._interpolate_nondiscrete\n\n @property\n def sample_maximum_iterations(self):\n \"\"\"Maximum number of allowable iterations in `sample`.\"\"\"\n return self._sample_maximum_iterations\n\n def _batch_shape_tensor(self):\n return tf.shape(input=self.power)\n\n def _batch_shape(self):\n return self.power.shape\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _log_prob(self, x):\n # The log probability at positive integer points x is log(x^(-power) / Z)\n # where Z is the normalization constant. For x < 1 and non-integer points,\n # the log-probability is -inf.\n #\n # However, if interpolate_nondiscrete is True, we return the natural\n # continuous relaxation for x >= 1 which agrees with the log probability at\n # positive integer points.\n #\n # If interpolate_nondiscrete is False and validate_args is True, we check\n # that the sample point x is in the support. That is, x is equivalent to a\n # positive integer.\n x = tf.cast(x, self.power.dtype)\n if self.validate_args and not self.interpolate_nondiscrete:\n x = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=self.dtype, assert_positive=True)\n return self._log_unnormalized_prob(x) - self._log_normalization()\n\n def _cdf(self, x):\n # CDF(x) at positive integer x is the probability that the Zipf variable is\n # less than or equal to x; given by the formula:\n # CDF(x) = 1 - (zeta(power, x + 1) / Z)\n # For fractional x, the CDF is equal to the CDF at n = floor(x).\n # For x < 1, the CDF is zero.\n\n # If interpolate_nondiscrete is True, we return a continuous relaxation\n # which agrees with the CDF at integer points.\n x = tf.cast(x, self.power.dtype)\n safe_x = tf.maximum(x if self.interpolate_nondiscrete else tf.floor(x), 0.)\n\n cdf = 1. - (\n tf.math.zeta(self.power, safe_x + 1.) 
/ tf.math.zeta(self.power, 1.))\n return tf.where(\n tf.broadcast_to(tf.less(x, 1.), tf.shape(input=cdf)),\n tf.zeros_like(cdf), cdf)\n\n def _log_normalization(self):\n return tf.math.log(tf.math.zeta(self.power, 1.))\n\n def _log_unnormalized_prob(self, x):\n safe_x = tf.maximum(x if self.interpolate_nondiscrete else tf.floor(x), 1.)\n y = -self.power * tf.math.log(safe_x)\n is_supported = tf.broadcast_to(tf.equal(x, safe_x), tf.shape(input=y))\n neg_inf = tf.fill(\n tf.shape(input=y), value=dtype_util.as_numpy_dtype(y.dtype)(-np.inf))\n return tf.where(is_supported, y, neg_inf)\n\n @distribution_util.AppendDocstring(\n \"\"\"Note: Zipf has an infinite mean when `power` <= 2.\"\"\")\n def _mean(self):\n zeta_p = tf.math.zeta(self.power[..., tf.newaxis] - [0., 1.], 1.)\n return zeta_p[..., 1] / zeta_p[..., 0]\n\n @distribution_util.AppendDocstring(\n \"\"\"Note: Zipf has infinite variance when `power` <= 3.\"\"\")\n def _variance(self):\n zeta_p = tf.math.zeta(self.power[..., tf.newaxis] - [0., 1., 2.], 1.)\n return ((zeta_p[..., 0] * zeta_p[..., 2]) - (zeta_p[..., 1]**2)) / (\n zeta_p[..., 0]**2)\n\n def _mode(self):\n return tf.ones_like(self.power, dtype=self.dtype)\n\n @distribution_util.AppendDocstring(\n \"\"\"The sampling algorithm is rejection-inversion; Algorithm ZRI of\n [Horman and Derflinger (1996)][1]. For simplicity, we don't use the\n squeeze function in our implementation.\n\n #### References\n [1]: W. Hormann , G. Derflinger, Rejection-inversion to generate variates\n from monotone discrete distributions, ACM Transactions on Modeling and\n Computer Simulation (TOMACS), v.6 n.3, p.169-184, July 1996.\n \"\"\")\n def _sample_n(self, n, seed=None):\n shape = tf.concat([[n], self.batch_shape_tensor()], axis=0)\n\n has_seed = seed is not None\n seed = SeedStream(seed, salt=\"zipf\")\n\n minval_u = self._hat_integral(0.5) + 1.\n maxval_u = self._hat_integral(tf.int64.max - 0.5)\n\n def loop_body(should_continue, k):\n \"\"\"Resample the non-accepted points.\"\"\"\n # The range of U is chosen so that the resulting sample K lies in\n # [0, tf.int64.max). The final sample, if accepted, is K + 1.\n u = tf.random.uniform(\n shape,\n minval=minval_u,\n maxval=maxval_u,\n dtype=self.power.dtype,\n seed=seed())\n\n # Sample the point X from the continuous density h(x) \\propto x^(-power).\n x = self._hat_integral_inverse(u)\n\n # Rejection-inversion requires a `hat` function, h(x) such that\n # \\int_{k - .5}^{k + .5} h(x) dx >= pmf(k + 1) for points k in the\n # support. A natural hat function for us is h(x) = x^(-power).\n #\n # After sampling X from h(x), suppose it lies in the interval\n # (K - .5, K + .5) for integer K. 
Then the corresponding K is accepted if\n # if lies to the left of x_K, where x_K is defined by:\n # \\int_{x_k}^{K + .5} h(x) dx = H(x_K) - H(K + .5) = pmf(K + 1),\n # where H(x) = \\int_x^inf h(x) dx.\n\n # Solving for x_K, we find that x_K = H_inverse(H(K + .5) + pmf(K + 1)).\n # Or, the acceptance condition is X <= H_inverse(H(K + .5) + pmf(K + 1)).\n # Since X = H_inverse(U), this simplifies to U <= H(K + .5) + pmf(K + 1).\n\n # Update the non-accepted points.\n # Since X \\in (K - .5, K + .5), the sample K is chosen as floor(X + 0.5).\n k = tf.where(should_continue, tf.floor(x + 0.5), k)\n accept = (u <= self._hat_integral(k + .5) + tf.exp(self._log_prob(k + 1)))\n\n return [should_continue & (~accept), k]\n\n should_continue, samples = tf.while_loop(\n cond=lambda should_continue, *ignore: tf.reduce_any(\n input_tensor=should_continue),\n body=loop_body,\n loop_vars=[\n tf.ones(shape, dtype=tf.bool), # should_continue\n tf.zeros(shape, dtype=self.power.dtype), # k\n ],\n parallel_iterations=1 if has_seed else 10,\n maximum_iterations=self.sample_maximum_iterations,\n )\n samples = samples + 1.\n\n if self.validate_args and dtype_util.is_integer(self.dtype):\n samples = distribution_util.embed_check_integer_casting_closed(\n samples, target_dtype=self.dtype, assert_positive=True)\n\n samples = tf.cast(samples, self.dtype)\n\n if self.validate_args:\n npdt = dtype_util.as_numpy_dtype(self.dtype)\n v = npdt(dtype_util.min(npdt) if dtype_util.is_integer(npdt) else np.nan)\n mask = tf.fill(shape, value=v)\n samples = tf.where(should_continue, mask, samples)\n\n return samples\n\n def _hat_integral(self, x):\n \"\"\"Integral of the `hat` function, used for sampling.\n\n We choose a `hat` function, h(x) = x^(-power), which is a continuous\n (unnormalized) density touching each positive integer at the (unnormalized)\n pmf. 
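A short usage sketch for the rejection-inversion sampler above, again assuming the public name `tfp.distributions.Zipf`; `power` must be greater than one, and `sample_maximum_iterations` bounds the resampling loop.

import tensorflow as tf
import tensorflow_probability as tfp  # assumed public export of the class above

tfd = tfp.distributions
dist = tfd.Zipf(power=2.5, sample_maximum_iterations=100)

# Samples are positive integers (dtype defaults to int32); heavy-tailed for
# small values of `power`.
samples = dist.sample(10, seed=42)
print(samples)
print(dist.log_prob(samples))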
This function implements `hat` integral: H(x) = int_x^inf h(t) dt;\n which is needed for sampling purposes.\n\n Arguments:\n x: A Tensor of points x at which to evaluate H(x).\n\n Returns:\n A Tensor containing evaluation H(x) at x.\n \"\"\"\n x = tf.cast(x, self.power.dtype)\n t = self.power - 1.\n return tf.exp((-t) * tf.math.log1p(x) - tf.math.log(t))\n\n def _hat_integral_inverse(self, x):\n \"\"\"Inverse function of _hat_integral.\"\"\"\n x = tf.cast(x, self.power.dtype)\n t = self.power - 1.\n return tf.math.expm1(-(tf.math.log(t) + tf.math.log(x)) / t)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for BatchReshape.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util as tfp_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\ntfd = tfp.distributions\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass _BatchReshapeTest(object):\n\n def make_wishart(self, dims, new_batch_shape, old_batch_shape):\n new_batch_shape_ph = (\n tf.constant(np.int32(new_batch_shape))\n if self.is_static_shape else tf.compat.v1.placeholder_with_default(\n np.int32(new_batch_shape), shape=None))\n\n scale = self.dtype([\n [[1., 0.5],\n [0.5, 1.]],\n [[0.5, 0.25],\n [0.25, 0.75]],\n ])\n scale = np.reshape(np.concatenate([scale, scale], axis=0),\n old_batch_shape + [dims, dims])\n scale_ph = tf.compat.v1.placeholder_with_default(\n scale, shape=scale.shape if self.is_static_shape else None)\n wishart = tfd.Wishart(df=5, scale=scale_ph)\n reshape_wishart = tfd.BatchReshape(\n distribution=wishart,\n batch_shape=new_batch_shape_ph,\n validate_args=True)\n\n return wishart, reshape_wishart\n\n def test_matrix_variate_sample_and_log_prob(self):\n if tf.executing_eagerly():\n # TODO(b/122840816): Modify this test so that it runs in eager mode or\n # document that the test is not intended to run in eager mode.\n return\n\n dims = 2\n seed = tfp_test_util.test_seed()\n new_batch_shape = [4]\n old_batch_shape = [2, 2]\n wishart, reshape_wishart = self.make_wishart(\n dims, new_batch_shape, old_batch_shape)\n\n batch_shape = reshape_wishart.batch_shape_tensor()\n event_shape = reshape_wishart.event_shape_tensor()\n\n expected_sample_shape = [3, 1] + new_batch_shape + [dims, dims]\n x = wishart.sample([3, 1], seed=seed)\n expected_sample = tf.reshape(x, expected_sample_shape)\n actual_sample = reshape_wishart.sample([3, 1], seed=seed)\n\n expected_log_prob_shape = [3, 1] + new_batch_shape\n expected_log_prob = tf.reshape(wishart.log_prob(x), expected_log_prob_shape)\n actual_log_prob = reshape_wishart.log_prob(expected_sample)\n\n [\n batch_shape_,\n event_shape_,\n 
expected_sample_,\n actual_sample_,\n expected_log_prob_,\n actual_log_prob_,\n ] = self.evaluate([\n batch_shape,\n event_shape,\n expected_sample,\n actual_sample,\n expected_log_prob,\n actual_log_prob,\n ])\n\n self.assertAllEqual(new_batch_shape, batch_shape_)\n self.assertAllEqual([dims, dims], event_shape_)\n self.assertAllClose(expected_sample_, actual_sample_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_log_prob_, actual_log_prob_,\n atol=0., rtol=1e-6)\n if not self.is_static_shape:\n return\n self.assertAllEqual(new_batch_shape, reshape_wishart.batch_shape)\n self.assertAllEqual([dims, dims], reshape_wishart.event_shape)\n self.assertAllEqual(expected_sample_shape, actual_sample.shape)\n self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)\n\n def test_matrix_variate_stats(self):\n dims = 2\n new_batch_shape = [4]\n old_batch_shape = [2, 2]\n wishart, reshape_wishart = self.make_wishart(\n dims, new_batch_shape, old_batch_shape)\n\n expected_scalar_stat_shape = new_batch_shape\n expected_matrix_stat_shape = new_batch_shape + [dims, dims]\n\n expected_entropy = tf.reshape(wishart.entropy(), expected_scalar_stat_shape)\n actual_entropy = reshape_wishart.entropy()\n\n expected_mean = tf.reshape(wishart.mean(), expected_matrix_stat_shape)\n actual_mean = reshape_wishart.mean()\n\n expected_mode = tf.reshape(wishart.mode(), expected_matrix_stat_shape)\n actual_mode = reshape_wishart.mode()\n\n expected_stddev = tf.reshape(wishart.stddev(), expected_matrix_stat_shape)\n actual_stddev = reshape_wishart.stddev()\n\n expected_variance = tf.reshape(wishart.variance(),\n expected_matrix_stat_shape)\n actual_variance = reshape_wishart.variance()\n\n [\n expected_entropy_,\n actual_entropy_,\n expected_mean_,\n actual_mean_,\n expected_mode_,\n actual_mode_,\n expected_stddev_,\n actual_stddev_,\n expected_variance_,\n actual_variance_,\n ] = self.evaluate([\n expected_entropy,\n actual_entropy,\n expected_mean,\n actual_mean,\n expected_mode,\n actual_mode,\n expected_stddev,\n actual_stddev,\n expected_variance,\n actual_variance,\n ])\n\n self.assertAllClose(expected_entropy_, actual_entropy_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_mean_, actual_mean_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_mode_, actual_mode_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_stddev_, actual_stddev_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_variance_, actual_variance_,\n atol=0., rtol=1e-6)\n if not self.is_static_shape:\n return\n self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)\n self.assertAllEqual(expected_matrix_stat_shape, actual_mean.shape)\n self.assertAllEqual(expected_matrix_stat_shape, actual_mode.shape)\n self.assertAllEqual(expected_matrix_stat_shape, actual_stddev.shape)\n self.assertAllEqual(expected_matrix_stat_shape, actual_variance.shape)\n\n def make_normal(self, new_batch_shape, old_batch_shape):\n new_batch_shape_ph = (\n tf.constant(np.int32(new_batch_shape))\n if self.is_static_shape else tf.compat.v1.placeholder_with_default(\n np.int32(new_batch_shape), shape=None))\n\n scale = self.dtype(0.5 + np.arange(\n np.prod(old_batch_shape)).reshape(old_batch_shape))\n scale_ph = tf.compat.v1.placeholder_with_default(\n scale, shape=scale.shape if self.is_static_shape else None)\n normal = tfd.Normal(loc=self.dtype(0), scale=scale_ph)\n reshape_normal = tfd.BatchReshape(\n distribution=normal, batch_shape=new_batch_shape_ph, validate_args=True)\n return normal, reshape_normal\n\n def 
test_scalar_variate_sample_and_log_prob(self):\n if tf.executing_eagerly():\n # TODO(b/122840816): Modify this test so that it runs in eager mode or\n # document that the test is not intended to run in eager mode.\n return\n\n seed = tfp_test_util.test_seed()\n\n new_batch_shape = [2, 2]\n old_batch_shape = [4]\n\n normal, reshape_normal = self.make_normal(\n new_batch_shape, old_batch_shape)\n\n batch_shape = reshape_normal.batch_shape_tensor()\n event_shape = reshape_normal.event_shape_tensor()\n\n expected_sample_shape = new_batch_shape\n x = normal.sample(seed=seed)\n expected_sample = tf.reshape(x, expected_sample_shape)\n actual_sample = reshape_normal.sample(seed=seed)\n\n expected_log_prob_shape = new_batch_shape\n expected_log_prob = tf.reshape(normal.log_prob(x), expected_log_prob_shape)\n actual_log_prob = reshape_normal.log_prob(expected_sample)\n\n [\n batch_shape_,\n event_shape_,\n expected_sample_,\n actual_sample_,\n expected_log_prob_,\n actual_log_prob_,\n ] = self.evaluate([\n batch_shape,\n event_shape,\n expected_sample,\n actual_sample,\n expected_log_prob,\n actual_log_prob,\n ])\n self.assertAllEqual(new_batch_shape, batch_shape_)\n self.assertAllEqual([], event_shape_)\n self.assertAllClose(expected_sample_, actual_sample_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_log_prob_, actual_log_prob_,\n atol=0., rtol=1e-6)\n if not self.is_static_shape:\n return\n self.assertAllEqual(new_batch_shape, reshape_normal.batch_shape)\n self.assertAllEqual([], reshape_normal.event_shape)\n self.assertAllEqual(expected_sample_shape, actual_sample.shape)\n self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)\n\n def test_scalar_variate_stats(self):\n new_batch_shape = [2, 2]\n old_batch_shape = [4]\n\n normal, reshape_normal = self.make_normal(new_batch_shape, old_batch_shape)\n\n expected_scalar_stat_shape = new_batch_shape\n\n expected_entropy = tf.reshape(normal.entropy(), expected_scalar_stat_shape)\n actual_entropy = reshape_normal.entropy()\n\n expected_mean = tf.reshape(normal.mean(), expected_scalar_stat_shape)\n actual_mean = reshape_normal.mean()\n\n expected_mode = tf.reshape(normal.mode(), expected_scalar_stat_shape)\n actual_mode = reshape_normal.mode()\n\n expected_stddev = tf.reshape(normal.stddev(), expected_scalar_stat_shape)\n actual_stddev = reshape_normal.stddev()\n\n expected_variance = tf.reshape(normal.variance(),\n expected_scalar_stat_shape)\n actual_variance = reshape_normal.variance()\n\n [\n expected_entropy_,\n actual_entropy_,\n expected_mean_,\n actual_mean_,\n expected_mode_,\n actual_mode_,\n expected_stddev_,\n actual_stddev_,\n expected_variance_,\n actual_variance_,\n ] = self.evaluate([\n expected_entropy,\n actual_entropy,\n expected_mean,\n actual_mean,\n expected_mode,\n actual_mode,\n expected_stddev,\n actual_stddev,\n expected_variance,\n actual_variance,\n ])\n self.assertAllClose(expected_entropy_, actual_entropy_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_mean_, actual_mean_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_mode_, actual_mode_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_stddev_, actual_stddev_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_variance_, actual_variance_,\n atol=0., rtol=1e-6)\n if not self.is_static_shape:\n return\n self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)\n self.assertAllEqual(expected_scalar_stat_shape, actual_mean.shape)\n self.assertAllEqual(expected_scalar_stat_shape, actual_mode.shape)\n 
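A minimal sketch of the property these tests exercise, assuming the standard public name `tfd.BatchReshape`: samples, log-probabilities and statistics of the reshaped distribution are simply `tf.reshape`d versions of the base distribution's.

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp  # public name of the class under test is assumed

tfd = tfp.distributions
base = tfd.Normal(loc=0., scale=np.float32([0.5, 1., 1.5, 2.]))  # batch_shape [4]
reshaped = tfd.BatchReshape(distribution=base, batch_shape=[2, 2],
                            validate_args=True)

x = tf.zeros([2, 2])
print(reshaped.batch_shape)               # [2, 2]
print(reshaped.log_prob(x))               # shape [2, 2]
print(tf.reshape(base.stddev(), [2, 2]))  # equals reshaped.stddev()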
self.assertAllEqual(expected_scalar_stat_shape, actual_stddev.shape)\n self.assertAllEqual(expected_scalar_stat_shape, actual_variance.shape)\n\n def make_mvn(self, dims, new_batch_shape, old_batch_shape):\n new_batch_shape_ph = (\n tf.constant(np.int32(new_batch_shape))\n if self.is_static_shape else tf.compat.v1.placeholder_with_default(\n np.int32(new_batch_shape), shape=None))\n\n scale = np.ones(old_batch_shape + [dims], self.dtype)\n scale_ph = tf.compat.v1.placeholder_with_default(\n scale, shape=scale.shape if self.is_static_shape else None)\n mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)\n reshape_mvn = tfd.BatchReshape(\n distribution=mvn, batch_shape=new_batch_shape_ph, validate_args=True)\n return mvn, reshape_mvn\n\n def test_vector_variate_sample_and_log_prob(self):\n if tf.executing_eagerly():\n # TODO(b/122840816): Modify this test so that it runs in eager mode or\n # document that the test is not intended to run in eager mode.\n return\n\n dims = 3\n seed = tfp_test_util.test_seed()\n new_batch_shape = [2, 1]\n old_batch_shape = [2]\n mvn, reshape_mvn = self.make_mvn(\n dims, new_batch_shape, old_batch_shape)\n\n batch_shape = reshape_mvn.batch_shape_tensor()\n event_shape = reshape_mvn.event_shape_tensor()\n\n expected_sample_shape = [3] + new_batch_shape + [dims]\n x = mvn.sample(3, seed=seed)\n expected_sample = tf.reshape(x, expected_sample_shape)\n actual_sample = reshape_mvn.sample(3, seed=seed)\n\n expected_log_prob_shape = [3] + new_batch_shape\n expected_log_prob = tf.reshape(mvn.log_prob(x), expected_log_prob_shape)\n actual_log_prob = reshape_mvn.log_prob(expected_sample)\n\n [\n batch_shape_,\n event_shape_,\n expected_sample_,\n actual_sample_,\n expected_log_prob_,\n actual_log_prob_,\n ] = self.evaluate([\n batch_shape,\n event_shape,\n expected_sample,\n actual_sample,\n expected_log_prob,\n actual_log_prob,\n ])\n self.assertAllEqual(new_batch_shape, batch_shape_)\n self.assertAllEqual([dims], event_shape_)\n self.assertAllClose(expected_sample_, actual_sample_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_log_prob_, actual_log_prob_,\n atol=0., rtol=1e-6)\n if not self.is_static_shape:\n return\n self.assertAllEqual(new_batch_shape, reshape_mvn.batch_shape)\n self.assertAllEqual([dims], reshape_mvn.event_shape)\n self.assertAllEqual(expected_sample_shape, actual_sample.shape)\n self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)\n\n def test_vector_variate_stats(self):\n dims = 3\n new_batch_shape = [2, 1]\n old_batch_shape = [2]\n mvn, reshape_mvn = self.make_mvn(\n dims, new_batch_shape, old_batch_shape)\n\n expected_scalar_stat_shape = new_batch_shape\n\n expected_entropy = tf.reshape(mvn.entropy(), expected_scalar_stat_shape)\n actual_entropy = reshape_mvn.entropy()\n\n expected_vector_stat_shape = new_batch_shape + [dims]\n\n expected_mean = tf.reshape(mvn.mean(), expected_vector_stat_shape)\n actual_mean = reshape_mvn.mean()\n\n expected_mode = tf.reshape(mvn.mode(), expected_vector_stat_shape)\n actual_mode = reshape_mvn.mode()\n\n expected_stddev = tf.reshape(mvn.stddev(), expected_vector_stat_shape)\n actual_stddev = reshape_mvn.stddev()\n\n expected_variance = tf.reshape(mvn.variance(), expected_vector_stat_shape)\n actual_variance = reshape_mvn.variance()\n\n expected_matrix_stat_shape = new_batch_shape + [dims, dims]\n\n expected_covariance = tf.reshape(mvn.covariance(),\n expected_matrix_stat_shape)\n actual_covariance = reshape_mvn.covariance()\n\n [\n expected_entropy_,\n actual_entropy_,\n expected_mean_,\n 
actual_mean_,\n expected_mode_,\n actual_mode_,\n expected_stddev_,\n actual_stddev_,\n expected_variance_,\n actual_variance_,\n expected_covariance_,\n actual_covariance_,\n ] = self.evaluate([\n expected_entropy,\n actual_entropy,\n expected_mean,\n actual_mean,\n expected_mode,\n actual_mode,\n expected_stddev,\n actual_stddev,\n expected_variance,\n actual_variance,\n expected_covariance,\n actual_covariance,\n ])\n self.assertAllClose(expected_entropy_, actual_entropy_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_mean_, actual_mean_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_mode_, actual_mode_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_stddev_, actual_stddev_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_variance_, actual_variance_,\n atol=0., rtol=1e-6)\n self.assertAllClose(expected_covariance_, actual_covariance_,\n atol=0., rtol=1e-6)\n if not self.is_static_shape:\n return\n self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)\n self.assertAllEqual(expected_vector_stat_shape, actual_mean.shape)\n self.assertAllEqual(expected_vector_stat_shape, actual_mode.shape)\n self.assertAllEqual(expected_vector_stat_shape, actual_stddev.shape)\n self.assertAllEqual(expected_vector_stat_shape, actual_variance.shape)\n self.assertAllEqual(expected_matrix_stat_shape, actual_covariance.shape)\n\n def test_bad_reshape_size(self):\n dims = 2\n new_batch_shape = [2, 3]\n old_batch_shape = [2] # 2 != 2*3\n\n new_batch_shape_ph = (\n tf.constant(np.int32(new_batch_shape))\n if self.is_static_shape else tf.compat.v1.placeholder_with_default(\n np.int32(new_batch_shape), shape=None))\n\n scale = np.ones(old_batch_shape + [dims], self.dtype)\n scale_ph = tf.compat.v1.placeholder_with_default(\n scale, shape=scale.shape if self.is_static_shape else None)\n mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)\n\n if self.is_static_shape or tf.executing_eagerly():\n with self.assertRaisesRegexp(\n ValueError, (r\"`batch_shape` size \\(6\\) must match \"\n r\"`distribution\\.batch_shape` size \\(2\\)\")):\n tfd.BatchReshape(\n distribution=mvn,\n batch_shape=new_batch_shape_ph,\n validate_args=True)\n\n else:\n with self.assertRaisesOpError(r\"Shape sizes do not match.\"):\n self.evaluate(\n tfd.BatchReshape(\n distribution=mvn,\n batch_shape=new_batch_shape_ph,\n validate_args=True).sample())\n\n def test_non_positive_shape(self):\n dims = 2\n old_batch_shape = [4]\n if self.is_static_shape:\n # Unknown first dimension does not trigger size check. 
Note that\n # any dimension < 0 is treated statically as unknown.\n new_batch_shape = [-1, 0]\n else:\n new_batch_shape = [-2, -2] # -2 * -2 = 4, same size as the old shape.\n\n new_batch_shape_ph = (\n tf.constant(np.int32(new_batch_shape))\n if self.is_static_shape else tf.compat.v1.placeholder_with_default(\n np.int32(new_batch_shape), shape=None))\n\n scale = np.ones(old_batch_shape + [dims], self.dtype)\n scale_ph = tf.compat.v1.placeholder_with_default(\n scale, shape=scale.shape if self.is_static_shape else None)\n mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)\n\n if self.is_static_shape or tf.executing_eagerly():\n with self.assertRaisesRegexp(ValueError, r\".*must be >=(-1| 0).*\"):\n tfd.BatchReshape(\n distribution=mvn,\n batch_shape=new_batch_shape_ph,\n validate_args=True)\n\n else:\n with self.assertRaisesOpError(r\".*must be >=(-1| 0).*\"):\n self.evaluate(\n tfd.BatchReshape(\n distribution=mvn,\n batch_shape=new_batch_shape_ph,\n validate_args=True).sample())\n\n def test_non_vector_shape(self):\n if tf.executing_eagerly():\n # TODO(b/122840816): Modify this test so that it runs in eager mode or\n # document that the test is not intended to run in eager mode.\n return\n\n dims = 2\n new_batch_shape = 2\n old_batch_shape = [2]\n\n new_batch_shape_ph = (\n tf.constant(np.int32(new_batch_shape))\n if self.is_static_shape else tf.compat.v1.placeholder_with_default(\n np.int32(new_batch_shape), shape=None))\n\n scale = np.ones(old_batch_shape + [dims], self.dtype)\n scale_ph = tf.compat.v1.placeholder_with_default(\n scale, shape=scale.shape if self.is_static_shape else None)\n mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)\n\n if self.is_static_shape:\n with self.assertRaisesRegexp(ValueError, r\".*must be a vector.*\"):\n tfd.BatchReshape(\n distribution=mvn,\n batch_shape=new_batch_shape_ph,\n validate_args=True)\n\n else:\n with self.assertRaisesOpError(r\".*must be a vector.*\"):\n self.evaluate(\n tfd.BatchReshape(\n distribution=mvn,\n batch_shape=new_batch_shape_ph,\n validate_args=True).sample())\n\n def test_broadcasting_explicitly_unsupported(self):\n old_batch_shape = [4]\n new_batch_shape = [1, 4, 1]\n rate_ = self.dtype([1, 10, 2, 20])\n\n rate = tf.compat.v1.placeholder_with_default(\n rate_, shape=old_batch_shape if self.is_static_shape else None)\n poisson_4 = tfd.Poisson(rate)\n new_batch_shape_ph = (\n tf.constant(np.int32(new_batch_shape))\n if self.is_static_shape else tf.compat.v1.placeholder_with_default(\n np.int32(new_batch_shape), shape=None))\n poisson_141_reshaped = tfd.BatchReshape(\n poisson_4, new_batch_shape_ph, validate_args=True)\n\n x_4 = self.dtype([2, 12, 3, 23])\n x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)\n\n if self.is_static_shape or tf.executing_eagerly():\n with self.assertRaisesRegexp(NotImplementedError,\n \"too few batch and event dims\"):\n poisson_141_reshaped.log_prob(x_4)\n with self.assertRaisesRegexp(NotImplementedError,\n \"unexpected batch and event shape\"):\n poisson_141_reshaped.log_prob(x_114)\n return\n\n with self.assertRaisesOpError(\"too few batch and event dims\"):\n self.evaluate(poisson_141_reshaped.log_prob(x_4))\n\n with self.assertRaisesOpError(\"unexpected batch and event shape\"):\n self.evaluate(poisson_141_reshaped.log_prob(x_114))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass BatchReshapeStaticTest(_BatchReshapeTest, tf.test.TestCase):\n\n dtype = np.float32\n is_static_shape = True\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass 
BatchReshapeDynamicTest(_BatchReshapeTest, tf.test.TestCase):\n\n dtype = np.float64\n is_static_shape = False\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Functional MCMC: A functional API for creating new Markov Chains.\n\nThe core convention of this API is that transition operators have the following\nform:\n\n```\ntransition_operator(state...) -> (new_state..., extra_outputs)\n```\n\nWhere 'x...', reresents one or more values. This operator can then be called\nrecursively as follows:\n\n```\nstate = ...\nwhile not_done:\n state, extra = transition_operator(*state)\n```\n\n`state` is allowed to be partially specified (i.e. have `None` elements), which\nthe transition operator must impute when it returns the new state.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# [internal] enable type annotations\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom typing import Any, Callable, Mapping, Tuple, Union, Sequence\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\ntfb = tfp.bijectors\nmcmc_util = tfp.mcmc.internal.util\n\n__all__ = [\n 'HamiltonianMonteCarloExtra',\n 'HamiltonianMonteCarloState',\n 'LeapFrogStepExtras',\n 'LeapFrogStepState',\n 'PotentialFn',\n 'State',\n 'TransitionOperator',\n 'call_and_grads',\n 'call_fn',\n 'hamiltonian_monte_carlo',\n 'maybe_broadcast_structure',\n 'metropolis_hastings_step',\n 'leapfrog_step',\n 'sign_adaptation',\n 'trace',\n 'transform_log_prob_fn',\n]\n\nAnyTensor = Union[tf.Tensor, np.ndarray, np.generic]\nIntTensor = Union[int, tf.Tensor, np.ndarray, np.integer]\nFloatTensor = Union[float, tf.Tensor, np.ndarray, np.floating]\n# TODO(b/109648354): Correctly represent the recursive nature of this type.\nTensorNest = Union[AnyTensor, Sequence[AnyTensor], Mapping[Any, AnyTensor]]\nBijectorNest = Union[tfb.Bijector, Sequence[tfb.Bijector], Mapping[Any, tfb\n .Bijector]]\nFloatNest = Union[FloatTensor, Sequence[FloatTensor], Mapping[Any, FloatTensor]]\nState = TensorNest # pylint: disable=invalid-name\nTransitionOperator = Union[Callable[[TensorNest], Tuple[State, TensorNest]],\n Callable[..., Tuple[State, TensorNest]]]\nPotentialFn = Union[Callable[[TensorNest], Tuple[tf.Tensor, TensorNest]],\n Callable[..., Tuple[tf.Tensor, TensorNest]]]\n\n\ndef trace(state: State, fn: TransitionOperator, num_steps: IntTensor,\n trace_fn: Callable[[State, TensorNest], TensorNest]\n ) -> Tuple[State, TensorNest]:\n \"\"\"`TransitionOperator` that runs `fn` repeatedly and traces its outputs.\n\n Args:\n state: A nest of `Tensor`s or None.\n fn: A `TransitionOperator`.\n num_steps: Number of steps to run the function for. 
Must be greater than 1.\n trace_fn: Callable that the unpacked outputs of `fn` and returns a nest of\n `Tensor`s. These will be stacked and returned.\n\n Returns:\n state: The final state returned by `fn`.\n traces: Stacked outputs of `trace_fn`.\n \"\"\"\n\n def fn_wrapper(args, _):\n return tf.nest.map_structure(tf.convert_to_tensor, call_fn(fn, args[0]))\n\n def trace_fn_wrapper(args):\n return tf.nest.map_structure(tf.convert_to_tensor, call_fn(trace_fn, args))\n\n state = call_fn(fn, state)\n first_trace = trace_fn_wrapper(state)\n\n state, full_trace = mcmc_util.trace_scan(\n fn_wrapper, state, tf.ones(num_steps - 1), trace_fn=trace_fn_wrapper)\n\n prepend = lambda x, y: tf.concat( # pylint: disable=g-long-lambda\n [tf.convert_to_tensor(value=x)[tf.newaxis], y], 0)\n\n return state, tf.nest.map_structure(prepend, first_trace, full_trace)\n\n\ndef call_fn(fn: TransitionOperator, args: Union[Tuple[Any], Any]) -> Any:\n \"\"\"Calls a transition operator with args, unpacking args if its a sequence.\n\n Args:\n fn: A `TransitionOperator`.\n args: Arguments to `fn`\n\n Returns:\n ret: Return value of `fn`.\n \"\"\"\n\n if isinstance(args, (list, tuple)) and not mcmc_util.is_namedtuple_like(args):\n args = args # type: Tuple[Any]\n return fn(*args)\n else:\n return fn(args)\n\n\ndef call_and_grads(fn: TransitionOperator, args: Union[Tuple[Any], Any]\n ) -> Tuple[tf.Tensor, TensorNest, TensorNest]:\n \"\"\"Calls `fn` and returns the gradients with respect to `fn`'s first output.\n\n Args:\n fn: A `TransitionOperator`.\n args: Arguments to `fn`\n\n Returns:\n ret: First output of `fn`.\n extra: Second output of `fn`.\n grads: Gradients of `ret` with respect to `args`.\n \"\"\"\n with tf.GradientTape() as tape:\n tape.watch(args)\n ret, extra = call_fn(fn, args)\n grads = tape.gradient(ret, args)\n return ret, extra, grads\n\n\ndef maybe_broadcast_structure(from_structure: Any, to_structure: Any) -> Any:\n \"\"\"Maybe broadcasts `from_structure` to `to_structure`.\n\n If `from_structure` is a singleton, it is tiled to match the structure of\n `to_structure`. Note that the elements in `from_structure` are not copied if\n this tiling occurs.\n\n Args:\n from_structure: A structure.\n to_structure: A structure.\n\n Returns:\n new_from_structure: Same structure as `to_structure`.\n \"\"\"\n flat_from = tf.nest.flatten(from_structure)\n flat_to = tf.nest.flatten(to_structure)\n if len(flat_from) == 1:\n flat_from *= len(flat_to)\n return tf.nest.pack_sequence_as(to_structure, flat_from)\n\n\ndef transform_log_prob_fn(log_prob_fn: PotentialFn,\n bijector: BijectorNest,\n init_state: State = None\n ) -> Union[PotentialFn, Tuple[PotentialFn, State]]:\n \"\"\"Transforms a log-prob function using a bijector.\n\n This takes a log-prob function and creates a new log-prob function that now\n takes takes state in the domain of the bijector, forward transforms that state\n and calls the original log-prob function. 
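A toy sketch of the transition-operator convention with `trace`, using a trivial counter operator; the name `fun_mcmc` for this module is an assumption taken from the docstring examples elsewhere in the file.

import tensorflow as tf
import fun_mcmc  # assumed importable name of the module defined here

def counter(x):
  """A trivial TransitionOperator: state -> (new_state, extra)."""
  new_x = x + 1.
  return new_x, new_x

final, counts = fun_mcmc.trace(
    state=tf.zeros([]),
    fn=counter,
    num_steps=5,
    trace_fn=lambda state, extra: extra)
# `final` is the (new_state, extra) pair from the last call to `counter`;
# `counts` stacks the traced extras: [1., 2., 3., 4., 5.].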
It then returns the log-probability\n that correctly accounts for this transformation.\n\n The forward-transformed state is pre-pended to the original log-prob\n function's extra returns and returned as the new extra return.\n\n For convenience you can also pass the initial state (in the original space),\n and this function will return the inverse transformed as the 2nd return value.\n You'd use this to initialize MCMC operators that operate in the transformed\n space.\n\n Args:\n log_prob_fn: Log prob fn.\n bijector: Bijector(s), must be of the same structure as the `log_prob_fn`\n inputs.\n init_state: Initial state, in the original space.\n\n Returns:\n transformed_log_prob_fn: Transformed log prob fn.\n transformed_init_state: If `init_state` is provided. Initial state in the\n transformed space.\n \"\"\"\n\n def wrapper(*args):\n \"\"\"Transformed wrapper.\"\"\"\n bijector_ = bijector\n\n args = tf.nest.map_structure(lambda x: 0. + x, args)\n if len(args) == 1:\n args = args[0]\n elif isinstance(bijector_, list):\n bijector_ = tuple(bijector_)\n\n original_space_args = tf.nest.map_structure(lambda b, x: b.forward(x),\n bijector_, args)\n original_space_args = original_space_args # type: Tuple[Any]\n original_space_log_prob, extra = call_fn(log_prob_fn, original_space_args)\n event_ndims = tf.nest.map_structure(\n lambda x: tf.rank(x) - tf.rank(original_space_log_prob), args)\n\n return original_space_log_prob + sum(\n tf.nest.flatten(\n tf.nest.map_structure(\n lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e),\n bijector_, args, event_ndims))), [original_space_args, extra]\n\n if init_state is None:\n return wrapper\n else:\n return wrapper, tf.nest.map_structure(lambda b, s: b.inverse(s), bijector,\n init_state)\n\n\nLeapFrogStepState = collections.namedtuple('LeapFrogStepState',\n 'state, state_grads, momentum')\nLeapFrogStepExtras = collections.namedtuple(\n 'LeapFrogStepExtras', 'target_log_prob, state_extra, '\n 'kinetic_energy, kinetic_energy_extra')\n\n\ndef leapfrog_step(leapfrog_step_state: LeapFrogStepState,\n step_size: FloatTensor, target_log_prob_fn: PotentialFn,\n kinetic_energy_fn: PotentialFn\n ) -> Tuple[LeapFrogStepState, LeapFrogStepExtras]:\n \"\"\"Leapfrog `TransitionOperator`.\n\n Args:\n leapfrog_step_state: LeapFrogStepState.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n target_log_prob_fn: Target log prob fn.\n kinetic_energy_fn: Kinetic energy fn.\n\n Returns:\n leapfrog_step_state: LeapFrogStepState.\n leapfrog_step_extras: LeapFrogStepExtras.\n \"\"\"\n state = leapfrog_step_state.state\n state_grads = leapfrog_step_state.state_grads\n momentum = leapfrog_step_state.momentum\n step_size = maybe_broadcast_structure(step_size, state)\n\n state = tf.nest.map_structure(tf.convert_to_tensor, state)\n momentum = tf.nest.map_structure(tf.convert_to_tensor, momentum)\n state = tf.nest.map_structure(tf.convert_to_tensor, state)\n\n if state_grads is None:\n _, _, state_grads = call_and_grads(target_log_prob_fn, state)\n else:\n state_grads = tf.nest.map_structure(tf.convert_to_tensor, state_grads)\n\n momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, momentum,\n state_grads, step_size)\n\n kinetic_energy, kinetic_energy_extra, momentum_grads = call_and_grads(\n kinetic_energy_fn, momentum)\n\n state = tf.nest.map_structure(lambda x, mg, s: x + mg * s, state,\n momentum_grads, step_size)\n\n target_log_prob, state_extra, state_grads = call_and_grads(\n target_log_prob_fn, state)\n\n momentum = 
tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, momentum,\n state_grads, step_size)\n\n return LeapFrogStepState(state, state_grads, momentum), LeapFrogStepExtras(\n target_log_prob, state_extra, kinetic_energy, kinetic_energy_extra)\n\n\ndef metropolis_hastings_step(current_state: State,\n proposed_state: State,\n energy_change: FloatTensor,\n seed=None) -> Tuple[State, tf.Tensor, tf.Tensor]:\n \"\"\"Metropolis-Hastings step.\n\n This probabilistically chooses between `current_state` and `proposed_state`\n based on the `energy_change` so as to preserve detailed balance.\n\n Energy change is the negative of `log_accept_ratio`.\n\n Args:\n current_state: Current state.\n proposed_state: Proposed state.\n energy_change: E(proposed_state) - E(previous_state).\n seed: For reproducibility.\n\n Returns:\n new_state: The chosen state.\n is_accepted: Whether the proposed state was accepted.\n log_uniform: The random number that was used to select between the two\n states.\n \"\"\"\n flat_current = tf.nest.flatten(current_state)\n flat_proposed = nest.flatten_up_to(current_state, proposed_state)\n # Impute the None's in the current state.\n flat_current = [\n p if c is None else c for p, c in zip(flat_proposed, flat_current)\n ]\n current_state = tf.nest.pack_sequence_as(current_state, flat_current)\n\n current_state = tf.nest.map_structure(tf.convert_to_tensor, current_state)\n proposed_state = tf.nest.map_structure(tf.convert_to_tensor, proposed_state)\n energy_change = tf.convert_to_tensor(value=energy_change)\n\n log_accept_ratio = -energy_change\n\n log_uniform = tf.math.log(\n tf.random.uniform(\n shape=tf.shape(input=log_accept_ratio),\n dtype=log_accept_ratio.dtype.base_dtype,\n seed=seed))\n is_accepted = log_uniform < log_accept_ratio\n\n next_state = mcmc_util.choose(\n is_accepted, proposed_state, current_state, name='choose_next_state')\n return next_state, is_accepted, log_uniform\n\n\n# state_extra is not a true state, but here for convenience.\nHamiltonianMonteCarloState = collections.namedtuple(\n 'HamiltonianMonteCarloState',\n 'state, state_grads, target_log_prob, state_extra')\n\nHamiltonianMonteCarloExtra = collections.namedtuple(\n 'HamiltonianMonteCarloExtra',\n 'is_accepted, log_accept_ratio, leapfrog_trace, '\n 'proposed_hmc_state')\n\nMomentumSampleFn = Union[Callable[[State], State], Callable[..., State]]\n\n\ndef hamiltonian_monte_carlo(\n hmc_state: HamiltonianMonteCarloState,\n target_log_prob_fn: PotentialFn,\n step_size: Any,\n num_leapfrog_steps: IntTensor,\n momentum: State = None,\n kinetic_energy_fn: PotentialFn = None,\n momentum_sample_fn: MomentumSampleFn = None,\n leapfrog_trace_fn: Callable[[LeapFrogStepState, LeapFrogStepExtras],\n TensorNest] = lambda *args: (),\n seed=None,\n) -> Tuple[HamiltonianMonteCarloState, HamiltonianMonteCarloExtra]:\n \"\"\"Hamiltonian Monte Carlo `TransitionOperator`.\n\n #### Example\n\n ```python\n step_size = 0.2\n num_steps = 2000\n num_leapfrog_steps = 10\n state = tf.ones([16, 2])\n\n base_mean = [1., 0]\n base_cov = [[1, 0.5], [0.5, 1]]\n\n bijector = tfb.Softplus()\n base_dist = tfd.MultivariateNormalFullCovariance(\n loc=base_mean, covariance_matrix=base_cov)\n target_dist = bijector(base_dist)\n\n def orig_target_log_prob_fn(x):\n return target_dist.log_prob(x), ()\n\n target_log_prob_fn, state = fun_mcmc.transform_log_prob_fn(\n orig_target_log_prob_fn, bijector, state)\n\n kernel = tf.function(lambda state: fun_mcmc.hamiltonian_monte_carlo(\n state,\n step_size=step_size,\n 
num_leapfrog_steps=num_leapfrog_steps,\n target_log_prob_fn=target_log_prob_fn,\n seed=tfp_test_util.test_seed()))\n\n _, chain = fun_mcmc.trace(\n state=fun_mcmc.HamiltonianMonteCarloState(\n state=state,\n state_grads=None,\n target_log_prob=None,\n state_extra=None),\n fn=kernel,\n num_steps=num_steps,\n trace_fn=lambda state, extra: state.state_extra[0])\n ```\n\n Args:\n hmc_state: HamiltonianMonteCarloState.\n target_log_prob_fn: Target log prob fn.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n num_leapfrog_steps: Number of leapfrog steps to take.\n momentum: Initial momentum, passed to `momentum_sample_fn`. Default: zeroes.\n kinetic_energy_fn: Kinetic energy function.\n momentum_sample_fn: Sampler for the momentum.\n leapfrog_trace_fn: Trace function for the leapfrog integrator.\n seed: For reproducibility.\n\n Returns:\n hmc_state: HamiltonianMonteCarloState\n hmc_extra: HamiltonianMonteCarloExtra\n \"\"\"\n state = hmc_state.state\n state_grads = hmc_state.state_grads\n target_log_prob = hmc_state.target_log_prob\n state_extra = hmc_state.state_extra\n\n if kinetic_energy_fn is None:\n\n # pylint: disable=function-redefined\n def kinetic_energy_fn(*momentum):\n return tf.add_n([\n tf.reduce_sum(input_tensor=tf.square(x), axis=-1) / 2.\n for x in tf.nest.flatten(momentum)\n ]), ()\n\n if momentum_sample_fn is None:\n\n # pylint: disable=function-redefined\n def momentum_sample_fn(*momentum):\n ret = tf.nest.map_structure(\n lambda x: tf.random.normal(tf.shape(input=x), dtype=x.dtype),\n momentum)\n if len(ret) == 1:\n return ret[0]\n else:\n return ret\n\n if momentum is None:\n momentum = call_fn(momentum_sample_fn,\n tf.nest.map_structure(tf.zeros_like, state))\n if target_log_prob is None:\n target_log_prob, state_extra, state_grads = call_and_grads(\n target_log_prob_fn, state)\n\n kinetic_energy, _ = call_fn(kinetic_energy_fn, momentum)\n current_energy = -target_log_prob + kinetic_energy\n current_state = HamiltonianMonteCarloState(\n state=state,\n state_grads=state_grads,\n state_extra=state_extra,\n target_log_prob=target_log_prob)\n\n def leapfrog_wrapper(leapfrog_state, target_log_prob, state_extra):\n \"\"\"Leapfrog wrapper that tracks extra state.\"\"\"\n del target_log_prob\n del state_extra\n\n leapfrog_state, leapfrog_extra = leapfrog_step(\n leapfrog_state,\n step_size=step_size,\n target_log_prob_fn=target_log_prob_fn,\n kinetic_energy_fn=kinetic_energy_fn)\n\n return [\n leapfrog_state, leapfrog_extra.target_log_prob,\n leapfrog_extra.state_extra\n ], leapfrog_extra\n\n def leapfrog_trace_wrapper_fn(args, leapfrog_extra):\n return leapfrog_trace_fn(args[0], leapfrog_extra)\n\n leapfrog_wrapper_state = (LeapFrogStepState(state, state_grads, momentum),\n target_log_prob, state_extra)\n\n [[leapfrog_state, target_log_prob, state_extra], _], leapfrog_trace = trace(\n leapfrog_wrapper_state,\n leapfrog_wrapper,\n num_leapfrog_steps,\n trace_fn=leapfrog_trace_wrapper_fn)\n\n kinetic_energy, _ = call_fn(kinetic_energy_fn, leapfrog_state.momentum)\n proposed_energy = -target_log_prob + kinetic_energy\n proposed_state = HamiltonianMonteCarloState(\n state=leapfrog_state.state,\n state_grads=leapfrog_state.state_grads,\n target_log_prob=target_log_prob,\n state_extra=state_extra)\n\n energy_change = proposed_energy - current_energy\n hmc_state, is_accepted, _ = metropolis_hastings_step(\n current_state, proposed_state, energy_change, seed=seed)\n\n hmc_state = hmc_state # type: HamiltonianMonteCarloState\n return hmc_state, 
HamiltonianMonteCarloExtra(\n is_accepted=is_accepted,\n proposed_hmc_state=proposed_state,\n log_accept_ratio=-energy_change,\n leapfrog_trace=leapfrog_trace)\n\n\ndef sign_adaptation(control: FloatNest,\n output: FloatTensor,\n set_point: FloatTensor,\n adaptation_rate: FloatTensor = 0.01) -> FloatNest:\n \"\"\"A function to do simple sign-based control of a variable.\n\n ```\n control = control * (1. + adaptation_rate) ** sign(output - set_point)\n ```\n\n Args:\n control: The control variable.\n output: The output variable.\n set_point: The set point for `output`. This function will adjust `control`\n so that `output` matches `set_point`.\n adaptation_rate: Adaptation rate.\n\n Returns:\n control: New control.\n \"\"\"\n\n def _get_new_control(control, output, set_point):\n new_control = mcmc_util.choose(output > set_point,\n control * (1. + adaptation_rate),\n control / (1. + adaptation_rate))\n return new_control\n\n output = maybe_broadcast_structure(output, control)\n set_point = maybe_broadcast_structure(set_point, control)\n\n return tf.nest.map_structure(_get_new_control, control, output, set_point)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Multi-dimensional (Vector) SinhArcsinh transformation of a distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.bijectors import affine as affine_bijector\nfrom tensorflow_probability.python.bijectors import chain as chain_bijector\nfrom tensorflow_probability.python.bijectors import sinh_arcsinh as sinh_arcsinh_bijector\nfrom tensorflow_probability.python.distributions import normal\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\n\n__all__ = [\n \"VectorSinhArcsinhDiag\",\n]\n\n\nclass VectorSinhArcsinhDiag(transformed_distribution.TransformedDistribution):\n \"\"\"The (diagonal) SinhArcsinh transformation of a distribution on `R^k`.\n\n This distribution models a random vector `Y = (Y1,...,Yk)`, making use of\n a `SinhArcsinh` transformation (which has adjustable tailweight and skew),\n a rescaling, and a shift.\n\n The `SinhArcsinh` transformation of the Normal is described in great depth in\n [Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).\n Here we use a slightly different parameterization, in terms of `tailweight`\n and `skewness`. 
Additionally we allow for distributions other than Normal,\n and control over `scale` as well as a \"shift\" parameter `loc`.\n\n #### Mathematical Details\n\n Given iid random vector `Z = (Z1,...,Zk)`, we define the VectorSinhArcsinhDiag\n transformation of `Z`, `Y`, parameterized by\n `(loc, scale, skewness, tailweight)`, via the relation (with `@` denoting\n matrix multiplication):\n\n ```\n Y := loc + scale @ F(Z) * (2 / F_0(2))\n F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )\n F_0(Z) := Sinh( Arcsinh(Z) * tailweight )\n ```\n\n This distribution is similar to the location-scale transformation\n `L(Z) := loc + scale @ Z` in the following ways:\n\n * If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then\n `Y = L(Z)` exactly.\n * `loc` is used in both to shift the result by a constant factor.\n * The multiplication of `scale` by `2 / F_0(2)` ensures that if `skewness = 0`\n `P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.\n Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond\n `loc + 2 * scale` are the same.\n\n This distribution is different than `loc + scale @ Z` due to the\n reshaping done by `F`:\n\n * Positive (negative) `skewness` leads to positive (negative) skew.\n * positive skew means, the mode of `F(Z)` is \"tilted\" to the right.\n * positive skew means positive values of `F(Z)` become more likely, and\n negative values become less likely.\n * Larger (smaller) `tailweight` leads to fatter (thinner) tails.\n * Fatter tails mean larger values of `|F(Z)|` become more likely.\n * `tailweight < 1` leads to a distribution that is \"flat\" around `Y = loc`,\n and a very steep drop-off in the tails.\n * `tailweight > 1` leads to a distribution more peaked at the mode with\n heavier tails.\n\n To see the argument about the tails, note that for `|Z| >> 1` and\n `|Z| >> (|skewness| * tailweight)**tailweight`, we have\n `Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.\n\n To see the argument regarding multiplying `scale` by `2 / F_0(2)`,\n\n ```\n P[(Y - loc) / scale <= 2] = P[F(Z) * (2 / F_0(2)) <= 2]\n = P[F(Z) <= F_0(2)]\n = P[Z <= 2] (if F = F_0).\n ```\n \"\"\"\n\n def __init__(self,\n loc=None,\n scale_diag=None,\n scale_identity_multiplier=None,\n skewness=None,\n tailweight=None,\n distribution=None,\n validate_args=False,\n allow_nan_stats=True,\n name=\"VectorSinhArcsinhDiag\"):\n \"\"\"Construct VectorSinhArcsinhDiag distribution on `R^k`.\n\n The arguments `scale_diag` and `scale_identity_multiplier` combine to\n define the diagonal `scale` referred to in this class docstring:\n\n ```none\n scale = diag(scale_diag + scale_identity_multiplier * ones(k))\n ```\n\n The `batch_shape` is the broadcast shape between `loc` and `scale`\n arguments.\n\n The `event_shape` is given by last dimension of the matrix implied by\n `scale`. The last dimension of `loc` (if provided) must broadcast with this\n\n Additional leading dimensions (if any) will index batches.\n\n Args:\n loc: Floating-point `Tensor`. If this is set to `None`, `loc` is\n implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where\n `b >= 0` and `k` is the event size.\n scale_diag: Non-zero, floating-point `Tensor` representing a diagonal\n matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,\n and characterizes `b`-batches of `k x k` diagonal matrices added to\n `scale`. 
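A brief usage sketch for the distribution described above, assuming it is exposed as `tfp.distributions.VectorSinhArcsinhDiag`; positive `skewness` tilts mass to the right, and `tailweight > 1` fattens both tails relative to the underlying Normal.

import tensorflow as tf
import tensorflow_probability as tfp  # assumed public export of the class defined here

tfd = tfp.distributions
dist = tfd.VectorSinhArcsinhDiag(
    loc=[0., 0.],
    scale_diag=[1., 2.],
    skewness=0.5,    # right skew
    tailweight=1.5)  # heavier tails than Normal

x = dist.sample(1000, seed=42)
print(dist.event_shape)        # [2]
print(dist.log_prob(x).shape)  # [1000]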
When both `scale_identity_multiplier` and `scale_diag` are\n `None` then `scale` is the `Identity`.\n scale_identity_multiplier: Non-zero, floating-point `Tensor` representing\n a scale-identity-matrix added to `scale`. May have shape\n `[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scale\n `k x k` identity matrices added to `scale`. When both\n `scale_identity_multiplier` and `scale_diag` are `None` then `scale`\n is the `Identity`.\n skewness: Skewness parameter. floating-point `Tensor` with shape\n broadcastable with `event_shape`.\n tailweight: Tailweight parameter. floating-point `Tensor` with shape\n broadcastable with `event_shape`.\n distribution: `tf.Distribution`-like instance. Distribution from which `k`\n iid samples are used as input to transformation `F`. Default is\n `tfd.Normal(loc=0., scale=1.)`.\n Must be a scalar-batch, scalar-event distribution. Typically\n `distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is\n a function of non-trainable parameters. WARNING: If you backprop through\n a VectorSinhArcsinhDiag sample and `distribution` is not\n `FULLY_REPARAMETERIZED` yet is a function of trainable variables, then\n the gradient will be incorrect!\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n ValueError: if at most `scale_identity_multiplier` is specified.\n \"\"\"\n parameters = dict(locals())\n\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype(\n [loc, scale_diag, scale_identity_multiplier, skewness, tailweight],\n tf.float32)\n loc = loc if loc is None else tf.convert_to_tensor(\n value=loc, name=\"loc\", dtype=dtype)\n tailweight = 1. if tailweight is None else tailweight\n has_default_skewness = skewness is None\n skewness = 0. if skewness is None else skewness\n\n # Recall, with Z a random variable,\n # Y := loc + C * F(Z),\n # F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )\n # F_0(Z) := Sinh( Arcsinh(Z) * tailweight )\n # C := 2 * scale / F_0(2)\n\n # Construct shapes and 'scale' out of the scale_* and loc kwargs.\n # scale_linop is only an intermediary to:\n # 1. get shapes from looking at loc and the two scale args.\n # 2. 
combine scale_diag with scale_identity_multiplier, which gives us\n # 'scale', which in turn gives us 'C'.\n scale_linop = distribution_util.make_diag_scale(\n loc=loc,\n scale_diag=scale_diag,\n scale_identity_multiplier=scale_identity_multiplier,\n validate_args=False,\n assert_positive=False,\n dtype=dtype)\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, scale_linop)\n # scale_linop.diag_part() is efficient since it is a diag type linop.\n scale_diag_part = scale_linop.diag_part()\n dtype = scale_diag_part.dtype\n\n if distribution is None:\n distribution = normal.Normal(\n loc=tf.zeros([], dtype=dtype),\n scale=tf.ones([], dtype=dtype),\n allow_nan_stats=allow_nan_stats)\n else:\n asserts = distribution_util.maybe_check_scalar_distribution(\n distribution, dtype, validate_args)\n if asserts:\n scale_diag_part = distribution_util.with_dependencies(\n asserts, scale_diag_part)\n\n # Make the SAS bijector, 'F'.\n skewness = tf.convert_to_tensor(\n value=skewness, dtype=dtype, name=\"skewness\")\n tailweight = tf.convert_to_tensor(\n value=tailweight, dtype=dtype, name=\"tailweight\")\n f = sinh_arcsinh_bijector.SinhArcsinh(\n skewness=skewness, tailweight=tailweight)\n if has_default_skewness:\n f_noskew = f\n else:\n f_noskew = sinh_arcsinh_bijector.SinhArcsinh(\n skewness=dtype_util.as_numpy_dtype(skewness.dtype)(0.),\n tailweight=tailweight)\n\n # Make the Affine bijector, Z --> loc + C * Z.\n c = 2 * scale_diag_part / f_noskew.forward(\n tf.convert_to_tensor(value=2, dtype=dtype))\n affine = affine_bijector.Affine(\n shift=loc, scale_diag=c, validate_args=validate_args)\n\n bijector = chain_bijector.Chain([affine, f])\n\n super(VectorSinhArcsinhDiag, self).__init__(\n distribution=distribution,\n bijector=bijector,\n batch_shape=batch_shape,\n event_shape=event_shape,\n validate_args=validate_args,\n name=name)\n self._parameters = parameters\n self._loc = loc\n self._scale = scale_linop\n self._tailweight = tailweight\n self._skewness = skewness\n\n @property\n def loc(self):\n \"\"\"The `loc` in `Y := loc + scale @ F(Z) * (2 / F(2)).\"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2)).\"\"\"\n return self._scale\n\n @property\n def tailweight(self):\n \"\"\"Controls the tail decay. `tailweight > 1` means faster than Normal.\"\"\"\n return self._tailweight\n\n @property\n def skewness(self):\n \"\"\"Controls the skewness. 
`Skewness > 0` means right skew.\"\"\"\n return self._skewness\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Quantized distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import distribution as distributions\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\n\n\n__all__ = [\"QuantizedDistribution\"]\n\n\ndef _logsum_expbig_minus_expsmall(big, small):\n \"\"\"Stable evaluation of `Log[exp{big} - exp{small}]`.\n\n To work correctly, we should have the pointwise relation: `small <= big`.\n\n Args:\n big: Floating-point `Tensor`\n small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable\n shape.\n\n Returns:\n `Tensor` of same `dtype` of `big` and broadcast shape.\n \"\"\"\n with tf.name_scope(\"logsum_expbig_minus_expsmall\"):\n return tf.math.log1p(-tf.exp(small - big)) + big\n\n\n_prob_base_note = \"\"\"\nFor whole numbers `y`,\n\n```\nP[Y = y] := P[X <= low], if y == low,\n := P[X > high - 1], y == high,\n := 0, if j < low or y > high,\n := P[y - 1 < X <= y], all other y.\n```\n\n\"\"\"\n\n_prob_note = _prob_base_note + \"\"\"\nThe base distribution's `cdf` method must be defined on `y - 1`. If the\nbase distribution has a `survival_function` method, results will be more\naccurate for large values of `y`, and in this case the `survival_function` must\nalso be defined on `y - 1`.\n\"\"\"\n\n_log_prob_note = _prob_base_note + \"\"\"\nThe base distribution's `log_cdf` method must be defined on `y - 1`. 
If the\nbase distribution has a `log_survival_function` method results will be more\naccurate for large values of `y`, and in this case the `log_survival_function`\nmust also be defined on `y - 1`.\n\"\"\"\n\n\n_cdf_base_note = \"\"\"\n\nFor whole numbers `y`,\n\n```\ncdf(y) := P[Y <= y]\n = 1, if y >= high,\n = 0, if y < low,\n = P[X <= y], otherwise.\n```\n\nSince `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.\nThis dictates that fractional `y` are first floored to a whole number, and\nthen above definition applies.\n\"\"\"\n\n_cdf_note = _cdf_base_note + \"\"\"\nThe base distribution's `cdf` method must be defined on `y - 1`.\n\"\"\"\n\n_log_cdf_note = _cdf_base_note + \"\"\"\nThe base distribution's `log_cdf` method must be defined on `y - 1`.\n\"\"\"\n\n\n_sf_base_note = \"\"\"\n\nFor whole numbers `y`,\n\n```\nsurvival_function(y) := P[Y > y]\n = 0, if y >= high,\n = 1, if y < low,\n = P[X <= y], otherwise.\n```\n\nSince `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.\nThis dictates that fractional `y` are first floored to a whole number, and\nthen above definition applies.\n\"\"\"\n\n_sf_note = _sf_base_note + \"\"\"\nThe base distribution's `cdf` method must be defined on `y - 1`.\n\"\"\"\n\n_log_sf_note = _sf_base_note + \"\"\"\nThe base distribution's `log_cdf` method must be defined on `y - 1`.\n\"\"\"\n\n\nclass QuantizedDistribution(distributions.Distribution):\n \"\"\"Distribution representing the quantization `Y = ceiling(X)`.\n\n #### Definition in Terms of Sampling\n\n ```\n 1. Draw X\n 2. Set Y <-- ceiling(X)\n 3. If Y < low, reset Y <-- low\n 4. If Y > high, reset Y <-- high\n 5. Return Y\n ```\n\n #### Definition in Terms of the Probability Mass Function\n\n Given scalar random variable `X`, we define a discrete random variable `Y`\n supported on the integers as follows:\n\n ```\n P[Y = j] := P[X <= low], if j == low,\n := P[X > high - 1], j == high,\n := 0, if j < low or j > high,\n := P[j - 1 < X <= j], all other j.\n ```\n\n Conceptually, without cutoffs, the quantization process partitions the real\n line `R` into half open intervals, and identifies an integer `j` with the\n right endpoints:\n\n ```\n R = ... (-2, -1](-1, 0](0, 1](1, 2](2, 3](3, 4] ...\n j = ... -1 0 1 2 3 4 ...\n ```\n\n `P[Y = j]` is the mass of `X` within the `jth` interval.\n If `low = 0`, and `high = 2`, then the intervals are redrawn\n and `j` is re-assigned:\n\n ```\n R = (-infty, 0](0, 1](1, infty)\n j = 0 1 2\n ```\n\n `P[Y = j]` is still the mass of `X` within the `jth` interval.\n\n #### Examples\n\n We illustrate a mixture of discretized logistic distributions\n [(Salimans et al., 2017)][1]. This is used, for example, for capturing 16-bit\n audio in WaveNet [(van den Oord et al., 2017)][2]. The values range in\n a 1-D integer domain of `[0, 2**16-1]`, and the discretization captures\n `P(x - 0.5 < X <= x + 0.5)` for all `x` in the domain excluding the endpoints.\n The lowest value has probability `P(X <= 0.5)` and the highest value has\n probability `P(2**16 - 1.5 < X)`.\n\n Below we assume a `wavenet` function. It takes as `input` right-shifted audio\n samples of shape `[..., sequence_length]`. 
It returns a real-valued tensor of\n shape `[..., num_mixtures * 3]`, i.e., each mixture component has a `loc` and\n `scale` parameter belonging to the logistic distribution, and a `logits`\n parameter determining the unnormalized probability of that component.\n\n ```python\n tfd = tfp.distributions\n tfb = tfp.bijectors\n\n net = wavenet(inputs)\n loc, unconstrained_scale, logits = tf.split(net,\n num_or_size_splits=3,\n axis=-1)\n scale = tf.nn.softplus(unconstrained_scale)\n\n # Form mixture of discretized logistic distributions. Note we shift the\n # logistic distribution by -0.5. This lets the quantization capture \"rounding\"\n # intervals, `(x-0.5, x+0.5]`, and not \"ceiling\" intervals, `(x-1, x]`.\n discretized_logistic_dist = tfd.QuantizedDistribution(\n distribution=tfd.TransformedDistribution(\n distribution=tfd.Logistic(loc=loc, scale=scale),\n bijector=tfb.AffineScalar(shift=-0.5)),\n low=0.,\n high=2**16 - 1.)\n mixture_dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=discretized_logistic_dist)\n\n neg_log_likelihood = -tf.reduce_sum(mixture_dist.log_prob(targets))\n train_op = tf.train.AdamOptimizer().minimize(neg_log_likelihood)\n ```\n\n After instantiating `mixture_dist`, we illustrate maximum likelihood by\n calculating its log-probability of audio samples as `target` and optimizing.\n\n #### References\n\n [1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.\n PixelCNN++: Improving the PixelCNN with discretized logistic mixture\n likelihood and other modifications.\n _International Conference on Learning Representations_, 2017.\n https://arxiv.org/abs/1701.05517\n [2]: Aaron van den Oord et al. Parallel WaveNet: Fast High-Fidelity Speech\n Synthesis. _arXiv preprint arXiv:1711.10433_, 2017.\n https://arxiv.org/abs/1711.10433\n \"\"\"\n\n def __init__(self,\n distribution,\n low=None,\n high=None,\n validate_args=False,\n name=\"QuantizedDistribution\"):\n \"\"\"Construct a Quantized Distribution representing `Y = ceiling(X)`.\n\n Some properties are inherited from the distribution defining `X`. Example:\n `allow_nan_stats` is determined for this `QuantizedDistribution` by reading\n the `distribution`.\n\n Args:\n distribution: The base distribution class to transform. Typically an\n instance of `Distribution`.\n low: `Tensor` with same `dtype` as this distribution and shape\n able to be added to samples. Should be a whole number. Default `None`.\n If provided, base distribution's `prob` should be defined at\n `low`.\n high: `Tensor` with same `dtype` as this distribution and shape\n able to be added to samples. Should be a whole number. Default `None`.\n If provided, base distribution's `prob` should be defined at\n `high - 1`.\n `high` must be strictly greater than `low`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
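A smaller sketch than the mixture-of-logistics example above: quantizing a unit Normal directly and checking the promised mass formula away from the cutoffs; direct use of the public name `tfd.QuantizedDistribution` is assumed.

import tensorflow as tf
import tensorflow_probability as tfp  # assumed public export of the class defined here

tfd = tfp.distributions
base = tfd.Normal(loc=0., scale=1.)
quantized = tfd.QuantizedDistribution(distribution=base, low=-3., high=3.)

j = tf.constant(1.)
print(quantized.prob(j))               # mass of X in (0, 1]
print(base.cdf(j) - base.cdf(j - 1.))  # same value, per the definition above
print(quantized.prob(-3.))             # P[X <= -3]: all mass below the low cutoff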
When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n TypeError: If `dist_cls` is not a subclass of\n `Distribution` or continuous.\n NotImplementedError: If the base distribution does not implement `cdf`.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n self._dist = distribution\n\n if low is not None:\n low = tf.convert_to_tensor(\n value=low, name=\"low\", dtype=distribution.dtype)\n if high is not None:\n high = tf.convert_to_tensor(\n value=high, name=\"high\", dtype=distribution.dtype)\n dtype_util.assert_same_float_dtype(\n tensors=[self.distribution, low, high])\n\n # We let QuantizedDistribution access _graph_parents since this class is\n # more like a baseclass.\n graph_parents = self._dist._graph_parents # pylint: disable=protected-access\n\n checks = []\n if validate_args and low is not None and high is not None:\n message = \"low must be strictly less than high.\"\n checks.append(assert_util.assert_less(low, high, message=message))\n self._validate_args = validate_args # self._check_integer uses this.\n with tf.control_dependencies(checks if validate_args else []):\n if low is not None:\n self._low = self._check_integer(low)\n graph_parents += [self._low]\n else:\n self._low = None\n if high is not None:\n self._high = self._check_integer(high)\n graph_parents += [self._high]\n else:\n self._high = None\n\n super(QuantizedDistribution, self).__init__(\n dtype=self._dist.dtype,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=self._dist.allow_nan_stats,\n parameters=parameters,\n graph_parents=graph_parents,\n name=name)\n\n @property\n def distribution(self):\n \"\"\"Base distribution, p(x).\"\"\"\n return self._dist\n\n @property\n def low(self):\n \"\"\"Lowest value that quantization returns.\"\"\"\n return self._low\n\n @property\n def high(self):\n \"\"\"Highest value that quantization returns.\"\"\"\n return self._high\n\n def _batch_shape_tensor(self):\n return self.distribution.batch_shape_tensor()\n\n def _batch_shape(self):\n return self.distribution.batch_shape\n\n def _event_shape_tensor(self):\n return self.distribution.event_shape_tensor()\n\n def _event_shape(self):\n return self.distribution.event_shape\n\n def _sample_n(self, n, seed=None):\n low = self._low\n high = self._high\n with tf.name_scope(\"transform\"):\n n = tf.convert_to_tensor(value=n, name=\"n\")\n x_samps = self.distribution.sample(n, seed=seed)\n ones = tf.ones_like(x_samps)\n\n # Snap values to the intervals (j - 1, j].\n result_so_far = tf.math.ceil(x_samps)\n\n if low is not None:\n result_so_far = tf.where(result_so_far < low, low * ones, result_so_far)\n\n if high is not None:\n result_so_far = tf.where(result_so_far > high, high * ones,\n result_so_far)\n\n return result_so_far\n\n @distribution_util.AppendDocstring(_log_prob_note)\n def _log_prob(self, y):\n if not hasattr(self.distribution, \"_log_cdf\"):\n raise NotImplementedError(\n \"'log_prob' not implemented unless the base distribution implements \"\n \"'log_cdf'\")\n y = self._check_integer(y)\n try:\n return self._log_prob_with_logsf_and_logcdf(y)\n except NotImplementedError:\n return self._log_prob_with_logcdf(y)\n\n def _log_prob_with_logcdf(self, y):\n return _logsum_expbig_minus_expsmall(self.log_cdf(y), self.log_cdf(y - 1))\n\n def _log_prob_with_logsf_and_logcdf(self, y):\n \"\"\"Compute log_prob(y) using log survival_function and cdf 
together.\"\"\"\n # There are two options that would be equal if we had infinite precision:\n # Log[ sf(y - 1) - sf(y) ]\n # = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]\n # Log[ cdf(y) - cdf(y - 1) ]\n # = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]\n logsf_y = self.log_survival_function(y)\n logsf_y_minus_1 = self.log_survival_function(y - 1)\n logcdf_y = self.log_cdf(y)\n logcdf_y_minus_1 = self.log_cdf(y - 1)\n\n # Important: Here we use select in a way such that no input is inf, this\n # prevents the troublesome case where the output of select can be finite,\n # but the output of grad(select) will be NaN.\n\n # In either case, we are doing Log[ exp{big} - exp{small} ]\n # We want to use the sf items precisely when we are on the right side of the\n # median, which occurs when logsf_y < logcdf_y.\n big = tf.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)\n small = tf.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)\n\n return _logsum_expbig_minus_expsmall(big, small)\n\n @distribution_util.AppendDocstring(_prob_note)\n def _prob(self, y):\n if not hasattr(self.distribution, \"_cdf\"):\n raise NotImplementedError(\n \"'prob' not implemented unless the base distribution implements \"\n \"'cdf'\")\n y = self._check_integer(y)\n try:\n return self._prob_with_sf_and_cdf(y)\n except NotImplementedError:\n return self._prob_with_cdf(y)\n\n def _prob_with_cdf(self, y):\n return self.cdf(y) - self.cdf(y - 1)\n\n def _prob_with_sf_and_cdf(self, y):\n # There are two options that would be equal if we had infinite precision:\n # sf(y - 1) - sf(y)\n # cdf(y) - cdf(y - 1)\n sf_y = self.survival_function(y)\n sf_y_minus_1 = self.survival_function(y - 1)\n cdf_y = self.cdf(y)\n cdf_y_minus_1 = self.cdf(y - 1)\n\n # sf_prob has greater precision iff we're on the right side of the median.\n return tf.where(\n sf_y < cdf_y, # True iff we're on the right side of the median.\n sf_y_minus_1 - sf_y,\n cdf_y - cdf_y_minus_1)\n\n @distribution_util.AppendDocstring(_log_cdf_note)\n def _log_cdf(self, y):\n low = self._low\n high = self._high\n\n # Recall the promise:\n # cdf(y) := P[Y <= y]\n # = 1, if y >= high,\n # = 0, if y < low,\n # = P[X <= y], otherwise.\n\n # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in\n # between.\n j = tf.floor(y)\n\n result_so_far = self.distribution.log_cdf(j)\n\n # Broadcast, because it's possible that this is a single distribution being\n # evaluated on a number of samples, or something like that.\n j += tf.zeros_like(result_so_far)\n\n # Re-define values at the cutoffs.\n if low is not None:\n neg_inf = -np.inf * tf.ones_like(result_so_far)\n result_so_far = tf.where(j < low, neg_inf, result_so_far)\n if high is not None:\n result_so_far = tf.where(j >= high, tf.zeros_like(result_so_far),\n result_so_far)\n\n return result_so_far\n\n @distribution_util.AppendDocstring(_cdf_note)\n def _cdf(self, y):\n low = self._low\n high = self._high\n\n # Recall the promise:\n # cdf(y) := P[Y <= y]\n # = 1, if y >= high,\n # = 0, if y < low,\n # = P[X <= y], otherwise.\n\n # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in\n # between.\n j = tf.floor(y)\n\n # P[X <= j], used when low < X < high.\n result_so_far = self.distribution.cdf(j)\n\n # Broadcast, because it's possible that this is a single distribution being\n # evaluated on a number of samples, or something like that.\n j += tf.zeros_like(result_so_far)\n\n # Re-define values at the cutoffs.\n if low is not None:\n result_so_far = tf.where(j < low, tf.zeros_like(result_so_far),\n 
result_so_far)\n if high is not None:\n result_so_far = tf.where(j >= high, tf.ones_like(result_so_far),\n result_so_far)\n\n return result_so_far\n\n @distribution_util.AppendDocstring(_log_sf_note)\n def _log_survival_function(self, y):\n low = self._low\n high = self._high\n\n # Recall the promise:\n # survival_function(y) := P[Y > y]\n # = 0, if y >= high,\n # = 1, if y < low,\n # = P[X > y], otherwise.\n\n # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in\n # between.\n j = tf.math.ceil(y)\n\n # P[X > j], used when low < X < high.\n result_so_far = self.distribution.log_survival_function(j)\n\n # Broadcast, because it's possible that this is a single distribution being\n # evaluated on a number of samples, or something like that.\n j += tf.zeros_like(result_so_far)\n\n # Re-define values at the cutoffs.\n if low is not None:\n result_so_far = tf.where(j < low, tf.zeros_like(result_so_far),\n result_so_far)\n if high is not None:\n neg_inf = -np.inf * tf.ones_like(result_so_far)\n result_so_far = tf.where(j >= high, neg_inf, result_so_far)\n\n return result_so_far\n\n @distribution_util.AppendDocstring(_sf_note)\n def _survival_function(self, y):\n low = self._low\n high = self._high\n\n # Recall the promise:\n # survival_function(y) := P[Y > y]\n # = 0, if y >= high,\n # = 1, if y < low,\n # = P[X > y], otherwise.\n\n # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in\n # between.\n j = tf.math.ceil(y)\n\n # P[X > j], used when low < X < high.\n result_so_far = self.distribution.survival_function(j)\n\n # Broadcast, because it's possible that this is a single distribution being\n # evaluated on a number of samples, or something like that.\n j += tf.zeros_like(result_so_far)\n\n # Re-define values at the cutoffs.\n if low is not None:\n result_so_far = tf.where(j < low, tf.ones_like(result_so_far),\n result_so_far)\n if high is not None:\n result_so_far = tf.where(j >= high, tf.zeros_like(result_so_far),\n result_so_far)\n\n return result_so_far\n\n def _check_integer(self, value):\n with tf.name_scope(\"check_integer\"):\n value = tf.convert_to_tensor(value=value, name=\"value\")\n if not self.validate_args:\n return value\n dependencies = [distribution_util.assert_integer_form(\n value, message=\"value has non-integer components.\")]\n return distribution_util.with_dependencies(dependencies, value)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Numpy implementations of TensorFlow dtype related.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.internal.backend.numpy.internal import utils\n\n\n__all__ = [\n 'as_dtype',\n 'bool',\n 'complex',\n 'complex128',\n 'complex64',\n 'double',\n 'float16',\n 'float32',\n 'float64',\n 
'half',\n 'int16',\n 'int32',\n 'int64',\n 'int8',\n 'string',\n 'uint16',\n 'uint32',\n 'uint64',\n 'uint8',\n # 'as_string',\n # 'bfloat16',\n # 'dtypes',\n # 'qint16',\n # 'qint32',\n # 'qint8',\n # 'quint16',\n # 'quint8',\n]\n\n\n# --- Begin Public Functions --------------------------------------------------\n\nas_dtype = utils.copy_docstring(\n tf.as_dtype,\n lambda type_value: np.dtype(type_value).type)\n\nbool = np.bool # pylint: disable=redefined-builtin\n\ncomplex = np.complex # pylint: disable=redefined-builtin\n\ncomplex128 = np.complex128\n\ncomplex64 = np.complex64\n\ndouble = np.double\n\nfloat16 = np.float16\n\nfloat32 = np.float32\n\nfloat64 = np.float64\n\nhalf = np.half\n\nint16 = np.int16\n\nint32 = np.int32\n\nint64 = np.int64\n\nint8 = np.int8\n\nstring = np.str\n\nuint16 = np.uint16\n\nuint32 = np.uint32\n\nuint64 = np.uint64\n\nuint8 = np.uint8\n" ]
[ [ "tensorflow.compat.v2.math.zeta", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.identity", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.where", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.reduce_any", "tensorflow.compat.v2.math.log", "tensorflow.compat.v2.floor", "tensorflow.compat.v2.fill", "tensorflow.compat.v2.equal", "tensorflow.compat.v2.control_dependencies", "tensorflow.compat.v2.less", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.ones_like", "tensorflow.compat.v2.cast", "numpy.ones", "tensorflow.compat.v2.math.log1p" ], [ "tensorflow.executing_eagerly", "tensorflow.reshape", "numpy.int32", "tensorflow.test.main", "numpy.ones", "numpy.concatenate", "numpy.prod", "tensorflow.compat.v1.placeholder_with_default" ], [ "tensorflow.convert_to_tensor", "tensorflow.shape", "tensorflow.ones", "tensorflow.GradientTape", "tensorflow.square", "tensorflow.rank", "tensorflow.nest.flatten", "tensorflow.nest.pack_sequence_as", "tensorflow.nest.map_structure", "tensorflow.python.util.nest.flatten_up_to" ], [ "tensorflow.compat.v2.ones", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.name_scope" ], [ "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.floor", "tensorflow.compat.v2.ones_like", "tensorflow.compat.v2.exp", "tensorflow.compat.v2.control_dependencies", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.where", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.math.ceil" ], [ "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
profintegra/stumpy
[ "66b3402d91820005b466e1da6fe353b61e6246c5", "66b3402d91820005b466e1da6fe353b61e6246c5" ]
[ "stumpy/gpu_aamp.py", "tests/test_mstump.py" ]
[ "# STUMPY\n# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.\n# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.\nimport logging\nimport math\nimport multiprocessing as mp\nimport os\n\nimport numpy as np\nfrom numba import cuda\n\nfrom . import core, config\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\n \"(i8, f8[:], f8[:], i8, f8[:], f8[:], f8[:], b1[:], b1[:],\"\n \"f8[:], f8[:], i8, b1, i8, f8[:, :], i8[:, :], b1)\"\n)\ndef _compute_and_update_PI_kernel(\n i,\n T_A,\n T_B,\n m,\n QT_even,\n QT_odd,\n QT_first,\n T_A_subseq_isfinite,\n T_B_subseq_isfinite,\n T_A_subseq_squared,\n T_B_subseq_squared,\n k,\n ignore_trivial,\n excl_zone,\n profile,\n indices,\n compute_QT,\n):\n \"\"\"\n A Numba CUDA kernel to update the non-normalized (i.e., without z-normalization)\n matrix profile and matrix profile indices\n\n Parameters\n ----------\n i : int\n sliding window `i`\n\n T_A : ndarray\n The time series or sequence for which to compute the dot product\n\n T_B : ndarray\n The time series or sequence that will be used to annotate T_A. For every\n subsequence in T_A, its nearest neighbor in T_B will be recorded.\n\n m : int\n Window size\n\n QT_even : ndarray\n The input QT array (dot product between the query sequence,`Q`, and\n time series, `T`) to use when `i` is even\n\n QT_odd : ndarray\n The input QT array (dot product between the query sequence,`Q`, and\n time series, `T`) to use when `i` is odd\n\n QT_first : ndarray\n Dot product between the first query sequence,`Q`, and time series, `T`\n\n T_A_subseq_isfinite : ndarray\n A boolean array that indicates whether a subsequence in `T_A` contains a\n `np.nan`/`np.inf` value (False)\n\n T_B_subseq_isfinite : ndarray\n A boolean array that indicates whether a subsequence in `T_B` contains a\n `np.nan`/`np.inf` value (False)\n\n T_A_subseq_squared : ndarray\n The squared subsequences of `T_A`\n\n T_B_subseq_squared : ndarray\n The squared subsequences of `T_B`\n\n k : int\n The total number of sliding windows to iterate over\n\n ignore_trivial : bool\n Set to `True` if this is a self-join. Otherwise, for AB-join, set this to\n `False`.\n\n excl_zone : int\n The half width for the exclusion zone relative to the current\n sliding window\n\n profile : ndarray\n Matrix profile. 
The first column consists of the global matrix profile,\n the second column consists of the left matrix profile, and the third\n column consists of the right matrix profile.\n\n indices : ndarray\n The first column consists of the matrix profile indices, the second\n column consists of the left matrix profile indices, and the third\n column consists of the right matrix profile indices.\n\n compute_QT : bool\n A boolean flag for whether or not to compute QT\n\n Returns\n -------\n None\n\n Notes\n -----\n `arXiv:1901.05708 \\\n <https://arxiv.org/pdf/1901.05708.pdf>`__\n\n See Algorithm 1\n\n Note that we have extended this algorithm for AB-joins as well.\n\n `DOI: 10.1109/ICDM.2016.0085 \\\n <https://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf>`__\n\n See Table II, Figure 5, and Figure 6\n \"\"\"\n start = cuda.grid(1)\n stride = cuda.gridsize(1)\n\n if i % 2 == 0:\n QT_out = QT_even\n QT_in = QT_odd\n else:\n QT_out = QT_odd\n QT_in = QT_even\n\n for j in range(start, QT_out.shape[0], stride):\n zone_start = max(0, j - excl_zone)\n zone_stop = min(k, j + excl_zone)\n\n if compute_QT:\n QT_out[j] = (\n QT_in[j - 1] - T_B[i - 1] * T_A[j - 1] + T_B[i + m - 1] * T_A[j + m - 1]\n )\n\n QT_out[0] = QT_first[i]\n\n if not T_B_subseq_isfinite[i] or not T_A_subseq_isfinite[j]:\n D = np.inf\n else:\n D = T_B_subseq_squared[i] + T_A_subseq_squared[j] - 2.0 * QT_out[j]\n\n if D < config.STUMPY_D_SQUARED_THRESHOLD:\n D = 0\n\n if ignore_trivial:\n if i <= zone_stop and i >= zone_start:\n D = np.inf\n if D < profile[j, 1] and i < j:\n profile[j, 1] = D\n indices[j, 1] = i\n if D < profile[j, 2] and i > j:\n profile[j, 2] = D\n indices[j, 2] = i\n\n if D < profile[j, 0]:\n profile[j, 0] = D\n indices[j, 0] = i\n\n\ndef _gpu_aamp(\n T_A_fname,\n T_B_fname,\n m,\n range_stop,\n excl_zone,\n T_A_subseq_isfinite_fname,\n T_B_subseq_isfinite_fname,\n T_A_subseq_squared_fname,\n T_B_subseq_squared_fname,\n QT_fname,\n QT_first_fname,\n k,\n ignore_trivial=True,\n range_start=1,\n device_id=0,\n):\n \"\"\"\n A Numba CUDA version of AAMP for parallel computation of the non-normalized (i.e.,\n without z-normalization) matrix profile, matrix profile indices, left matrix profile\n indices, and right matrix profile indices.\n\n Parameters\n ----------\n T_A_fname : str\n The file name for the time series or sequence for which to compute\n the matrix profile\n\n T_B_fname : str\n The file name for the time series or sequence that will be used to annotate T_A.\n For every subsequence in T_A, its nearest neighbor in T_B will be recorded.\n\n m : int\n Window size\n\n range_stop : int\n The index value along T_B for which to stop the matrix profile\n calculation. 
This parameter is here for consistency with the\n distributed `stumped` algorithm.\n\n excl_zone : int\n The half width for the exclusion zone relative to the current\n sliding window\n\n T_A_subseq_isfinite_fname : str\n The file name for the boolean array that indicates whether a subsequence in\n `T_A` contains a `np.nan`/`np.inf` value (False)\n\n T_B_subseq_isfinite_fname : str\n The file name for the boolean array that indicates whether a subsequence in\n `T_B` contains a `np.nan`/`np.inf` value (False)\n\n T_A_subseq_squared_fname : str\n The file name for the squared subsequences of `T_A`\n\n T_B_subseq_squared_fname : str\n The file name for the squared subsequences of `T_B`\n\n QT_fname : str\n The file name for the dot product between some query sequence,`Q`,\n and time series, `T`\n\n QT_first_fname : str\n The file name for the QT for the first window relative to the current\n sliding window\n\n k : int\n The total number of sliding windows to iterate over\n\n ignore_trivial : bool, default True\n Set to `True` if this is a self-join. Otherwise, for AB-join, set this to\n `False`. Default is `True`.\n\n range_start : int, default 1\n The starting index value along T_B for which to start the matrix\n profile calculation. Default is 1.\n\n device_id : int, default 0\n The (GPU) device number to use. The default value is `0`.\n\n Returns\n -------\n profile_fname : str\n The file name for the matrix profile\n\n indices_fname : str\n The file name for the matrix profile indices. The first column of the\n array consists of the matrix profile indices, the second column consists\n of the left matrix profile indices, and the third column consists of the\n right matrix profile indices.\n\n Notes\n -----\n `arXiv:1901.05708 \\\n <https://arxiv.org/pdf/1901.05708.pdf>`__\n\n See Algorithm 1\n\n Note that we have extended this algorithm for AB-joins as well.\n\n `DOI: 10.1109/ICDM.2016.0085 \\\n <https://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf>`__\n\n See Table II, Figure 5, and Figure 6\n \"\"\"\n threads_per_block = config.STUMPY_THREADS_PER_BLOCK\n blocks_per_grid = math.ceil(k / threads_per_block)\n\n T_A = np.load(T_A_fname, allow_pickle=False)\n T_B = np.load(T_B_fname, allow_pickle=False)\n QT = np.load(QT_fname, allow_pickle=False)\n QT_first = np.load(QT_first_fname, allow_pickle=False)\n T_A_subseq_isfinite = np.load(T_A_subseq_isfinite_fname, allow_pickle=False)\n T_B_subseq_isfinite = np.load(T_B_subseq_isfinite_fname, allow_pickle=False)\n T_A_subseq_squared = np.load(T_A_subseq_squared_fname, allow_pickle=False)\n T_B_subseq_squared = np.load(T_B_subseq_squared_fname, allow_pickle=False)\n\n with cuda.gpus[device_id]:\n device_T_A = cuda.to_device(T_A)\n device_T_A_subseq_isfinite = cuda.to_device(T_A_subseq_isfinite)\n device_T_A_subseq_squared = cuda.to_device(T_A_subseq_squared)\n device_QT_odd = cuda.to_device(QT)\n device_QT_even = cuda.to_device(QT)\n device_QT_first = cuda.to_device(QT_first)\n if ignore_trivial:\n device_T_B = device_T_A\n device_T_B_subseq_isfinite = device_T_A_subseq_isfinite\n device_T_B_subseq_squared = device_T_A_subseq_squared\n else:\n device_T_B = cuda.to_device(T_B)\n device_T_B_subseq_isfinite = cuda.to_device(T_B_subseq_isfinite)\n device_T_B_subseq_squared = cuda.to_device(T_B_subseq_squared)\n\n profile = np.full((k, 3), np.inf) # float64\n indices = np.full((k, 3), -1, dtype=np.int64) # int64\n\n device_profile = cuda.to_device(profile)\n device_indices = cuda.to_device(indices)\n 
_compute_and_update_PI_kernel[blocks_per_grid, threads_per_block](\n range_start - 1,\n device_T_A,\n device_T_B,\n m,\n device_QT_even,\n device_QT_odd,\n device_QT_first,\n device_T_A_subseq_isfinite,\n device_T_B_subseq_isfinite,\n device_T_A_subseq_squared,\n device_T_B_subseq_squared,\n k,\n ignore_trivial,\n excl_zone,\n device_profile,\n device_indices,\n False,\n )\n\n for i in range(range_start, range_stop):\n _compute_and_update_PI_kernel[blocks_per_grid, threads_per_block](\n i,\n device_T_A,\n device_T_B,\n m,\n device_QT_even,\n device_QT_odd,\n device_QT_first,\n device_T_A_subseq_isfinite,\n device_T_B_subseq_isfinite,\n device_T_A_subseq_squared,\n device_T_B_subseq_squared,\n k,\n ignore_trivial,\n excl_zone,\n device_profile,\n device_indices,\n True,\n )\n\n profile = device_profile.copy_to_host()\n indices = device_indices.copy_to_host()\n profile = np.sqrt(profile)\n\n profile_fname = core.array_to_temp_file(profile)\n indices_fname = core.array_to_temp_file(indices)\n\n return profile_fname, indices_fname\n\n\ndef gpu_aamp(T_A, m, T_B=None, ignore_trivial=True, device_id=0):\n \"\"\"\n Compute the non-normalized (i.e., without z-normalization) matrix profile with one\n or more GPU devices\n\n This is a convenience wrapper around the Numba `cuda.jit` `_gpu_aamp` function\n which computes the non-normalized matrix profile according to modified version\n GPU-STOMP.\n\n Parameters\n ----------\n T_A : ndarray\n The time series or sequence for which to compute the matrix profile\n\n m : int\n Window size\n\n T_B : ndarray, default None\n The time series or sequence that contain your query subsequences\n of interest. Default is `None` which corresponds to a self-join.\n\n ignore_trivial : bool, default True\n Set to `True` if this is a self-join. Otherwise, for AB-join, set this\n to `False`. Default is `True`.\n\n device_id : int or list, default 0\n The (GPU) device number to use. The default value is `0`. A list of\n valid device ids (int) may also be provided for parallel GPU-STUMP\n computation. A list of all valid device ids can be obtained by\n executing `[device.id for device in numba.cuda.list_devices()]`.\n\n Returns\n -------\n out : ndarray\n The first column consists of the matrix profile, the second column\n consists of the matrix profile indices, the third column consists of\n the left matrix profile indices, and the fourth column consists of\n the right matrix profile indices.\n\n Notes\n -----\n `arXiv:1901.05708 \\\n <https://arxiv.org/pdf/1901.05708.pdf>`__\n\n See Algorithm 1\n\n Note that we have extended this algorithm for AB-joins as well.\n\n `DOI: 10.1109/ICDM.2016.0085 \\\n <https://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf>`__\n\n See Table II, Figure 5, and Figure 6\n \"\"\"\n if T_B is None: # Self join!\n T_B = T_A\n ignore_trivial = True\n\n T_A, T_A_subseq_isfinite = core.preprocess_non_normalized(T_A, m)\n T_B, T_B_subseq_isfinite = core.preprocess_non_normalized(T_B, m)\n\n T_A_subseq_squared = np.sum(core.rolling_window(T_A * T_A, m), axis=1)\n T_B_subseq_squared = np.sum(core.rolling_window(T_B * T_B, m), axis=1)\n\n if T_A.ndim != 1: # pragma: no cover\n raise ValueError(\n f\"T_A is {T_A.ndim}-dimensional and must be 1-dimensional. \"\n \"For multidimensional STUMP use `stumpy.mstump` or `stumpy.mstumped`\"\n )\n\n if T_B.ndim != 1: # pragma: no cover\n raise ValueError(\n f\"T_B is {T_B.ndim}-dimensional and must be 1-dimensional. 
\"\n \"For multidimensional STUMP use `stumpy.mstump` or `stumpy.mstumped`\"\n )\n\n core.check_window_size(m, max_size=min(T_A.shape[0], T_B.shape[0]))\n\n if ignore_trivial is False and core.are_arrays_equal(T_A, T_B): # pragma: no cover\n logger.warning(\"Arrays T_A, T_B are equal, which implies a self-join.\")\n logger.warning(\"Try setting `ignore_trivial = True`.\")\n\n if ignore_trivial and core.are_arrays_equal(T_A, T_B) is False: # pragma: no cover\n logger.warning(\"Arrays T_A, T_B are not equal, which implies an AB-join.\")\n logger.warning(\"Try setting `ignore_trivial = False`.\")\n\n n = T_B.shape[0]\n k = T_A.shape[0] - m + 1\n l = n - m + 1\n excl_zone = int(\n np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM)\n ) # See Definition 3 and Figure 3\n\n T_A_fname = core.array_to_temp_file(T_A)\n T_B_fname = core.array_to_temp_file(T_B)\n T_A_subseq_isfinite_fname = core.array_to_temp_file(T_A_subseq_isfinite)\n T_B_subseq_isfinite_fname = core.array_to_temp_file(T_B_subseq_isfinite)\n T_A_subseq_squared_fname = core.array_to_temp_file(T_A_subseq_squared)\n T_B_subseq_squared_fname = core.array_to_temp_file(T_B_subseq_squared)\n\n out = np.empty((k, 4), dtype=object)\n\n if isinstance(device_id, int):\n device_ids = [device_id]\n else:\n device_ids = device_id\n\n profile = [None] * len(device_ids)\n indices = [None] * len(device_ids)\n\n for _id in device_ids:\n with cuda.gpus[_id]:\n if (\n cuda.current_context().__class__.__name__ != \"FakeCUDAContext\"\n ): # pragma: no cover\n cuda.current_context().deallocations.clear()\n\n step = 1 + l // len(device_ids)\n\n # Start process pool for multi-GPU request\n if len(device_ids) > 1: # pragma: no cover\n mp.set_start_method(\"spawn\", force=True)\n p = mp.Pool(processes=len(device_ids))\n results = [None] * len(device_ids)\n\n QT_fnames = []\n QT_first_fnames = []\n\n for idx, start in enumerate(range(0, l, step)):\n stop = min(l, start + step)\n\n QT, QT_first = core._get_QT(start, T_A, T_B, m)\n QT_fname = core.array_to_temp_file(QT)\n QT_first_fname = core.array_to_temp_file(QT_first)\n QT_fnames.append(QT_fname)\n QT_first_fnames.append(QT_first_fname)\n\n if len(device_ids) > 1 and idx < len(device_ids) - 1: # pragma: no cover\n # Spawn and execute in child process for multi-GPU request\n results[idx] = p.apply_async(\n _gpu_aamp,\n (\n T_A_fname,\n T_B_fname,\n m,\n stop,\n excl_zone,\n T_A_subseq_isfinite_fname,\n T_B_subseq_isfinite_fname,\n T_A_subseq_squared_fname,\n T_B_subseq_squared_fname,\n QT_fname,\n QT_first_fname,\n k,\n ignore_trivial,\n start + 1,\n device_ids[idx],\n ),\n )\n else:\n # Execute last chunk in parent process\n # Only parent process is executed when a single GPU is requested\n profile[idx], indices[idx] = _gpu_aamp(\n T_A_fname,\n T_B_fname,\n m,\n stop,\n excl_zone,\n T_A_subseq_isfinite_fname,\n T_B_subseq_isfinite_fname,\n T_A_subseq_squared_fname,\n T_B_subseq_squared_fname,\n QT_fname,\n QT_first_fname,\n k,\n ignore_trivial,\n start + 1,\n device_ids[idx],\n )\n\n # Clean up process pool for multi-GPU request\n if len(device_ids) > 1: # pragma: no cover\n p.close()\n p.join()\n\n # Collect results from spawned child processes if they exist\n for idx, result in enumerate(results):\n if result is not None:\n profile[idx], indices[idx] = result.get()\n\n os.remove(T_A_fname)\n os.remove(T_B_fname)\n os.remove(T_A_subseq_isfinite_fname)\n os.remove(T_B_subseq_isfinite_fname)\n os.remove(T_A_subseq_squared_fname)\n os.remove(T_B_subseq_squared_fname)\n for QT_fname in QT_fnames:\n os.remove(QT_fname)\n 
for QT_first_fname in QT_first_fnames:\n os.remove(QT_first_fname)\n\n for idx in range(len(device_ids)):\n profile_fname = profile[idx]\n indices_fname = indices[idx]\n profile[idx] = np.load(profile_fname, allow_pickle=False)\n indices[idx] = np.load(indices_fname, allow_pickle=False)\n os.remove(profile_fname)\n os.remove(indices_fname)\n\n for i in range(1, len(device_ids)):\n # Update all matrix profiles and matrix profile indices\n # (global, left, right) and store in profile[0] and indices[0]\n for col in range(profile[0].shape[1]): # pragma: no cover\n cond = profile[0][:, col] < profile[i][:, col]\n profile[0][:, col] = np.where(cond, profile[0][:, col], profile[i][:, col])\n indices[0][:, col] = np.where(cond, indices[0][:, col], indices[i][:, col])\n\n out[:, 0] = profile[0][:, 0]\n out[:, 1:4] = indices[0][:, :]\n\n threshold = 10e-6\n if core.are_distances_too_small(out[:, 0], threshold=threshold): # pragma: no cover\n logger.warning(f\"A large number of values are smaller than {threshold}.\")\n logger.warning(\"For a self-join, try setting `ignore_trivial = True`.\")\n\n return out\n", "import numpy as np\nimport numpy.testing as npt\nimport pandas as pd\nfrom stumpy import core, config\nfrom stumpy import mstump, subspace\nfrom stumpy.mstump import (\n _multi_mass,\n _query_mstump_profile,\n _get_first_mstump_profile,\n _get_multi_QT,\n _apply_include,\n)\nimport pytest\nimport naive\n\n\ndef naive_rolling_window_dot_product(Q, T):\n window = len(Q)\n result = np.zeros(len(T) - window + 1)\n for i in range(len(result)):\n result[i] = np.dot(T[i : i + window], Q)\n return result\n\n\ntest_data = [\n (np.array([[584, -11, 23, 79, 1001, 0, -19]], dtype=np.float64), 3),\n (np.random.uniform(-1000, 1000, [5, 20]).astype(np.float64), 5),\n]\n\nsubstitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]\nsubstitution_values = [np.nan, np.inf]\n\n\ndef test_apply_include():\n D = np.random.uniform(-1000, 1000, [10, 20]).astype(np.float64)\n ref_D = np.empty(D.shape)\n comp_D = np.empty(D.shape)\n for width in range(D.shape[0]):\n for i in range(D.shape[0] - width):\n ref_D[:, :] = D[:, :]\n comp_D[:, :] = D[:, :]\n include = np.asarray(range(i, i + width + 1))\n\n naive.apply_include(D, include)\n _apply_include(D, include)\n\n npt.assert_almost_equal(ref_D, comp_D)\n\n\ndef test_multi_mass_seeded():\n np.random.seed(5)\n T = np.random.uniform(-1000, 1000, [3, 10]).astype(np.float64)\n m = 5\n\n trivial_idx = 2\n\n Q = T[:, trivial_idx : trivial_idx + m]\n\n ref = naive.multi_mass(Q, T, m)\n\n M_T, Σ_T = core.compute_mean_std(T, m)\n comp = _multi_mass(Q, T, m, M_T, Σ_T, M_T[:, trivial_idx], Σ_T[:, trivial_idx])\n\n npt.assert_almost_equal(ref, comp, decimal=config.STUMPY_TEST_PRECISION)\n\n\[email protected](\"T, m\", test_data)\ndef test_multi_mass(T, m):\n trivial_idx = 2\n\n Q = T[:, trivial_idx : trivial_idx + m]\n\n ref = naive.multi_mass(Q, T, m)\n\n M_T, Σ_T = core.compute_mean_std(T, m)\n comp = _multi_mass(Q, T, m, M_T, Σ_T, M_T[:, trivial_idx], Σ_T[:, trivial_idx])\n\n npt.assert_almost_equal(ref, comp, decimal=config.STUMPY_TEST_PRECISION)\n\n\[email protected](\"T, m\", test_data)\ndef test_query_mstump_profile(T, m):\n excl_zone = int(np.ceil(m / 4))\n for query_idx in range(T.shape[0] - m + 1):\n ref_P, ref_I = naive.mstump(T, m, excl_zone)\n ref_P = ref_P[:, query_idx]\n ref_I = ref_I[:, query_idx]\n\n M_T, Σ_T = core.compute_mean_std(T, m)\n comp_P, comp_I = _query_mstump_profile(\n query_idx, T, T, m, excl_zone, M_T, Σ_T, M_T, Σ_T\n )\n\n 
npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_equal(ref_I, comp_I)\n\n\[email protected](\"T, m\", test_data)\ndef test_get_first_mstump_profile(T, m):\n excl_zone = int(np.ceil(m / 4))\n start = 0\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone)\n ref_P = ref_P[:, start]\n ref_I = ref_I[:, start]\n\n M_T, Σ_T = core.compute_mean_std(T, m)\n comp_P, comp_I = _get_first_mstump_profile(\n start, T, T, m, excl_zone, M_T, Σ_T, M_T, Σ_T\n )\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_equal(ref_I, comp_I)\n\n\[email protected](\"T, m\", test_data)\ndef test_get_multi_QT(T, m):\n start = 0\n Q = core.rolling_window(T, m)\n ref_QT = np.empty((Q.shape[0], Q.shape[1]), dtype=\"float64\")\n ref_QT_first = np.empty((Q.shape[0], Q.shape[1]), dtype=\"float64\")\n\n for dim in range(T.shape[0]):\n ref_QT[dim] = naive_rolling_window_dot_product(\n T[dim, start : start + m], T[dim]\n )\n ref_QT_first[dim] = naive_rolling_window_dot_product(T[dim, :m], T[dim])\n\n comp_QT, comp_QT_first = _get_multi_QT(start, T, m)\n\n npt.assert_almost_equal(ref_QT, comp_QT)\n npt.assert_almost_equal(ref_QT_first, comp_QT_first)\n\n\[email protected](\"T, m\", test_data)\ndef test_subspace(T, m):\n motif_idx = 1\n nn_idx = 4\n\n for k in range(T.shape[0]):\n ref_S = naive.subspace(T, m, motif_idx, nn_idx, k)\n comp_S = subspace(T, m, motif_idx, nn_idx, k)\n npt.assert_almost_equal(ref_S, comp_S)\n\n\[email protected](\"T, m\", test_data)\ndef test_subspace_include(T, m):\n motif_idx = 1\n nn_idx = 4\n for width in range(T.shape[0]):\n for i in range(T.shape[0] - width):\n include = np.asarray(range(i, i + width + 1))\n\n for k in range(T.shape[0]):\n ref_S = naive.subspace(T, m, motif_idx, nn_idx, k, include)\n comp_S = subspace(T, m, motif_idx, nn_idx, k, include)\n npt.assert_almost_equal(ref_S, comp_S)\n\n\[email protected](\"T, m\", test_data)\ndef test_subspace_discords(T, m):\n discord_idx = 1\n nn_idx = 4\n\n for k in range(T.shape[0]):\n ref_S = naive.subspace(T, m, discord_idx, nn_idx, k, discords=True)\n comp_S = subspace(T, m, discord_idx, nn_idx, k, discords=True)\n npt.assert_almost_equal(ref_S, comp_S)\n\n\[email protected](\"T, m\", test_data)\ndef test_subspace_include_discords(T, m):\n discord_idx = 1\n nn_idx = 4\n for width in range(T.shape[0]):\n for i in range(T.shape[0] - width):\n include = np.asarray(range(i, i + width + 1))\n\n for k in range(T.shape[0]):\n ref_S = naive.subspace(\n T, m, discord_idx, nn_idx, k, include, discords=True\n )\n comp_S = subspace(T, m, discord_idx, nn_idx, k, include, discords=True)\n npt.assert_almost_equal(ref_S, comp_S)\n\n\ndef test_naive_mstump():\n T = np.random.uniform(-1000, 1000, [1, 1000]).astype(np.float64)\n m = 20\n\n zone = int(np.ceil(m / 4))\n\n ref_mp = naive.stamp(T[0], m, exclusion_zone=zone)\n ref_P = ref_mp[np.newaxis, :, 0]\n ref_I = ref_mp[np.newaxis, :, 1]\n\n comp_P, comp_I = naive.mstump(T, m, zone)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n\ndef test_mstump_int_input():\n with pytest.raises(TypeError):\n mstump(np.arange(20).reshape(2, 10), 5)\n\n\[email protected](\"T, m\", test_data)\ndef test_mstump(T, m):\n excl_zone = int(np.ceil(m / 4))\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone)\n comp_P, comp_I = mstump(T, m)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n\[email protected](\"T, m\", test_data)\ndef test_mstump_include(T, m):\n for width in range(T.shape[0]):\n for i in range(T.shape[0] - width):\n include = np.asarray(range(i, i 
+ width + 1))\n excl_zone = int(np.ceil(m / 4))\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone, include)\n comp_P, comp_I = mstump(T, m, include)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n\[email protected](\"T, m\", test_data)\ndef test_mstump_discords(T, m):\n excl_zone = int(np.ceil(m / 4))\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone, discords=True)\n comp_P, comp_I = mstump(T, m, discords=True)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n\[email protected](\"T, m\", test_data)\ndef test_mstump_include_discords(T, m):\n for width in range(T.shape[0]):\n for i in range(T.shape[0] - width):\n include = np.asarray(range(i, i + width + 1))\n\n excl_zone = int(np.ceil(m / 4))\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone, include, discords=True)\n comp_P, comp_I = mstump(T, m, include, discords=True)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n\[email protected](\"T, m\", test_data)\ndef test_mstump_wrapper(T, m):\n excl_zone = int(np.ceil(m / 4))\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone)\n comp_P, comp_I = mstump(T, m)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n df = pd.DataFrame(T.T)\n comp_P, comp_I = mstump(df, m)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n\[email protected](\"T, m\", test_data)\ndef test_mstump_wrapper_include(T, m):\n for width in range(T.shape[0]):\n for i in range(T.shape[0] - width):\n include = np.asarray(range(i, i + width + 1))\n\n excl_zone = int(np.ceil(m / 4))\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone, include)\n comp_P, comp_I = mstump(T, m, include)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n df = pd.DataFrame(T.T)\n comp_P, comp_I = mstump(df, m, include)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n\ndef test_constant_subsequence_self_join():\n T_A = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64)))\n T = np.array([T_A, T_A, np.random.rand(T_A.shape[0])])\n m = 3\n\n excl_zone = int(np.ceil(m / 4))\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone)\n comp_P, comp_I = mstump(T, m)\n\n npt.assert_almost_equal(ref_P, comp_P) # ignore indices\n\n\ndef test_identical_subsequence_self_join():\n identical = np.random.rand(8)\n T_A = np.random.rand(20)\n T_A[1 : 1 + identical.shape[0]] = identical\n T_A[11 : 11 + identical.shape[0]] = identical\n T = np.array([T_A, T_A, np.random.rand(T_A.shape[0])])\n m = 3\n\n excl_zone = int(np.ceil(m / 4))\n\n ref_P, ref_I = naive.mstump(T, m, excl_zone)\n comp_P, comp_I = mstump(T, m)\n\n npt.assert_almost_equal(\n ref_P, comp_P, decimal=config.STUMPY_TEST_PRECISION\n ) # ignore indices\n\n\[email protected](\"T, m\", test_data)\[email protected](\"substitute\", substitution_values)\[email protected](\"substitution_locations\", substitution_locations)\ndef test_mstump_nan_inf_self_join_first_dimension(\n T, m, substitute, substitution_locations\n):\n excl_zone = int(np.ceil(m / 4))\n\n T_sub = T.copy()\n\n for substitution_location in substitution_locations:\n T_sub[:] = T[:]\n T_sub[0, substitution_location] = substitute\n\n ref_P, ref_I = naive.mstump(T_sub, m, excl_zone)\n comp_P, comp_I = mstump(T_sub, m)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n\n\[email protected](\"T, m\", test_data)\[email protected](\"substitute\", 
substitution_values)\[email protected](\"substitution_locations\", substitution_locations)\ndef test_mstump_nan_self_join_all_dimensions(T, m, substitute, substitution_locations):\n excl_zone = int(np.ceil(m / 4))\n\n T_sub = T.copy()\n\n for substitution_location in substitution_locations:\n T_sub[:] = T[:]\n T_sub[:, substitution_location] = substitute\n\n ref_P, ref_I = naive.mstump(T_sub, m, excl_zone)\n comp_P, comp_I = mstump(T_sub, m)\n\n npt.assert_almost_equal(ref_P, comp_P)\n npt.assert_almost_equal(ref_I, comp_I)\n" ]
[ [ "numpy.sqrt", "numpy.full", "numpy.ceil", "numpy.load", "numpy.where", "numpy.empty" ], [ "numpy.testing.assert_equal", "numpy.dot", "numpy.random.seed", "numpy.arange", "pandas.DataFrame", "numpy.ones", "numpy.testing.assert_almost_equal", "numpy.ceil", "numpy.random.rand", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
susantamoh84/TensorFlow-Book
[ "905bd82e6e58c373b566f4813859c5dfc1fa1aa4" ]
[ "ch09_cnn/cifar_tools.py" ]
[ "import cPickle\nimport numpy as np\n\n\ndef unpickle(file):\n fo = open(file, 'rb')\n dict = cPickle.load(fo)\n fo.close()\n return dict\n\n\ndef clean(data):\n imgs = data.reshape(data.shape[0], 3, 32, 32)\n grayscale_imgs = imgs.mean(1)\n cropped_imgs = grayscale_imgs[:, 4:28, 4:28]\n img_data = cropped_imgs.reshape(data.shape[0], -1)\n img_size = np.shape(img_data)[1]\n means = np.mean(img_data, axis=1)\n meansT = means.reshape(len(means), 1)\n stds = np.std(img_data, axis=1)\n stdsT = stds.reshape(len(stds), 1)\n adj_stds = np.maximum(stdsT, 1.0 / np.sqrt(img_size))\n normalized = (img_data - meansT) / adj_stds\n return normalized\n\n\ndef read_data(directory):\n names = unpickle('{}/batches.meta'.format(directory))['label_names']\n print('names', names)\n\n data, labels = [], []\n for i in range(1, 6):\n filename = '{}/data_batch_{}'.format(directory, i)\n batch_data = unpickle(filename)\n if len(data) > 0:\n data = np.vstack((data, batch_data['data']))\n labels = np.hstack((labels, batch_data['labels']))\n else:\n data = batch_data['data']\n labels = batch_data['labels']\n\n print(np.shape(data), np.shape(labels))\n\n data = clean(data)\n data = data.astype(np.float32)\n return names, data, labels\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "numpy.std", "numpy.shape", "numpy.mean", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joschout/Multi-Directional-Rule-Set-Learning
[ "ef0620b115f4e0fd7fba3e752d238a8020c1ca6b", "ef0620b115f4e0fd7fba3e752d238a8020c1ca6b", "ef0620b115f4e0fd7fba3e752d238a8020c1ca6b", "ef0620b115f4e0fd7fba3e752d238a8020c1ca6b", "ef0620b115f4e0fd7fba3e752d238a8020c1ca6b" ]
[ "mdrsl/rule_generation/association_rule_mining/apyori_impl/mine_mt_rules_from_dataframe_with_apyori.py", "experiments/arcbench_data_preparation/arc_model_data_preparation.py", "mdrsl/rule_models/mids/objective_function/mids_objective_function_statistics.py", "experiments/e2_multi_directional_model_comparison/rule_mining/mine_multi_target_rules_from_random_forests2.py", "mdrsl/rule_models/mids/objective_function/mids_objective_function_without_value_reuse.py" ]
[ "import random\n\nimport numpy as np\nfrom typing import List, Optional, Dict\nimport pandas as pd\nimport time\n\nfrom mdrsl.rule_generation.association_rule_mining.apyori_impl.mine_mt_rules_from_transactions_with_apyori import (\n mine_MCARs_from_transactions_using_apyori)\nfrom mdrsl.rule_generation.association_rule_mining.frequent_itemset_mining import (\n dataframe_to_list_of_transactions, run_fim_apriori, dataframe_to_list_of_transactions_with_encoding)\nfrom mdrsl.data_structures.rules.multi_target_class_association_rule import MCAR\n\n\ndef mine_MCARs_from_df_using_apyori(df,\n min_support: float = 0.1, min_confidence: float = 0.0, min_lift=0.0,\n max_length=None) -> List[MCAR]:\n transactions = dataframe_to_list_of_transactions(df)\n return mine_MCARs_from_transactions_using_apyori(\n transactions,\n min_support=min_support, min_confidence=min_confidence,\n min_lift=min_lift, max_length=max_length)\n\n\ndef mine_MCARs_from_df_using_apyori_with_encodings(df,\n min_support: float = 0.1, min_confidence: float = 0.0, min_lift=0.0,\n max_length=None) -> List[MCAR]:\n transactions, item_encoder = dataframe_to_list_of_transactions_with_encoding(df)\n return mine_MCARs_from_transactions_using_apyori(\n transactions,\n min_support=min_support, min_confidence=min_confidence,\n min_lift=min_lift, max_length=max_length, item_encoder=item_encoder)\n\n\ndef mine_MCARs(df, rule_cutoff: int,\n sample=False, random_seed=None,\n verbose: bool = True,\n **top_rules_kwargs) -> List[MCAR]:\n\n transactions: List[List[str]] = dataframe_to_list_of_transactions(df)\n mcars: Optional[List[MCAR]] = _top_rules_MIDS(transactions,\n target_rule_count=rule_cutoff,\n verbose=verbose)\n\n if mcars is None:\n raise Exception(\"no MCARs found as input for MIDS\")\n\n if len(mcars) > rule_cutoff:\n if sample:\n if random_seed is not None:\n random.seed(random_seed)\n mcars_subset = random.sample(mcars, rule_cutoff)\n else:\n mcars_subset = mcars[:rule_cutoff]\n else:\n mcars_subset = mcars\n return mcars_subset\n\n\nif __name__ == '__main__':\n\n df_total = pd.DataFrame({\n 'A': np.array([1] * 4, dtype='float32'),\n 'B': np.array([2] * 4, dtype='float32'),\n 'C': np.array([3] * 4, dtype='float32'),\n 'D': np.array([4] * 4, dtype='float32')\n })\n\n print(df_total)\n\n itemsets = dataframe_to_list_of_transactions(df_total)\n\n support_threshold = 0.1\n dataset_transactions = dataframe_to_list_of_transactions(df_total) # type: List[List[str]]\n\n cars = mine_MCARs_from_transactions_using_apyori(dataset_transactions, min_support=support_threshold)\n for car in cars:\n print(car)\n\n print(\"---\")\n fim_frequent_itemsets = run_fim_apriori(df_total, support_threshold)\n print(fim_frequent_itemsets)\n\n\ndef _top_rules_MIDS(transactions: List[List[str]],\n appearance: Optional[Dict] = None,\n\n target_rule_count: int = 1000,\n\n init_support: float = 0.05,\n init_confidence: float = 0.5,\n\n confidence_step: float = 0.05,\n support_step: float = 0.05,\n\n min_length: int = 2,\n init_max_length: int = 3,\n\n total_timeout: float = 100.0, # max time in seconds\n max_iterations: int = 30,\n verbose: bool = True\n ):\n \"\"\"\n Function for finding the best n (target_rule_count) rules from transaction list.\n PROBLEM: how to define 'best'?\n\n Iteratively:\n Search for the rules under the current mining parameters.\n Check the properties of the found rules.\n If there is still room for improvement,\n Then update the mining parameters,\n\n\n\n STOP if:\n - max nb of iterations is reached (default: 30).\n - the current nb 
of rules is more than the nb of rules we are looking for.\n - the time out is reach\n\n FIND all rules with as constraints:\n - min_support\n - min_confidence\n - max_length\n\n\n Parameters\n ----------\n :param transactions : 2D array of strings, e.g. [[\"a:=:1\", \"b:=:3\"], [\"a:=:4\", \"b:=:2\"]]\n :param appearance : dict - dictionary specifying rule appearance\n :param target_rule_count : int - target number of rules to mine\n :param init_support : float - support from which to start mining\n :param init_confidence : float - confidence from which to start mining\n :param confidence_step : float\n :param support_step : float\n :param min_length : int - minimum len of rules to mine\n :param init_max_length : int - maximum len from which to start mining\n :param total_timeout : float - maximum execution time of the function\n :param max_iterations : int - maximum iterations to try before stopping execution\n :param verbose : bool\n\n Returns\n -------\n list of mined rules. The rules are not ordered.\n\n \"\"\"\n\n if appearance is None:\n appearance = {}\n\n start_time: float = time.time()\n\n # the length of a rule is at most the length of a transaction. (All transactions have the same length.)\n\n # max_rule_length_wanted = 10\n # MAX_RULE_LEN: int = min(len(transactions[0]), max_rule_length_wanted)\n MAX_RULE_LEN: int = len(transactions[0])\n\n\n current_support: float = init_support\n current_confidence: float = init_confidence\n\n current_max_length: int = init_max_length\n\n keep_mining: bool = True\n\n is_max_length_decreased_due_timeout = False\n current_iteration = 0\n\n last_rule_count = -1\n rules: Optional[List[MCAR]] = None\n\n if verbose:\n print(\"STARTING top_rules\")\n while keep_mining:\n current_iteration += 1\n\n if current_iteration > max_iterations:\n if verbose:\n print(\"Max iterations reached\")\n break\n\n if verbose:\n print(f\"--- iteration {current_iteration} ---\")\n print((f\"Running apriori with setting: \"\n f\"confidence={current_confidence}, \"\n f\"support={current_support}, \"\n f\"min_length={min_length}, \"\n f\"max_length={current_max_length}, \"\n f\"MAX_RULE_LEN={MAX_RULE_LEN}\"\n ))\n\n current_rules: List[MCAR] = mine_MCARs_from_transactions_using_apyori(\n transactions, min_support=current_support, min_confidence=current_confidence, max_length=current_max_length)\n # rules_current = fim.arules(transactions, supp=support, conf=conf, mode=\"o\", report=\"sc\", appear=appearance,\n # zmax=maxlen, zmin=minlen)\n\n current_nb_of_rules = len(current_rules)\n\n # assign\n rules = current_rules\n\n if verbose:\n print(f\"Rule count: {current_nb_of_rules}, Iteration: {current_iteration}\")\n\n if current_nb_of_rules >= target_rule_count:\n keep_mining = False\n if verbose:\n print(f\"\\tTarget rule count satisfied: {target_rule_count}\")\n else:\n current_execution_time = time.time() - start_time\n\n # if timeout limit exceeded\n if current_execution_time > total_timeout:\n if verbose:\n print(\"Execution time exceeded:\", total_timeout)\n keep_mining = False\n\n # if we can still increase our rule length AND\n # the number of rules found has changed (increased?) 
since last time AND\n # there has\n elif current_max_length < MAX_RULE_LEN and last_rule_count != current_nb_of_rules and not is_max_length_decreased_due_timeout:\n current_max_length += 1\n last_rule_count = current_nb_of_rules\n if verbose:\n print(f\"\\tIncreasing max_length {current_max_length}\")\n\n # if we can still increase our rule length AND\n #\n # we can still increase our support\n # THEN:\n # increase our support\n # increment our max length\n elif current_max_length < MAX_RULE_LEN and is_max_length_decreased_due_timeout and current_support <= 1 - support_step:\n current_support += support_step\n current_max_length += 1\n last_rule_count = current_nb_of_rules\n is_max_length_decreased_due_timeout = False\n\n if verbose:\n print(f\"\\tIncreasing maxlen to {current_max_length}\")\n print(f\"\\tIncreasing minsup to {current_support}\")\n # IF we can still decrease our confidence\n # THEN decrease our confidence\n elif current_confidence > confidence_step:\n current_confidence -= confidence_step\n if verbose:\n print(f\"\\tDecreasing confidence to {current_confidence}\")\n else:\n if verbose:\n print(\"\\tAll options exhausted\")\n keep_mining = False\n if verbose:\n end_of_current_iteration_message = f\"--- end iteration {current_iteration} ---\"\n print(end_of_current_iteration_message)\n print(\"-\" * len(end_of_current_iteration_message))\n if verbose:\n print(f\"FINISHED top_rules after {current_iteration} iterations\")\n return rules\n", "import pandas as pd\n\nfrom experiments.arcbench_data_preparation.reworked_one_hot_encoding import get_original_data_fold_abs_file_name, \\\n TrainTestEnum\nfrom mdrsl.data_handling.nan_data_filtering import remove_instances_with_nans_in_column\nfrom mdrsl.data_handling.reorder_dataset_columns import reorder_columns\n\n\ndef prepare_arc_data(\n dataset_name: str,\n fold_i: int,\n target_attribute: str,\n train_test: TrainTestEnum\n) -> pd.DataFrame:\n # read in original (discretized) training/test data\n # reorder the data so the target column is last\n original_data_fold_abs_file_name = get_original_data_fold_abs_file_name(dataset_name, fold_i, train_test)\n df_original_column_order = pd.read_csv(original_data_fold_abs_file_name, delimiter=',')\n df_reordered = reorder_columns(df_original_column_order, target_attribute)\n\n # REMOVE INSTANCES WITH NAN AS TARGET VALUE:\n df_reordered = remove_instances_with_nans_in_column(df_reordered, target_attribute)\n return df_reordered\n", "from typing import Optional, Dict\n\nfrom tabulate import tabulate\nimport pandas as pd\n\nfrom mdrsl.utils.value_collection import ValueCollector\n\n\nclass MIDSObjectiveFunctionStatistics:\n def __init__(self):\n\n self.last_f0: Optional[int] = None\n self.last_f1: Optional[int] = None\n self.last_f2: Optional[int] = None\n self.last_f3: Optional[int] = None\n self.last_f4: Optional[int] = None\n self.last_f5: Optional[int] = None\n self.last_f6: Optional[int] = None\n self.last_f7: Optional[int] = None\n self.last_f_total: Optional[int] = None\n\n self.value_collectors = dict(\n f0=ValueCollector(),\n f1=ValueCollector(),\n f2=ValueCollector(),\n f3=ValueCollector(),\n f4=ValueCollector(),\n f5=ValueCollector(),\n f6=ValueCollector(),\n f_total=ValueCollector()\n )\n\n def add_values(self, f0, f1, f2, f3, f4, f5, f6, f_total):\n\n self.last_f0 = f0\n self.last_f1 = f1\n self.last_f2 = f2\n self.last_f3 = f3\n self.last_f4 = f4\n self.last_f5 = f5\n self.last_f6 = f6\n self.last_f_total = f_total\n\n self.value_collectors['f0'].add_value(f0)\n 
self.value_collectors['f1'].add_value(f1)\n self.value_collectors['f2'].add_value(f2)\n self.value_collectors['f3'].add_value(f3)\n self.value_collectors['f4'].add_value(f4)\n self.value_collectors['f5'].add_value(f5)\n self.value_collectors['f6'].add_value(f6)\n self.value_collectors['f_total'].add_value(f_total)\n\n def values_to_pandas_dataframe(self) -> Optional[pd.DataFrame]:\n if ValueCollector.collect_values:\n columns = ['type', 'value']\n data = []\n for function_name, value_collector in self.value_collectors.items():\n for value in value_collector.values:\n data.append([function_name, value])\n\n df = pd.DataFrame(data=data, columns=columns)\n return df\n else:\n return None\n\n def values_to_pandas_dataframe2(self) -> Optional[pd.DataFrame]:\n if ValueCollector.collect_values:\n columns = ['call_index', 'type', 'value']\n data = []\n\n for function_name, value_collector in self.value_collectors.items():\n for call_index, value in enumerate(value_collector.values):\n data.append([call_index, function_name, value])\n\n df = pd.DataFrame(data=data, columns=columns)\n return df\n else:\n return None\n\n def get_last_f_values(self) -> Dict[str, float]:\n return dict(\n f0=self.last_f0,\n f1=self.last_f1,\n f2=self.last_f2,\n f3=self.last_f3,\n f4=self.last_f4,\n f5=self.last_f5,\n f6=self.last_f6,\n f_total=self.last_f_total)\n\n def __str__(self):\n table_str = tabulate(\n [\n ['count',\n self.value_collectors['f0'].count,\n self.value_collectors['f1'].count,\n self.value_collectors['f2'].count,\n self.value_collectors['f3'].count,\n self.value_collectors['f4'].count,\n self.value_collectors['f5'].count,\n self.value_collectors['f6'].count,\n self.value_collectors['f_total'].count\n ],\n ['sum',\n self.value_collectors['f0'].sum,\n self.value_collectors['f1'].sum,\n self.value_collectors['f2'].sum,\n self.value_collectors['f3'].sum,\n self.value_collectors['f4'].sum,\n self.value_collectors['f5'].sum,\n self.value_collectors['f6'].sum,\n self.value_collectors['f_total'].sum\n ],\n ['min',\n self.value_collectors['f0'].min,\n self.value_collectors['f1'].min,\n self.value_collectors['f2'].min,\n self.value_collectors['f3'].min,\n self.value_collectors['f4'].min,\n self.value_collectors['f5'].min,\n self.value_collectors['f6'].min,\n self.value_collectors['f_total'].min\n ],\n ['avg',\n self.value_collectors['f0'].get_avg(),\n self.value_collectors['f1'].get_avg(),\n self.value_collectors['f2'].get_avg(),\n self.value_collectors['f3'].get_avg(),\n self.value_collectors['f4'].get_avg(),\n self.value_collectors['f5'].get_avg(),\n self.value_collectors['f6'].get_avg(),\n self.value_collectors['f_total'].get_avg()\n ],\n ['max',\n self.value_collectors['f0'].max,\n self.value_collectors['f1'].max,\n self.value_collectors['f2'].max,\n self.value_collectors['f3'].max,\n self.value_collectors['f4'].max,\n self.value_collectors['f5'].max,\n self.value_collectors['f6'].max,\n self.value_collectors['f_total'].max\n ],\n ['last_val',\n self.last_f0,\n self.last_f1,\n self.last_f2,\n self.last_f3,\n self.last_f4,\n self.last_f5,\n self.last_f6,\n self.last_f_total\n ]\n ],\n headers=['type', 'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f_total']\n )\n return table_str\n\n\nif __name__ == '__main__':\n vc = ValueCollector()\n vc.add_value(1)\n vc.add_value(2)\n vc.add_value(3)\n print(vc)\n", "import os\nimport time\nfrom typing import List, Tuple, Dict\n\nimport pandas as pd\nfrom dask import delayed\nfrom dask.delayed import Delayed\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom 
experiments.dask_utils.computations import compute_delayed_functions\nfrom experiments.dask_utils.dask_initialization import reconnect_client_to_ssh_cluster\n\nfrom experiments.arcbench_data_preparation.reworked_one_hot_encoding import \\\n get_one_hot_encoded_data_fold_abs_file_name\nfrom experiments.arcbench_data_preparation.reworked_one_hot_encoding import get_original_data_fold_abs_file_name, \\\n TrainTestEnum\nfrom experiments.decision_tree_rule_learning.attribute_grouping import AttrGroupPartitioning, AttrGroup\nfrom experiments.decision_tree_rule_learning.data_preparation import PreparedDataForTargetSet, get_attr_groupings, \\\n get_prepared_data_for_attr_group\nfrom experiments.decision_tree_rule_learning.tree_ensemble_generation import convert_random_forest_to_rules\nfrom experiments.decision_tree_rule_learning.timing_utils import store_tree_rule_gen_timing_info, TreeRuleGenTimingInfo\nfrom experiments.decision_tree_rule_learning.relative_file_naming import \\\n get_tree_derived_rules_rel_file_name_without_extension\n\nfrom experiments.utils.experiment_logging import create_logger, close_logger\nfrom experiments.file_naming.car_naming import (\n get_tree_derived_rules_abs_file_name,\n get_tree_derived_rules_logger_abs_file_name, get_tree_derived_rules_dir,\n get_tree_derived_rules_gen_timing_info_abs_file_name\n)\nfrom experiments.file_naming.column_encodings import get_encodings_book_keeper_abs_file_name_for\nfrom experiments.file_naming.single_target_classifier_indicator import SingleTargetClassifierIndicator\n\nfrom mdrsl.data_structures.rules.multi_target_class_association_rule import MCAR\nfrom mdrsl.data_handling.one_hot_encoding.encoding_book_keeping import EncodingBookKeeper\nfrom mdrsl.data_handling.one_hot_encoding.encoding_io import load_encoding_book_keeper\nfrom mdrsl.rule_models.mids.io_mids import store_mcars\nfrom mdrsl.rule_models.mids.cover.cover_checker import CoverChecker\n\nAttr = str\n\n\ndef learn_and_convert_tree_model_to_rules(\n dataset_name: str,\n fold_i: int,\n nb_of_trees_per_model: int,\n nb_of_original_targets_to_predict: int,\n nb_grouping_iterations: int,\n min_support: float,\n max_depth: int,\n seed: int\n):\n classifier_indicator = SingleTargetClassifierIndicator.random_forest\n train_test = TrainTestEnum.train\n\n logger = create_logger(\n logger_name=f'mine_multi-target_cars_tree_derived_' + get_tree_derived_rules_rel_file_name_without_extension(\n dataset_name=dataset_name, fold_i=fold_i, classifier_indicator=classifier_indicator,\n nb_of_trees_per_model=nb_of_trees_per_model,\n nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,\n min_support=min_support, max_depth=max_depth),\n log_file_name=get_tree_derived_rules_logger_abs_file_name(\n dataset_name=dataset_name, fold_i=fold_i,\n classifier_indicator=classifier_indicator, nb_of_trees_per_model=nb_of_trees_per_model,\n nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,\n min_support=min_support, max_depth=max_depth\n )\n )\n\n # --- load train data ---------------------------------------------------------------------------------------------\n df_original = pd.read_csv(get_original_data_fold_abs_file_name(dataset_name, fold_i, train_test),\n delimiter=',')\n df_one_hot_encoded = pd.read_csv(get_one_hot_encoded_data_fold_abs_file_name(dataset_name, fold_i, train_test),\n delimiter=\",\")\n\n encoding_book_keeper: EncodingBookKeeper = load_encoding_book_keeper(\n get_encodings_book_keeper_abs_file_name_for(dataset_name, fold_i))\n\n # --- prepare data 
------------------------------------------------------------------------------------------------\n logger.info(f\"Start preparing data using {nb_of_original_targets_to_predict} attrs per group\"\n f\" with {nb_grouping_iterations} grouping iterations\")\n\n different_attr_groupings: List[AttrGroupPartitioning] = get_attr_groupings(\n nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,\n nb_grouping_iterations=nb_grouping_iterations,\n encoding_book_keeper=encoding_book_keeper\n )\n\n complete_rule_list: List[MCAR] = []\n cover_checker = CoverChecker()\n\n total_time_random_forest_learning_s = 0.0\n total_time_rf_conversion_s = 0.0\n # prepared_data_list: List[PreparedDataForTargetSet] = []\n for original_target_attribute_partitioning in different_attr_groupings:\n attr_group: AttrGroup\n for attr_group in original_target_attribute_partitioning:\n prepared_data: PreparedDataForTargetSet = get_prepared_data_for_attr_group(\n original_group_to_predict=attr_group,\n df_original=df_original,\n df_one_hot_encoded=df_one_hot_encoded,\n encoding_book_keeper=encoding_book_keeper\n )\n # prepared_data_list.append(prepared_data)\n\n start_time_decision_tree_learning_s = time.time()\n classifier: RandomForestClassifier = RandomForestClassifier(\n n_estimators=nb_of_trees_per_model,\n random_state=seed,\n min_samples_leaf=min_support,\n max_depth=max_depth\n )\n\n # --- Learn a random forest given the current number of trees -----------------------------------\n classifier.fit(\n prepared_data.df_one_hot_encoded_descriptive_attributes,\n prepared_data.df_one_hot_encoded_target_attributes)\n end_time_decision_tree_learning_s = time.time()\n total_time_decision_tree_learning_s: float = end_time_decision_tree_learning_s - start_time_decision_tree_learning_s\n total_time_random_forest_learning_s += total_time_decision_tree_learning_s\n\n tree_based_rules: List[MCAR]\n total_time_rf_conversion_s: float\n tree_based_rules, partial_time_rf_conversion_s = convert_random_forest_to_rules(\n random_forest_clf=classifier,\n df_original_without_nans=prepared_data.df_original_without_nans_for_targets,\n descriptive_one_hot_encoded_column_names=prepared_data.descriptive_one_hot_encoded_columns,\n # target_attribute_names=df_original_target_attrs_without_nans.columns,\n target_attribute_names=prepared_data.target_one_hot_encoded_columns,\n encoding_book_keeper=encoding_book_keeper,\n logger=logger\n )\n total_time_rf_conversion_s += partial_time_rf_conversion_s\n complete_rule_list.extend(tree_based_rules)\n\n logger.info(f\"Complete set size: {len(complete_rule_list)}\")\n\n # --- Save rules to file ---------------------------------------------------------------------------------\n\n tree_clf_derived_rules_abs_file_name = get_tree_derived_rules_abs_file_name(dataset_name,\n fold_i,\n classifier_indicator,\n nb_of_trees_per_model,\n nb_of_original_targets_to_predict,\n min_support,\n max_depth)\n store_mcars(tree_clf_derived_rules_abs_file_name, complete_rule_list)\n logger.info(f\"finished writing tree-derived ruled to file: {tree_clf_derived_rules_abs_file_name}\")\n logger.info(\"==================================================================\")\n\n tree_rule_gen_timing_info = TreeRuleGenTimingInfo(\n total_time_decision_tree_learning_s=total_time_random_forest_learning_s,\n total_time_rf_conversion_s=total_time_rf_conversion_s\n )\n\n tree_rule_gen_timing_info_abs_file_name: str = get_tree_derived_rules_gen_timing_info_abs_file_name(\n dataset_name,\n fold_i,\n classifier_indicator,\n 
nb_of_trees_per_model,\n nb_of_original_targets_to_predict,\n min_support,\n max_depth\n )\n store_tree_rule_gen_timing_info(tree_rule_gen_timing_info_abs_file_name, tree_rule_gen_timing_info)\n\n close_logger(logger)\n\n\ndef main():\n from experiments.arcbench_data_preparation.dataset_info import datasets\n datasets = [dict(filename=\"iris\", targetvariablename=\"class\", numerical=True)]\n from experiments.dask_utils.dask_initialization import scheduler_host_name\n scheduler_host: str = scheduler_host_name\n list_of_computations: List[Tuple[Delayed, Dict]] = []\n\n seed: int = 3\n nb_of_folds: int = 10\n nb_of_original_targets_to_predict: int = 2\n nb_grouping_iterations = 5\n\n nb_of_trees_per_model_list: List[int] = [5, 10]\n min_support: float = 0.1 # min_samples_leaf must be at least 1 or in (0, 0.5], got 0\n\n max_depth: int = 7 - nb_of_original_targets_to_predict\n\n use_dask = False\n if use_dask:\n client = reconnect_client_to_ssh_cluster(scheduler_host)\n\n for dataset_info in datasets:\n dataset_name = dataset_info['filename']\n\n for fold_i in range(nb_of_folds):\n\n for nb_of_trees_per_model in nb_of_trees_per_model_list:\n\n if use_dask:\n\n func_args = dict(\n dataset_name=dataset_name,\n fold_i=fold_i,\n nb_of_trees_per_model=nb_of_trees_per_model,\n nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,\n nb_grouping_iterations=nb_grouping_iterations,\n min_support=min_support,\n max_depth=max_depth,\n seed=seed\n )\n\n delayed_func = \\\n delayed(learn_and_convert_tree_model_to_rules)(\n **func_args\n )\n list_of_computations.append((delayed_func, func_args))\n else:\n learn_and_convert_tree_model_to_rules(\n dataset_name=dataset_name,\n fold_i=fold_i,\n nb_of_trees_per_model=nb_of_trees_per_model,\n nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,\n nb_grouping_iterations=nb_grouping_iterations,\n min_support=min_support,\n max_depth=max_depth,\n seed=seed\n )\n if use_dask:\n log_file_dir: str = get_tree_derived_rules_dir()\n\n logger_name: str = 'multi_target_tree_rule_generation_ERROR_LOGGER'\n logger_file_name: str = os.path.join(\n log_file_dir,\n f'ERROR_LOG_multi_target_tree_rule_generation.log'\n )\n\n compute_delayed_functions(\n list_of_computations=list_of_computations,\n client=client,\n nb_of_retries_if_erred=5,\n error_logger_name=logger_name,\n error_logger_file_name=logger_file_name\n )\n\n\nif __name__ == '__main__':\n main()\n", "from typing import List, Optional, Dict, Set, Tuple\nimport time\n\nimport numpy as np\n\nfrom mdrsl.rule_models.mids.cover.cover_checker import CoverChecker\nfrom mdrsl.rule_models.mids.cover.cover_metric import get_avg_incorrect_cover_size\nfrom mdrsl.rule_models.mids.objective_function.mids_objective_function_abstract import AbstractMIDSObjectiveFunction\nfrom mdrsl.rule_models.mids.cover.overlap_cacher import OverlapChecker\nfrom mdrsl.rule_models.mids.objective_function.f2_f3_cacher import f2_f3_combo_minimize_overlap_predicting_the_same_and_different_class_caching\nfrom mdrsl.rule_models.mids.mids_ruleset import MIDSRuleSet\nfrom mdrsl.rule_models.mids.objective_function.mids_objective_function_parameters import ObjectiveFunctionParameters\n\nfrom submodmax.abstract_optimizer import AbstractSubmodularFunction\n\nTargetAttr = str\nTargetVal = str\n\n\nclass MIDSObjectiveFunction(AbstractSubmodularFunction, AbstractMIDSObjectiveFunction):\n\n def __init__(self, objective_func_params: ObjectiveFunctionParameters,\n cover_checker: CoverChecker,\n overlap_checker: OverlapChecker,\n 
scale_factor: float = 1.0):\n AbstractMIDSObjectiveFunction.__init__(self, objective_func_params,\n cover_checker=cover_checker,\n overlap_checker=overlap_checker,\n scale_factor=scale_factor)\n\n def f1_minimize_total_nb_of_literals(self, solution_set: MIDSRuleSet):\n \"\"\"\n Minimize the total number of terms in the rule set\n\n :param solution_set:\n :return:\n \"\"\"\n\n upper_bound_nb_of_literals = self.objective_func_params.f1_upper_bound_nb_literals\n f1_unnormalized = upper_bound_nb_of_literals - solution_set.sum_rule_length()\n\n if self.normalize:\n f1 = f1_unnormalized / upper_bound_nb_of_literals\n else:\n f1 = f1_unnormalized\n self._normalized_boundary_check(f1, 'f1')\n return f1\n\n # def f2_minimize_overlap_predicting_the_same_class(self, solution_set: MIDSRuleSet):\n # \"\"\"\n # Minimize the overlap of rules predicting the same class value.\n #\n # :param solution_set:\n # :return:\n # \"\"\"\n #\n # quant_dataframe = self.objective_func_params.quant_dataframe\n # target_attr_to_intraclass_overlap_sum_map: Dict[str, int] = {}\n #\n # for i, rule_i in enumerate(solution_set.ruleset):\n # for j, rule_j in enumerate(solution_set.ruleset):\n # if i >= j:\n # continue\n #\n # target_attr_rule_i = set(rule_i.car.consequent.itemset.keys())\n # target_attr_rule_j = set(rule_j.car.consequent.itemset.keys())\n # shared_attributes = target_attr_rule_i.intersection(target_attr_rule_j)\n #\n # for target_attr in shared_attributes:\n # target_value_rule_i = rule_i.car.consequent.itemset[target_attr]\n # target_value_rule_j = rule_j.car.consequent.itemset[target_attr]\n #\n # if target_value_rule_i == target_value_rule_j:\n # overlap_tmp = self.overlap_checker.get_pure_overlap_count(rule_i, rule_j, quant_dataframe)\n # target_attr_to_intraclass_overlap_sum_map[\n # target_attr] = target_attr_to_intraclass_overlap_sum_map.get(target_attr, 0) + overlap_tmp\n #\n # f2: float = 0\n # for target_attr in self.objective_func_params.f2_f3_target_attr_to_upper_bound_map.keys():\n # f2_target_attr_upper_bound = self.objective_func_params.f2_f3_target_attr_to_upper_bound_map[target_attr]\n # target_attr_intraclass_overlap_sum = target_attr_to_intraclass_overlap_sum_map[target_attr]\n # if self.normalize:\n # f2 = f2 + (f2_target_attr_upper_bound - target_attr_intraclass_overlap_sum) / f2_target_attr_upper_bound\n # else:\n # f2 = f2 + (f2_target_attr_upper_bound - target_attr_intraclass_overlap_sum)\n #\n # nb_of_target_attributes = self.objective_func_params.nb_of_target_attrs\n #\n # f2 = f2 / nb_of_target_attributes\n # self._normalized_boundary_check(f2, 'f2')\n # return f2\n #\n # def f3_minimize_overlap_predicting_different_class(self, solution_set: MIDSRuleSet):\n # \"\"\"\n # Term minimizing the overlap of rules predicting the different class values.\n #\n # :param solution_set:\n # :return:\n # \"\"\"\n #\n # quant_dataframe = self.objective_func_params.quant_dataframe\n # target_attr_to_interclass_overlap_sum_map: Dict[str, int] = {}\n #\n # for i, rule_i in enumerate(solution_set.ruleset):\n # for j, rule_j in enumerate(solution_set.ruleset):\n # if i >= j:\n # continue\n #\n # target_attr_rule_i = set(rule_i.car.consequent.itemset.keys())\n # target_attr_rule_j = set(rule_j.car.consequent.itemset.keys())\n # shared_attributes = target_attr_rule_i.intersection(target_attr_rule_j)\n #\n # for target_attr in shared_attributes:\n # target_value_rule_i = rule_i.car.consequent.itemset[target_attr]\n # target_value_rule_j = rule_j.car.consequent.itemset[target_attr]\n #\n # if 
target_value_rule_i != target_value_rule_j:\n # overlap_tmp = self.overlap_checker.get_pure_overlap_count(rule_i, rule_j, quant_dataframe)\n # target_attr_to_interclass_overlap_sum_map[target_attr] = \\\n # target_attr_to_interclass_overlap_sum_map.get(target_attr, 0) + overlap_tmp\n # f3: float = 0\n # for target_attr in self.objective_func_params.f2_f3_target_attr_to_upper_bound_map.keys():\n # f3_target_attr_upper_bound: int = self.objective_func_params.f2_f3_target_attr_to_upper_bound_map[\n # target_attr]\n # target_attr_interclass_overlap_sum: int = target_attr_to_interclass_overlap_sum_map[target_attr]\n # if self.normalize:\n # f3 = f3 + (f3_target_attr_upper_bound - target_attr_interclass_overlap_sum)\n # else:\n # f3 = f3 + (f3_target_attr_upper_bound - target_attr_interclass_overlap_sum) / f3_target_attr_upper_bound\n #\n # nb_of_target_attributes = self.objective_func_params.nb_of_target_attrs\n # f3 = f3 / nb_of_target_attributes\n #\n # self._normalized_boundary_check(f3, 'f3')\n # return f3\n\n def _f2_f3_get_overlap_sum_maps(self,\n solution_set: MIDSRuleSet) -> Tuple[Dict[TargetAttr, int], Dict[TargetAttr, int]]:\n quant_dataframe = self.objective_func_params.quant_dataframe\n\n f2_target_attr_to_intra_class_overlap_sum_map: Dict[TargetAttr, int] = {}\n f3_target_attr_to_inter_class_overlap_sum_map: Dict[TargetAttr, int] = {}\n\n for i, rule_i in enumerate(solution_set.ruleset):\n for j, rule_j in enumerate(solution_set.ruleset):\n if i >= j:\n continue\n\n target_attrs_rule_i = rule_i.get_target_attributes()\n target_attrs_rule_j = rule_j.get_target_attributes()\n shared_attributes = target_attrs_rule_i & target_attrs_rule_j\n\n # if both rules have at least one target attribute in common\n if len(shared_attributes) > 0:\n overlap_count = self.overlap_checker.get_pure_overlap_count(rule_i, rule_j, quant_dataframe)\n\n for target_attr in shared_attributes:\n # check whether the rules predict the same value for the target attribute\n target_value_rule_i = rule_i.get_predicted_value_for(target_attr)\n target_value_rule_j = rule_j.get_predicted_value_for(target_attr)\n\n if target_value_rule_i == target_value_rule_j:\n f2_target_attr_to_intra_class_overlap_sum_map[target_attr] = \\\n f2_target_attr_to_intra_class_overlap_sum_map.get(target_attr, 0) + overlap_count\n else:\n f3_target_attr_to_inter_class_overlap_sum_map[target_attr] = \\\n f3_target_attr_to_inter_class_overlap_sum_map.get(target_attr, 0) + overlap_count\n\n return f2_target_attr_to_intra_class_overlap_sum_map, f3_target_attr_to_inter_class_overlap_sum_map\n\n def _calc_f2_f3_from_map(self, target_attr_to_overlap_sum_map: Dict[TargetAttr, int]) -> float:\n nb_of_target_attributes = self.objective_func_params.nb_of_target_attrs\n\n f_val: float = 0\n for target_attr in self.objective_func_params.f2_f3_target_attr_to_upper_bound_map.keys():\n f2_f3_target_attr_upper_bound = self.objective_func_params.f2_f3_target_attr_to_upper_bound_map[target_attr]\n target_attr_overlap_sum: int = target_attr_to_overlap_sum_map.get(target_attr, 0)\n if f2_f3_target_attr_upper_bound != 0:\n if self.normalize:\n f_val = f_val + (\n f2_f3_target_attr_upper_bound - target_attr_overlap_sum) / f2_f3_target_attr_upper_bound\n else:\n f_val = f_val + (f2_f3_target_attr_upper_bound - target_attr_overlap_sum)\n f_val = f_val / nb_of_target_attributes\n return f_val\n\n def f2_f3_combo_minimize_overlap_predicting_the_same_and_different_class(self,\n solution_set: MIDSRuleSet) -> Tuple[float, float]:\n\n 
f2_target_attr_to_intra_class_overlap_sum_map: Dict[TargetAttr, int]\n f3_target_attr_to_inter_class_overlap_sum_map: Dict[TargetAttr, int]\n f2_target_attr_to_intra_class_overlap_sum_map, f3_target_attr_to_inter_class_overlap_sum_map = \\\n self._f2_f3_get_overlap_sum_maps(solution_set)\n\n for target_attr in f2_target_attr_to_intra_class_overlap_sum_map.keys():\n if target_attr not in self.objective_func_params.target_attr_set:\n raise Exception(f\"Illegal target attr: {target_attr}\")\n for target_attr in f3_target_attr_to_inter_class_overlap_sum_map.keys():\n if target_attr not in self.objective_func_params.target_attr_set:\n raise Exception(f\"Illegal target attr: {target_attr}\")\n\n f2: float = self._calc_f2_f3_from_map(f2_target_attr_to_intra_class_overlap_sum_map)\n f3: float = self._calc_f2_f3_from_map(f3_target_attr_to_inter_class_overlap_sum_map)\n\n self._normalized_boundary_check(f2, 'f2')\n self._normalized_boundary_check(f3, 'f3')\n\n return f2, f3\n\n def f2_f3_combo_minimize_overlap_predicting_the_same_and_different_class_using_cache(self,\n solution_set: MIDSRuleSet) -> \\\n Tuple[float, float]:\n return f2_f3_combo_minimize_overlap_predicting_the_same_and_different_class_caching(self.f2_f3_cache,\n solution_set\n )\n\n def f4_at_least_one_rule_per_attribute_value_combo(self, solution_set: MIDSRuleSet):\n \"\"\"\n The requirement to have one rule for each value of each attribute might need to be relaxed,\n as it is no longer guaranteed that each value of each attribute occurs in at least one rule head.\n\n\n :param solution_set:\n :return:\n \"\"\"\n\n # 1. gather for each attribute the unique values that are predicted\n target_attr_to_val_set_dict: Dict[TargetAttr, Set[TargetVal]] \\\n = solution_set.get_predicted_values_per_predicted_attribute()\n\n # 2. 
count the total nb of values that are predicted over all attributes\n total_nb_of_attribute_values_covered: int = 0\n for target_attr in self.objective_func_params.f4_target_attr_to_dom_size_map.keys():\n predicted_values: Optional[Set[TargetVal]] = target_attr_to_val_set_dict.get(target_attr, None)\n if predicted_values is None:\n nb_of_predicted_values: int = 0\n else:\n nb_of_predicted_values: int = len(predicted_values)\n\n if self.normalize:\n target_attr_dom_size: int = self.objective_func_params.f4_target_attr_to_dom_size_map[target_attr]\n total_nb_of_attribute_values_covered += nb_of_predicted_values / target_attr_dom_size\n else:\n total_nb_of_attribute_values_covered += nb_of_predicted_values\n\n f4: float = total_nb_of_attribute_values_covered / self.objective_func_params.nb_of_target_attrs\n\n self._normalized_boundary_check(f4, 'f4')\n return f4\n\n def f5_minimize_incorrect_cover(self, solution_set: MIDSRuleSet):\n \"\"\"\n Mazimize the precision, or minimize the nb examples that are in the incorrect-cover set the rules\n Parameters\n ----------\n solution_set\n\n Returns\n -------\n\n \"\"\"\n\n # nb_of_instances = self.objective_func_params.nb_of_training_examples\n # len_all_rules = self.objective_func_params.ground_set_size\n quant_dataframe = self.objective_func_params.quant_dataframe\n\n sum_incorrect_cover = 0\n\n for rule in solution_set.ruleset:\n # self.cover_checker.\n sum_incorrect_cover += get_avg_incorrect_cover_size(rule, quant_dataframe, self.cover_checker)\n\n f5_upper_bound: int = self.objective_func_params.f5_upper_bound\n\n # print(f\"MIDS f5 upper bound: {f5_upper_bound}\")\n # print(f\"MIDS f5 sum incorrect cover: {sum_incorrect_cover}\")\n f5 = f5_upper_bound - sum_incorrect_cover\n if self.normalize:\n f5 = f5 / f5_upper_bound\n\n self._normalized_boundary_check(f5, 'f5')\n\n return f5\n\n def f6_cover_each_example(self, solution_set: MIDSRuleSet):\n \"\"\"\n Originally:\n Each data point should be covered by at least one rule.\n In other words,\n Each instance should be in the correct cover set (with respect to the target attribute) of at least one rule.\n\n\n Extension to multi-target rules:\n Each instance should be in the correct cover set of at least one rule for each of its attributes.\n\n :param solution_set:\n :return:\n \"\"\"\n # TODO: this is super expensive\n\n quant_dataframe = self.objective_func_params.quant_dataframe\n nb_of_training_examples = self.objective_func_params.nb_of_training_examples\n nb_of_target_attrs = self.objective_func_params.nb_of_target_attrs\n target_attrs: List[TargetAttr] = self.objective_func_params.target_attrs\n\n sum_correct_cover_sizes_over_all_attributes = 0\n\n for target_attr in target_attrs:\n correctly_covered_instances_for_attribute_by_at_least_one_rule = np.zeros(nb_of_training_examples, dtype=bool)\n\n for rule in solution_set.ruleset:\n if target_attr in rule.get_target_attributes():\n correct_cover_mask = self.cover_checker.get_correct_cover(\n rule, quant_dataframe, target_attribute=target_attr)\n correctly_covered_instances_for_attribute_by_at_least_one_rule = np.logical_or(\n correctly_covered_instances_for_attribute_by_at_least_one_rule,\n correct_cover_mask\n )\n\n sum_correct_cover_sizes_over_all_attributes += np.sum(\n correctly_covered_instances_for_attribute_by_at_least_one_rule)\n\n f6 = sum_correct_cover_sizes_over_all_attributes / (\n nb_of_training_examples * self.objective_func_params.nb_of_target_attrs)\n\n self._normalized_boundary_check(f6, 'f6')\n\n return f6\n\n def 
evaluate(self, solution_set: MIDSRuleSet):\n if type(solution_set) == set:\n solution_set = MIDSRuleSet(solution_set)\n\n if type(solution_set) != MIDSRuleSet:\n raise Exception(\"Type of solution_set must be MIDSRuleSet\")\n\n self.call_counter += 1\n self.set_size_collector.add_value(len(solution_set))\n start_time = time.time()\n\n l: List[float] = self.objective_func_params.lambda_array\n\n ground_set_size = self.objective_func_params.ground_set_size\n current_nb_of_rules = len(solution_set)\n\n f0 = self.f0_minimize_rule_set_size(ground_set_size, current_nb_of_rules)\n f1 = self.f1_minimize_total_nb_of_literals(solution_set)\n\n if MIDSObjectiveFunction.should_cache_f2_f3:\n f2, f3 = self.f2_f3_combo_minimize_overlap_predicting_the_same_and_different_class_using_cache(solution_set)\n else:\n f2, f3 = self.f2_f3_combo_minimize_overlap_predicting_the_same_and_different_class(solution_set)\n # f2 = self.f2_minimize_overlap_predicting_the_same_class(solution_set)\n # f3 = self.f3_minimize_overlap_predicting_different_class(solution_set)\n f4 = self.f4_at_least_one_rule_per_attribute_value_combo(solution_set)\n f5 = self.f5_minimize_incorrect_cover(solution_set)\n f6 = self.f6_cover_each_example(solution_set)\n\n fs = np.array([\n f0, f1, f2, f3, f4, f5, f6\n ]) / self.scale_factor\n\n result = np.dot(l, fs)\n\n if self.stat_collector is not None:\n self.stat_collector.add_values(f0, f1, f2, f3, f4, f5, f6, result)\n\n\n end_time = time.time()\n elapsed_time = end_time - start_time\n self.run_time_collector.add_value(elapsed_time)\n\n self.f0_val = f0\n self.f1_val = f1\n self.f2_val = f2\n self.f3_val = f3\n self.f4_val = f4\n self.f5_val = f5\n self.f6_val = f6\n\n # print(f\"MIDS f1:{f1}\")\n\n return result\n\n def f0(self, solution_set):\n current_nb_of_rules: int = len(solution_set)\n ground_set_size = len(self.objective_func_params.all_rules)\n return self.f0_minimize_rule_set_size(ground_set_size=ground_set_size,\n current_nb_of_rules=current_nb_of_rules)\n\n def f1(self, solution_set):\n return self.f1_minimize_total_nb_of_literals(solution_set)\n\n def f2(self, solution_set):\n f2, f3 = self.f2_f3_combo_minimize_overlap_predicting_the_same_and_different_class(solution_set)\n return f2\n\n def f3(self, solution_set):\n f2, f3 = self.f2_f3_combo_minimize_overlap_predicting_the_same_and_different_class(solution_set)\n return f3\n\n def f4(self, solution_set):\n return self.f4_at_least_one_rule_per_attribute_value_combo(solution_set)\n\n def f5(self, solution_set):\n return self.f5_minimize_incorrect_cover(solution_set)\n\n def f6(self, solution_set):\n return self.f6_cover_each_example(solution_set)\n\n\nif __name__ == '__main__':\n d1 = {'a': 1, 'b': 2}\n d2 = {'a': 3, 'c': 4}\n combo = d1.keys() & d2.keys()\n print(combo)\n print(len(combo))\n for k in combo:\n print(k)\n" ]
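A minimal sketch of the aggregation step in the `evaluate` method above: the seven sub-objective values f0..f6 are scaled and combined with the lambda weight vector via a dot product (the weights and values below are made up for illustration):

import numpy as np

def combine_objectives(lambdas, f_values, scale_factor=1.0):
    # Weighted sum of scaled sub-objective values, mirroring np.dot(l, fs) above.
    fs = np.asarray(f_values, dtype=float) / scale_factor
    return float(np.dot(np.asarray(lambdas, dtype=float), fs))

# Seven equally weighted, normalized sub-objectives.
print(combine_objectives([1.0] * 7, [0.9, 0.8, 1.0, 1.0, 0.5, 0.7, 0.6]))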
[ [ "numpy.array" ], [ "pandas.read_csv" ], [ "pandas.DataFrame" ], [ "sklearn.ensemble.RandomForestClassifier" ], [ "numpy.dot", "numpy.logical_or", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ashishpatel26/ML-DL-scripts
[ "25f930630f6e546955ad13863d6e728c8c702d43", "25f930630f6e546955ad13863d6e728c8c702d43" ]
[ "NLP/LSTM RNN/WSDM - Fake News Classification/Berd generate embeddings/3_bert_encode_ch_test.py", "DEEP LEARNING/instance segmentation/Kaggle TGS Salt Identification Challenge/v2/common_blocks/augmentation.py" ]
[ "# Please run bert-serving-start before running this notebook\n# Setup: https://github.com/hanxiao/bert-as-service\n# Examples (change folders to your locals)\n# english cased: bert-serving-start -model_dir /bert-as-service/cased_L-24_H-1024_A-16/ -num_worker=4\n# multi cased: bert-serving-start -model_dir /bert-as-service/multi_cased_L-12_H-768_A-12/ -num_worker=4\n# chinese: bert-serving-start -model_dir /bert-as-service/chinese_L-12_H-768_A-12/ -num_worker=4\n\n# launch bert (valilenk): \n# english cased: bert-serving-start -model_dir /media/airvetra/1tb/valilenk/nlp/bert-as-service/cased_L-24_H-1024_A-16/ -num_worker=2\n# multi cased: bert-serving-start -model_dir /media/airvetra/1tb/valilenk/nlp/bert-as-service/multi_cased_L-12_H-768_A-12/ -num_worker=2\n# chinese: bert-serving-start -model_dir /media/airvetra/1tb/valilenk/nlp/bert-as-service/chinese_L-12_H-768_A-12/ -num_worker=2\n\nimport pandas as pd\nimport torch\nimport os\nfrom time import time\nfrom tqdm import tqdm\nfrom bert_serving.client import BertClient\n\ndata_folder = os.path.dirname(os.getcwd())+'/data'\ntest = pd.read_csv(data_folder+'/raw/test.csv')\n\nbc = BertClient()\ndef gen_encodings(df, column):\n t0 = time()\n _list = list(df.loc[:, column])\n for i, text in enumerate(_list):\n if not isinstance(_list[i], str):\n _list[i] = str(text)\n if not _list[i].strip():\n _list[i] = _list[i].strip()\n if len(_list[i]) == 0:\n _list[i] = 'temp'\n arr = bc.encode(_list)\n temp = pd.DataFrame(arr)\n temp.columns = [f'{column}_{c}' for c in range(len(arr[0]))]\n temp = temp.join(df.id)\n print(f'time: {time() - t0}')\n return temp\n\nencoded_test = gen_encodings(test, 'title1_zh')\nencoded_test.to_csv('encoded_ch_test1.csv')\nencoded_test = gen_encodings(test, 'title2_zh')\nencoded_test.to_csv('encoded_ch_test2.csv')", "import cv2\nimport numpy as np\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\n\nfrom .utils import get_crop_pad_sequence, reseed\n\n\ndef _perspective_transform_augment_images(self, images, random_state, parents, hooks):\n result = images\n if not self.keep_size:\n result = list(result)\n\n matrices, max_heights, max_widths = self._create_matrices(\n [image.shape for image in images],\n random_state\n )\n\n for i, (M, max_height, max_width) in enumerate(zip(matrices, max_heights, max_widths)):\n warped = cv2.warpPerspective(images[i], M, (max_width, max_height))\n if warped.ndim == 2 and images[i].ndim == 3:\n warped = np.expand_dims(warped, 2)\n if self.keep_size:\n h, w = images[i].shape[0:2]\n warped = ia.imresize_single_image(warped, (h, w))\n\n result[i] = warped\n\n return result\n\n\niaa.PerspectiveTransform._augment_images = _perspective_transform_augment_images\n\naffine_seq = iaa.Sequential([\n# General\niaa.SomeOf((1, 2),\n [iaa.Fliplr(0.5),\n iaa.Affine(rotate=(-2, 2),\n translate_percent={\"x\": (-0.04, 0.04)},\n mode='edge')#symmetric (-0.25, 0.25)\n #iaa.CropAndPad(percent=((0.0, 0.10), (0.0, 0.05), (0.0, 0.10), (0.0, 0.05)), pad_mode='symmetric')\n ]),\n#>>> aug = iaa.CropAndPad(px=((0, 10), (0, 5), (0, 10), (0, 5)))\n# pads the top and bottom by a random value from the range 0px to 10px\n# and the left and right by a random value in the range 0px to 5px.\n\n# Deformations\niaa.Sometimes(0.05, iaa.PiecewiseAffine(scale=(0.04, 0.08))),\niaa.Sometimes(0.05, iaa.PerspectiveTransform(scale=(0.05, 0.1))),\n], random_order=True)\nintensity_seq = iaa.Sequential([\n iaa.Noop()\n], random_order=False)\n'''\nintensity_seq = iaa.Sequential([\niaa.Invert(0.3),\niaa.Sometimes(0.3, 
iaa.ContrastNormalization((0.5, 1.5))),\niaa.OneOf([\n iaa.Noop(),\n iaa.Sequential([\n iaa.OneOf([\n iaa.Add((-10, 10)),\n iaa.AddElementwise((-10, 10)),\n iaa.Multiply((0.95, 1.05)),\n iaa.MultiplyElementwise((0.95, 1.05)),\n ]),\n ]),\n iaa.OneOf([\n iaa.GaussianBlur(sigma=(0.0, 1.0)),\n iaa.AverageBlur(k=(2, 5)),\n iaa.MedianBlur(k=(3, 5))\n ])\n])\n], random_order=False)\n'''\n\ntta_intensity_seq = iaa.Sequential([\n iaa.Noop()\n], random_order=False)\n\n\ndef resize_pad_seq(resize_target_size, pad_method, pad_size):\n seq = iaa.Sequential([\n iaa.Scale({'height': resize_target_size, 'width': resize_target_size}),\n PadFixed(pad=(pad_size, pad_size), pad_method=pad_method),\n affine_seq,\n ], random_order=False)\n return seq\n\n\ndef pad_to_fit_net(divisor, pad_mode, rest_of_augs=iaa.Noop()):\n return iaa.Sequential(InferencePad(divisor, pad_mode), rest_of_augs)\n\n\nclass PadFixed(iaa.Augmenter):\n PAD_FUNCTION = {'reflect': cv2.BORDER_REFLECT_101,\n 'edge': cv2.BORDER_REPLICATE\n }\n\n def __init__(self, pad=None, pad_method=None, name=None, deterministic=False, random_state=None):\n super().__init__(name, deterministic, random_state)\n self.pad = pad\n self.pad_method = pad_method\n\n def _augment_images(self, images, random_state, parents, hooks):\n result = []\n for i, image in enumerate(images):\n image_pad = self._pad(image)\n result.append(image_pad)\n return result\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n result = []\n return result\n\n def _pad(self, img):\n img_ = img.copy()\n\n if self._is_expanded_grey_format(img):\n img_ = np.squeeze(img_, axis=-1)\n\n h_pad, w_pad = self.pad\n img_ = cv2.copyMakeBorder(img_.copy(), h_pad, h_pad, w_pad, w_pad, PadFixed.PAD_FUNCTION[self.pad_method])\n\n if self._is_expanded_grey_format(img):\n img_ = np.expand_dims(img_, axis=-1)\n\n return img_\n\n def get_parameters(self):\n return []\n\n def _is_expanded_grey_format(self, img):\n if len(img.shape) == 3 and img.shape[2] == 1:\n return True\n else:\n return False\n\n\ndef test_time_augmentation_transform(image, tta_parameters):\n if tta_parameters['ud_flip']:\n image = np.flipud(image)\n if tta_parameters['lr_flip']:\n image = np.fliplr(image)\n if tta_parameters['color_shift']:\n random_color_shift = reseed(intensity_seq, deterministic=False)\n image = random_color_shift.augment_image(image)\n image = rotate(image, tta_parameters['rotation'])\n return image\n\n\ndef test_time_augmentation_inverse_transform(image, tta_parameters):\n image = per_channel_rotation(image.copy(), -1 * tta_parameters['rotation'])\n\n if tta_parameters['lr_flip']:\n image = per_channel_fliplr(image.copy())\n if tta_parameters['ud_flip']:\n image = per_channel_flipud(image.copy())\n return image\n\n\ndef per_channel_flipud(x):\n x_ = x.copy()\n for i, channel in enumerate(x):\n x_[i, :, :] = np.flipud(channel)\n return x_\n\n\ndef per_channel_fliplr(x):\n x_ = x.copy()\n for i, channel in enumerate(x):\n x_[i, :, :] = np.fliplr(channel)\n return x_\n\n\ndef per_channel_rotation(x, angle):\n return rotate(x, angle, axes=(1, 2))\n\n\ndef rotate(image, angle, axes=(0, 1)):\n if angle % 90 != 0:\n raise Exception('Angle must be a multiple of 90.')\n k = angle // 90\n return np.rot90(image, k, axes=axes)\n\n\nclass RandomCropFixedSize(iaa.Augmenter):\n def __init__(self, px=None, name=None, deterministic=False, random_state=None):\n super(RandomCropFixedSize, self).__init__(name=name, deterministic=deterministic, random_state=random_state)\n self.px = px\n if 
isinstance(self.px, tuple):\n self.px_h, self.px_w = self.px\n elif isinstance(self.px, int):\n self.px_h = self.px\n self.px_w = self.px\n else:\n raise NotImplementedError\n\n def _augment_images(self, images, random_state, parents, hooks):\n\n result = []\n seeds = random_state.randint(0, 10 ** 6, (len(images),))\n for i, image in enumerate(images):\n seed = seeds[i]\n image_cr = self._random_crop(seed, image)\n result.append(image_cr)\n return result\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n result = []\n return result\n\n def _random_crop(self, seed, image):\n height, width = image.shape[:2]\n\n np.random.seed(seed)\n if height > self.px_h:\n crop_top = np.random.randint(height - self.px_h)\n elif height == self.px_h:\n crop_top = 0\n else:\n raise ValueError(\"To big crop height\")\n crop_bottom = crop_top + self.px_h\n\n np.random.seed(seed + 1)\n if width > self.px_w:\n crop_left = np.random.randint(width - self.px_w)\n elif width == self.px_w:\n crop_left = 0\n else:\n raise ValueError(\"To big crop width\")\n crop_right = crop_left + self.px_w\n\n if len(image.shape) == 2:\n image_cropped = image[crop_top:crop_bottom, crop_left:crop_right]\n else:\n image_cropped = image[crop_top:crop_bottom, crop_left:crop_right, :]\n return image_cropped\n\n def get_parameters(self):\n return []\n\n\nclass InferencePad(iaa.Augmenter):\n def __init__(self, divisor=2, pad_mode='symmetric', name=None, deterministic=False, random_state=None):\n super(InferencePad, self).__init__(name=name, deterministic=deterministic, random_state=random_state)\n self.divisor = divisor\n self.pad_mode = pad_mode\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n return keypoints_on_images\n\n def _augment_images(self, images, random_state, parents, hooks):\n\n result = []\n for i, image in enumerate(images):\n image_padded = self._pad_image(image)\n result.append(image_padded)\n return result\n\n def _pad_image(self, image):\n height = image.shape[0]\n width = image.shape[1]\n\n pad_sequence = self._get_pad_sequence(height, width)\n augmenter = iaa.Pad(px=pad_sequence, keep_size=False, pad_mode=self.pad_mode)\n return augmenter.augment_image(image)\n\n def _get_pad_sequence(self, height, width):\n pad_vertical = self._get_pad(height)\n pad_horizontal = self._get_pad(width)\n return get_crop_pad_sequence(pad_vertical, pad_horizontal)\n\n def _get_pad(self, dim):\n if dim % self.divisor == 0:\n return 0\n else:\n return self.divisor - dim % self.divisor\n\n def get_parameters(self):\n return [self.divisor, self.pad_mode]\n" ]
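A minimal check of the round-trip property that the augmentation module above relies on: test-time rotations are restricted to multiples of 90 degrees (`np.rot90`), so the inverse rotation restores the array exactly (`rotate90` is a hypothetical stand-in for the module's `rotate`):

import numpy as np

def rotate90(image, angle, axes=(0, 1)):
    if angle % 90 != 0:
        raise ValueError('Angle must be a multiple of 90.')
    return np.rot90(image, angle // 90, axes=axes)

img = np.arange(12).reshape(3, 4)
restored = rotate90(rotate90(img, 90), -90)   # forward TTA rotation, then its inverse
assert np.array_equal(restored, img)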
[ [ "pandas.read_csv", "pandas.DataFrame" ], [ "numpy.rot90", "numpy.expand_dims", "numpy.random.seed", "numpy.fliplr", "numpy.squeeze", "numpy.flipud", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sburgholzer/MSDS-Capstone-Project
[ "4f22149c7ebff5c3dc129bb785d56f161ab138a8" ]
[ "implementation/spcTornadoCounts.py" ]
[ "import pandas as pd\r\n\r\ndf = pd.read_csv('/mnt/data3/scott/1950-2018_actual_tornadoes.csv')\r\n\r\ndf['date'] = pd.to_datetime(df['date'])\r\nmask = (df['date'] >= '1979-1-1') & (df['date'] <= '2013-12-31')\r\ndf = df.loc[mask]\r\ndf.groupby('date').size()\r\ndf.groupby('date').size().to_csv('/mnt/data3/scott/tornadoCounts.csv')" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
Harald-R/aw_nas
[ "8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783", "8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783", "8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783", "8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783", "8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783" ]
[ "aw_nas/final/rnn_model.py", "aw_nas/final/cnn_trainer.py", "aw_nas/utils/common_utils.py", "aw_nas/hardware/utils.py", "examples/plugins/robustness/2_multi_evaluator.py" ]
[ "#pylint: disable=invalid-name\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom aw_nas import ops\nfrom aw_nas.utils.exception import expect, ConfigException\nfrom aw_nas.weights_manager.rnn_shared import RNNSharedNet, INIT_RANGE\n\nclass RNNGenotypeModel(RNNSharedNet):\n REGISTRY = \"final_model\"\n NAME = \"rnn_model\"\n def __init__(self, search_space, device, genotypes,\n num_tokens, num_emb=300, num_hid=300,\n tie_weight=True, decoder_bias=True,\n share_primitive_weights=False, share_from_weights=False,\n batchnorm_step=False,\n batchnorm_edge=False, batchnorm_out=True,\n # training\n max_grad_norm=5.0,\n # dropout probs\n dropout_emb=0., dropout_inp0=0., dropout_inp=0., dropout_hid=0., dropout_out=0.):\n\n self.genotypes = genotypes\n if isinstance(genotypes, str):\n self.genotypes = eval(\"search_space.genotype_type({})\".format(self.genotypes)) # pylint: disable=eval-used\n self.genotypes = list(self.genotypes._asdict().values())\n # check tos:\n _tos = [conn[2] for conn in self.genotypes[0]]\n (np.argsort(_tos) == np.arange(len(_tos))).all()\n expect((np.argsort(_tos) == np.arange(len(_tos))).all(),\n \"genotype must be ordered in the way that `to_node` monotonously increase\",\n ConfigException)\n\n super(RNNGenotypeModel, self).__init__(\n search_space, device,\n cell_cls=RNNGenotypeCell, op_cls=None,\n num_tokens=num_tokens, num_emb=num_emb, num_hid=num_hid,\n tie_weight=tie_weight, decoder_bias=decoder_bias,\n share_primitive_weights=share_primitive_weights, share_from_weights=share_from_weights,\n batchnorm_step=batchnorm_step,\n batchnorm_edge=batchnorm_edge, batchnorm_out=batchnorm_out,\n max_grad_norm=max_grad_norm,\n dropout_emb=dropout_emb, dropout_inp0=dropout_inp0, dropout_inp=dropout_inp,\n dropout_hid=dropout_hid, dropout_out=dropout_out,\n genotypes=self.genotypes) # this genotypes will be used for construction/forward\n\n self.logger.info(\"Genotype: %s\", self.genotypes)\n\n def forward(self, inputs, hiddens): #pylint: disable=arguments-differ\n # this genotypes will not be used\n return RNNSharedNet.forward(self, inputs, self.genotypes, hiddens)\n\n @classmethod\n def supported_rollout_types(cls):\n # this should not be called\n # assert 0, \"should not be called\"\n return []\n\n def assemble_candidate(self, *args, **kwargs): #pylint: disable=arguments-differ\n # this will not be called\n assert 0, \"should not be called\"\n\nclass RNNGenotypeCell(nn.Module):\n def __init__(self, search_space, device, op_cls, num_emb, num_hid,\n share_from_weights, batchnorm_step,\n batchnorm_edge, batchnorm_out, genotypes, **kwargs):\n super(RNNGenotypeCell, self).__init__()\n self.genotypes = genotypes\n\n self.search_space = search_space\n\n self.num_emb = num_emb\n self.num_hid = num_hid\n self.batchnorm_step = batchnorm_step\n self.batchnorm_edge = batchnorm_edge\n self.batchnorm_out = batchnorm_out\n self.share_from_w = share_from_weights\n self._steps = search_space.num_steps\n self._num_init = search_space.num_init_nodes\n\n # the first step, convert input x and previous hidden\n self.w_prev = nn.Linear(num_emb + num_hid, 2 * num_hid, bias=False)\n self.w_prev.weight.data.uniform_(-INIT_RANGE, INIT_RANGE)\n\n if self.batchnorm_edge:\n # batchnorm on each edge/connection\n # when `num_node_inputs==1`, there is `step + 1` edges\n # the first bn\n self.bn_prev = nn.BatchNorm1d(num_emb + num_hid, affine=True)\n # other bn\n self.bn_edges = nn.ModuleList([nn.BatchNorm1d(num_emb + num_hid, affine=True)\n for _ in range(len(self.genotypes[0]))])\n\n if 
self.batchnorm_step:\n # batchnorm after every step (as in darts's implementation)\n self.bn_steps = nn.ModuleList([nn.BatchNorm1d(num_hid, affine=False)\n for _ in range(self._steps+1)])\n\n if self.batchnorm_out:\n # the out bn\n self.bn_out = nn.BatchNorm1d(num_hid, affine=True)\n\n if self.share_from_w:\n # actually, as `num_node_inputs==1`, thus only one from node is used each step\n # `share_from_w==True/False` are equivalent in final training...\n self.step_weights = nn.ModuleList([\n nn.Linear(num_hid, 2*num_hid, bias=False)\n for _ in range(self._steps)])\n [mod.weight.data.uniform_(-INIT_RANGE, INIT_RANGE) for mod in self.step_weights]\n\n # initiatiate op on edges\n self.Ws = nn.ModuleList()\n self.ops = nn.ModuleList()\n genotype_, _ = self.genotypes\n\n for op_type, _, _ in genotype_:\n # edge weights\n op = ops.get_op(op_type)()\n self.ops.append(op)\n if not self.share_from_w:\n W = nn.Linear(self.num_hid, 2 * self.num_hid, bias=False)\n W.weight.data.uniform_(-INIT_RANGE, INIT_RANGE)\n self.Ws.append(W)\n\n def forward(self, inputs, hidden, x_mask, h_mask, genotypes): #pylint: disable=arguments-differ\n \"\"\"\n Cell forward, forward for one timestep.\n \"\"\"\n genotype, concat_ = self.genotypes # self.genotypes == genotypes\n\n s0 = self._compute_init_state(inputs, hidden, x_mask, h_mask)\n if self.batchnorm_step:\n s0 = self.bn_steps[0](s0)\n\n states = {0: s0}\n\n for i, (_, from_, to_) in enumerate(genotype):\n s_prev = states[from_]\n s_inputs = s_prev\n if self.training:\n s_inputs = s_prev * h_mask\n w = self.step_weights[to_-1] if self.share_from_w else self.Ws[i]\n ch = w(s_inputs)\n if self.batchnorm_edge:\n ch = self.bn_edges[i](ch)\n c, h = torch.split(ch, self.num_hid, dim=-1)\n c = c.sigmoid()\n h = self.ops[i](h)\n out = s_prev + c * (h - s_prev)\n if to_ in states:\n states[to_] = states[to_] + out\n else:\n states[to_] = out\n\n to_finish = i == len(genotype)-1 or genotype[i+1][2] != to_\n if self.batchnorm_step and to_finish:\n # if the calculation of the `to_` step finished, batch norm it\n states[to_] = self.bn_steps[to_](states[to_])\n\n # average the ends\n output = torch.mean(torch.stack([states[i] for i in concat_]), 0)\n if self.batchnorm_out:\n # batchnorm\n output = self.bn_out(output)\n return output\n\n def _compute_init_state(self, x, h, x_mask, h_mask):\n if self.training:\n xh_prev = torch.cat([x * x_mask, h * h_mask], dim=-1)\n else:\n xh_prev = torch.cat([x, h], dim=-1)\n xh_prev = self.w_prev(xh_prev)\n if self.batchnorm_edge:\n xh_prev = self.bn_prev(xh_prev)\n\n c0, h0 = torch.split(xh_prev, self.num_hid, dim=-1)\n c0 = c0.sigmoid()\n h0 = h0.tanh()\n s0 = h + c0 * (h0 - h)\n return s0\n", "# -*- coding: utf-8 -*-\n\nimport os\nimport random\nimport functools\n\nimport six\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom aw_nas import utils\nfrom aw_nas.final.base import FinalTrainer\nfrom aw_nas.final.bnn_model import BNNGenotypeModel\nfrom aw_nas.utils.common_utils import nullcontext\nfrom aw_nas.utils.exception import expect\nfrom aw_nas.utils import DataParallel\nfrom aw_nas.utils import DistributedDataParallel\nfrom aw_nas.utils.torch_utils import calib_bn, GroupSampler, DistributedGroupSampler\nfrom aw_nas.utils.parallel_utils import get_dist_info\n\n\ntry:\n from torch.nn import SyncBatchNorm\n convert_sync_bn = SyncBatchNorm.convert_sync_batchnorm\nexcept ImportError:\n utils.getLogger(\"cnn_trainer\").warn(\n \"Import convert_sync_bn failed! 
SyncBatchNorm might not work!\")\n convert_sync_bn = lambda m: m\n\ndef _warmup_update_lr(optimizer, epoch, init_lr, warmup_epochs, warmup_ratio=0.0):\n \"\"\"\n update learning rate of optimizers\n \"\"\"\n lr = (init_lr - warmup_ratio) * epoch / warmup_epochs + warmup_ratio\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n return lr\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n\nclass CNNFinalTrainer(FinalTrainer): #pylint: disable=too-many-instance-attributes\n NAME = \"cnn_trainer\"\n\n def __init__(self, model, dataset, device, gpus, objective,#pylint: disable=dangerous-default-value\n multiprocess=False,\n epochs=600, batch_size=96,\n optimizer_type=\"SGD\", optimizer_kwargs=None,\n learning_rate=0.025, momentum=0.9,\n warmup_epochs=0,\n optimizer_scheduler={\n \"type\": \"CosineAnnealingLR\",\n \"T_max\": 600,\n \"eta_min\": 0.001\n },\n weight_decay=3e-4, no_bias_decay=False,\n grad_clip=5.0,\n auxiliary_head=False, auxiliary_weight=0.4,\n add_regularization=False,\n save_as_state_dict=False,\n workers_per_queue=2,\n eval_no_grad=True,\n eval_every=1,\n eval_batch_size=1,\n calib_bn_setup=False, # for OFA final model\n seed=None,\n schedule_cfg=None):\n super(CNNFinalTrainer, self).__init__(schedule_cfg)\n\n self.model = model\n self.parallel_model = None\n self.dataset = dataset\n self.device = device\n self.gpus = gpus\n self.multiprocess = multiprocess\n self.objective = objective\n self._perf_func = self.objective.get_perfs\n self._perf_names = self.objective.perf_names()\n self._obj_loss = self.objective.get_loss\n\n self.epochs = epochs\n self.warmup_epochs = warmup_epochs\n self.optimizer_type = optimizer_type\n self.optimizer_kwargs = optimizer_kwargs\n self.learning_rate = learning_rate\n self.grad_clip = grad_clip\n self.auxiliary_head = auxiliary_head\n self.auxiliary_weight = auxiliary_weight\n self.add_regularization = add_regularization\n self.save_as_state_dict = save_as_state_dict\n self.eval_no_grad = eval_no_grad\n self.eval_every = eval_every\n self.calib_bn_setup = calib_bn_setup\n\n # for optimizer\n self.weight_decay = weight_decay\n self.no_bias_decay = no_bias_decay\n self.learning_rate = learning_rate\n self.momentum = momentum\n self.optimizer_scheduler_cfg = optimizer_scheduler\n\n self._criterion = nn.CrossEntropyLoss().to(self.device)\n\n _splits = self.dataset.splits()\n train_kwargs = getattr(_splits[\"train\"], \"kwargs\", {})\n test_kwargs = getattr(_splits[\"test\"], \"kwargs\", train_kwargs)\n\n \"\"\"\n GroupSampler is needed when `keep_ratio` in dataset is set True.\n It makes two group of images: aspect ratio > 1 , and aspect ratio < 1.\n\n `shuffle` is invalid when using GroupSampler because it cannot\n guarantee the original order of images.\n \"\"\"\n group = train_kwargs.pop(\"group_sample\", False)\n test_kwargs[\"shuffle\"] = False\n\n if self.multiprocess:\n sampler = DistributedGroupSampler(_splits[\"train\"], None,\n batch_size) if group \\\n else DistributedSampler(_splits[\"train\"], shuffle=True)\n test_kwargs[\"sampler\"] = DistributedSampler(_splits[\"test\"],\n shuffle=False)\n else:\n sampler = GroupSampler(_splits[\"train\"], None, batch_size) if group \\\n else None\n if sampler is None:\n train_kwargs[\"shuffle\"] = True\n else:\n train_kwargs.pop(\"shuffle\", None)\n train_kwargs[\"sampler\"] = sampler\n\n rank, world_size = get_dist_info()\n init_fn = 
functools.partial(worker_init_fn, num_workers=workers_per_queue, rank=rank,\n seed=seed) if seed is not None else None\n\n self.train_queue = torch.utils.data.DataLoader(\n _splits[\"train\"], batch_size=batch_size, pin_memory=False,\n num_workers=workers_per_queue,\n worker_init_fn=init_fn,\n **train_kwargs)\n\n self.valid_queue = torch.utils.data.DataLoader(\n _splits[\"test\"], batch_size=eval_batch_size, pin_memory=False,\n num_workers=workers_per_queue, **test_kwargs)\n\n if self.calib_bn_setup:\n self.model = calib_bn(self.model, self.train_queue)\n\n # optimizer and scheduler is called in `trainer.setup` call\n self.optimizer = None\n self.scheduler = None\n\n # states of the trainer\n self.last_epoch = 0\n self.epoch = 0\n self.save_every = None\n self.report_every = None\n self.train_dir = None\n self._is_setup = False\n\n def setup(self, load=None, load_state_dict=None,\n save_every=None, train_dir=None, report_every=50):\n expect(not (load is not None and load_state_dict is not None),\n \"`load` and `load_state_dict` cannot be passed simultaneously.\")\n if load is not None:\n self.load(load)\n else:\n assert self.model is not None\n if load_state_dict is not None:\n self._load_state_dict(load_state_dict)\n\n self.logger.info(\"param size = {} M\".format( \\\n utils.count_parameters(\n self.model,\n count_binary=isinstance(self.model, BNNGenotypeModel))/1.e6))\n if self.model is not None:\n self._parallelize()\n self.optimizer = self._init_optimizer()\n self.scheduler = self._init_scheduler(self.optimizer, self.optimizer_scheduler_cfg)\n\n self.save_every = save_every\n self.train_dir = train_dir\n self.report_every = report_every\n\n expect(self.save_every is None or self.train_dir is not None,\n \"when `save_every` is not None, make sure `train_dir` is not None\")\n\n self._is_setup = True\n\n def save(self, path):\n rank = (os.environ.get(\"LOCAL_RANK\"))\n if rank is not None and rank != '0':\n return\n path = utils.makedir(path)\n if self.save_as_state_dict:\n torch.save(self.model.state_dict(), os.path.join(path, \"model_state.pt\"))\n else:\n # save the model directly instead of the state_dict,\n # so that it can be loaded and run directly, without specificy configuration\n torch.save(self.model, os.path.join(path, \"model.pt\"))\n torch.save({\n \"epoch\": self.epoch,\n \"optimizer\":self.optimizer.state_dict()\n }, os.path.join(path, \"optimizer.pt\"))\n if self.scheduler is not None:\n torch.save(self.scheduler.state_dict(), os.path.join(path, \"scheduler.pt\"))\n self.logger.info(\"Saved checkpoint to %s\", path)\n\n def load(self, path):\n # load the model\n m_path = os.path.join(path, \"model.pt\") if os.path.isdir(path) else path\n if not os.path.exists(m_path):\n m_path = os.path.join(path, \"model_state.pt\")\n self._load_state_dict(m_path)\n else:\n self.model = torch.load(m_path, map_location=torch.device(\"cpu\"))\n self.model.to(self.device)\n self._parallelize()\n log_strs = [\"model from {}\".format(m_path)]\n\n # init the optimzier/scheduler\n self.optimizer = self._init_optimizer()\n self.scheduler = self._init_scheduler(self.optimizer, self.optimizer_scheduler_cfg)\n\n o_path = os.path.join(path, \"optimizer.pt\") if os.path.isdir(path) else None\n if o_path and os.path.exists(o_path):\n checkpoint = torch.load(o_path, map_location=torch.device(\"cpu\"))\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n log_strs.append(\"optimizer from {}\".format(o_path))\n self.last_epoch = checkpoint[\"epoch\"]\n\n # load the optimizer/scheduler\n if 
self.scheduler is not None:\n s_path = os.path.join(path, \"scheduler.pt\") if os.path.isdir(path) else None\n if s_path and os.path.exists(s_path):\n self.scheduler.load_state_dict(torch.load(s_path, map_location=torch.device(\"cpu\")))\n log_strs.append(\"scheduler from {}\".format(s_path))\n\n self.logger.info(\"param size = %f M\",\n utils.count_parameters(self.model) / 1.e6)\n self.logger.info(\"Loaded checkpoint from %s: %s\", path, \", \".join(log_strs))\n self.logger.info(\"Last epoch: %d\", self.last_epoch)\n\n def train(self):\n if len(self.gpus) >= 2:\n self._forward_once_for_flops(self.model)\n # save the model.log\n if self.train_dir is not None:\n with open(os.path.join(self.train_dir, \"model.log\"),\"w\") as f:\n f.write(str(self.model))\n for epoch in range(self.last_epoch+1, self.epochs+1):\n self.epoch = epoch\n self.on_epoch_start(epoch)\n\n if epoch < self.warmup_epochs:\n _warmup_update_lr(self.optimizer, epoch, self.learning_rate, self.warmup_epochs)\n else:\n if self.scheduler is not None and epoch != 1:\n self.scheduler.step()\n self.logger.info(\"epoch %d lr %e\", epoch, self.optimizer.param_groups[0][\"lr\"])\n\n train_acc, train_obj = self.train_epoch(self.train_queue, self.parallel_model,\n self._criterion, self.optimizer,\n self.device, epoch)\n self.logger.info(\"train_acc %f ; train_obj %f\", train_acc, train_obj)\n\n if self.save_every and epoch % self.save_every == 0:\n path = os.path.join(self.train_dir, str(epoch))\n self.save(path)\n\n if epoch % self.eval_every == 0:\n valid_acc, valid_obj, valid_perfs = self.infer_epoch(self.valid_queue,\n self.parallel_model,\n self._criterion, self.device)\n self.logger.info(\"valid_acc %f ; valid_obj %f ; valid performances: %s\",\n valid_acc, valid_obj,\n \"; \".join(\n [\"{}: {:.3f}\".format(n, v) for n, v in valid_perfs.items()]))\n\n self.on_epoch_end(epoch)\n\n self.save(os.path.join(self.train_dir, \"final\"))\n\n def evaluate_split(self, split):\n if len(self.gpus) >= 2:\n self._forward_once_for_flops(self.model)\n assert split in {\"train\", \"test\"}\n if split == \"test\":\n queue = self.valid_queue\n else:\n queue = self.train_queue\n acc, obj, perfs = self.infer_epoch(queue, self.parallel_model,\n self._criterion, self.device)\n self.logger.info(\"acc %f ; obj %f ; performance: %s\", acc, obj,\n \"; \".join(\n [\"{}: {:.3f}\".format(n, v) for n, v in perfs.items()]))\n return acc, obj\n\n @classmethod\n def supported_data_types(cls):\n return [\"image\"]\n\n def _load_state_dict(self, path):\n # load state dict\n checkpoint = torch.load(path, map_location=torch.device(\"cpu\"))\n extra_keys = set(checkpoint.keys()).difference(set(self.model.state_dict().keys()))\n if extra_keys:\n self.logger.error(\"%d extra keys in checkpoint! \"\n \"Make sure the genotype match\", len(extra_keys))\n missing_keys = {key for key in set(self.model.state_dict().keys())\\\n .difference(checkpoint.keys()) \\\n if \"auxiliary\" not in key}\n if missing_keys:\n self.logger.error((\"{} missing keys will not be loaded! 
Check your genotype, \"\n \"This should be due to you're using the state dict dumped by\"\n \" `awnas eval-arch --save-state-dict` in an old version, \"\n \"and your genotype actually skip some \"\n \"cells, which might means, many parameters of your \"\n \"sub-network is not actually active, \"\n \"and this genotype might not be so effective.\")\n .format(len(missing_keys)))\n self.logger.error(str(missing_keys))\n self.logger.info(self.model.load_state_dict(checkpoint, strict=False))\n\n def _parallelize(self):\n if self.multiprocess:\n self.model = convert_sync_bn(self.model).to(self.device)\n self.parallel_model = DistributedDataParallel(\n self.model, self.gpus, broadcast_buffers=False, find_unused_parameters=True)\n elif len(self.gpus) >= 2:\n self.parallel_model = DataParallel(self.model, self.gpus).to(self.device)\n else:\n self.parallel_model = self.model\n\n def _init_optimizer(self):\n group_weight = []\n group_bias = []\n for name, param in self.model.named_parameters():\n if \"bias\" in name:\n group_bias.append(param)\n else:\n group_weight.append(param)\n assert len(list(self.model.parameters())) == len(group_weight) + len(group_bias)\n optim_cls = getattr(torch.optim, self.optimizer_type)\n if self.optimizer_type == \"Adam\":\n optim_kwargs = {\n \"lr\": self.learning_rate,\n \"weight_decay\": self.weight_decay\n }\n else:\n optim_kwargs = {\n \"lr\": self.learning_rate,\n \"momentum\": self.momentum,\n \"weight_decay\": self.weight_decay\n }\n optim_kwargs.update(self.optimizer_kwargs or {})\n optimizer = optim_cls(\n [{\"params\": group_weight},\n {\"params\": group_bias,\n \"weight_decay\": 0 if self.no_bias_decay else self.weight_decay}],\n **optim_kwargs)\n\n return optimizer\n\n @staticmethod\n def _init_scheduler(optimizer, cfg):\n if cfg:\n cfg = {k:v for k, v in six.iteritems(cfg)}\n sch_cls = utils.get_scheduler_cls(cfg.pop(\"type\"))\n return sch_cls(optimizer, **cfg)\n return None\n\n\n def train_epoch(self, train_queue, model, criterion, optimizer, device, epoch):\n expect(self._is_setup, \"trainer.setup should be called first\")\n objs = utils.AverageMeter()\n top1 = utils.AverageMeter()\n top5 = utils.AverageMeter()\n model.train()\n\n for step, (inputs, target) in enumerate(train_queue):\n inputs = inputs.to(device)\n target = target.to(device)\n\n optimizer.zero_grad()\n if self.auxiliary_head: # assume model return two logits in train mode\n logits, logits_aux = model(inputs)\n loss = self._obj_loss(inputs, logits, target, model,\n add_evaluator_regularization=self.add_regularization)\n loss_aux = criterion(logits_aux, target)\n loss += self.auxiliary_weight * loss_aux\n else:\n logits = model(inputs)\n loss = self._obj_loss(inputs, logits, target, model,\n add_evaluator_regularization=self.add_regularization)\n #torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM)\n loss.backward()\n if isinstance(self.grad_clip, (int, float)) and self.grad_clip > 0:\n nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)\n optimizer.step()\n\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n n = inputs.size(0)\n objs.update(loss.item(), n)\n top1.update(prec1.item(), n)\n top5.update(prec5.item(), n)\n del loss\n\n if step % self.report_every == 0:\n self.logger.info(\"train %03d %.3f; %.2f%%; %.2f%%\",\n step, objs.avg, top1.avg, top5.avg)\n\n return top1.avg, objs.avg\n\n\n def infer_epoch(self, valid_queue, model, criterion, device):\n expect(self._is_setup, \"trainer.setup should be called first\")\n objs = utils.AverageMeter()\n 
top1 = utils.AverageMeter()\n top5 = utils.AverageMeter()\n objective_perfs = utils.OrderedStats()\n all_perfs = []\n model.eval()\n\n context = torch.no_grad if self.eval_no_grad else nullcontext\n with context():\n for step, (inputs, target) in enumerate(valid_queue):\n inputs = inputs.to(device)\n target = target.to(device)\n\n logits = model(inputs)\n loss = criterion(logits, target)\n perfs = self._perf_func(inputs, logits, target, model)\n all_perfs.append(perfs)\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n n = inputs.size(0)\n # objective_perfs.update(dict(zip(self._perf_names, perfs)), n=n)\n objs.update(loss.item(), n)\n top1.update(prec1.item(), n)\n top5.update(prec5.item(), n)\n del loss\n if step % self.report_every == 0:\n all_perfs_by_name = list(zip(*all_perfs))\n # support use objective aggregate fn, for stat method other than mean\n # e.g., adversarial distance median; detection mAP (see det_trainer.py)\n obj_perfs = {\n k: self.objective.aggregate_fn(k, False)(v)\n for k, v in zip(self._perf_names, all_perfs_by_name)\n }\n self.logger.info(\"valid %03d %e %f %f %s\", step, objs.avg, top1.avg, top5.avg,\n \"; \".join([\"{}: {:.3f}\".format(perf_n, v) \\\n # for perf_n, v in objective_perfs.avgs().items()]))\n for perf_n, v in obj_perfs.items()]))\n all_perfs_by_name = list(zip(*all_perfs))\n obj_perfs = {\n k: self.objective.aggregate_fn(k, False)(v)\n for k, v in zip(self._perf_names, all_perfs_by_name)\n }\n return top1.avg, objs.avg, obj_perfs\n\n\n def on_epoch_start(self, epoch):\n super(CNNFinalTrainer, self).on_epoch_start(epoch)\n self.model.on_epoch_start(epoch)\n self.objective.on_epoch_start(epoch)\n\n def on_epoch_end(self, epoch):\n super(CNNFinalTrainer, self).on_epoch_end(epoch)\n self.model.on_epoch_end(epoch)\n self.objective.on_epoch_end(epoch)\n\n def _forward_once_for_flops(self, model):\n # forward the model once to get the flops calculated\n self.logger.info(\"Training parallel: Forward one batch for the flops information\")\n inputs, _ = next(iter(self.train_queue))\n model(inputs.to(self.device))\n", "# -*- coding: utf-8 -*-\n#pylint: disable=attribute-defined-outside-init\n\nimport os\nimport re\nimport io\nimport sys\nimport time\nimport copy\nimport shutil\nimport inspect\nimport itertools\nimport functools\nimport collections\nfrom collections import OrderedDict, namedtuple\nfrom contextlib import contextmanager\n\nimport six\nimport yaml\nimport click\nimport numpy as np\nimport scipy\nimport scipy.signal\nimport torch\n\nfrom aw_nas.utils.registry import RegistryMeta\nfrom aw_nas.utils.exception import expect, ConfigException\nfrom aw_nas.utils.log import getLogger\n\n_HOME_DIR = os.path.abspath(os.path.expanduser(os.environ.get(\"AWNAS_HOME\", \"~/awnas\")))\n\nclass Context(object):\n def __init__(self, num_init_nodes, num_layers, use_stem=True,\n previous_cells=None, current_cell=None, previous_op=None, current_op=None):\n self.use_stem = use_stem\n self.num_init_nodes = num_init_nodes\n self.num_layers = num_layers\n self.previous_cells = previous_cells or []\n self.current_cell = current_cell or []\n self.previous_op = previous_op or []\n self.current_op = current_op or []\n self._is_inject = dict()\n self._num_conn = dict()\n self._last_conv_modules = dict()\n\n @property\n def next_op_index(self):\n return len(self.previous_op), len(self.current_op)\n\n @property\n def next_step_index(self):\n return len(self.previous_cells) - (1 if self.use_stem else 0), len(self.current_cell)\n\n @property\n def 
is_last_concat_op(self):\n _, n_s = self.next_step_index\n return self.is_end_of_cell or (n_s > self.num_init_nodes and self.is_end_of_step)\n\n @property\n def is_end_of_cell(self):\n # next_cell, next_step\n n_c, n_s = self.next_step_index\n return sum(self.next_op_index) == 0 and n_s == 0 and self.num_layers >= n_c > 0\n\n @property\n def is_end_of_step(self):\n _, n_s = self.next_step_index\n return sum(self.next_op_index) == 0 and n_s > 0\n\n @property\n def is_end_of_op(self):\n return len(self.current_op) == 0\n\n @property\n def last_state(self):\n for lst in [self.current_op, self.previous_op, self.current_cell, self.previous_cells]:\n if lst:\n return lst[-1]\n return None # empty context, which is not likely to happen\n\n @last_state.setter\n def last_state(self, state):\n for lst in [self.current_op, self.previous_op, self.current_cell, self.previous_cells]:\n if lst:\n lst[-1] = state\n break\n else:\n raise Exception(\"Empty context, set failed\")\n\n @property\n def index(self):\n next_cell, next_step = self.next_step_index\n next_conn, next_op_step = self.next_op_index\n return next_cell, next_step, next_conn, next_op_step\n\n def flag_inject(self, is_inject):\n self._is_inject[self.index] = is_inject\n\n @property\n def is_last_inject(self):\n return self._is_inject.get(self.index, True)\n\n @property\n def last_conv_module(self):\n return self._last_conv_modules.get(self.index, None)\n\n @last_conv_module.setter\n def last_conv_module(self, value):\n self._last_conv_modules[self.index] = value\n\n def __repr__(self):\n next_cell, next_step, next_conn, next_op_step = self.index\n return \"Context(next_cell={}, next_step={}, next_conn={}, next_op_step={})\"\\\n .format(next_cell, next_step, next_conn, next_op_step)\n\n## --- misc helpers ---\n# subclass `click.Group` to list commands in order\nclass _OrderedCommandGroup(click.Group):\n def __init__(self, *args, **kwargs):\n self.cmd_names = []\n super(_OrderedCommandGroup, self).__init__(*args, **kwargs)\n\n def list_commands(self, ctx):\n \"\"\"reorder the list of commands when listing the help\"\"\"\n commands = super(_OrderedCommandGroup, self).list_commands(ctx)\n return sorted(commands, key=self.cmd_names.index)\n\n def command(self, *args, **kwargs):\n def decorator(func):\n cmd = super(_OrderedCommandGroup, self).command(*args, **kwargs)(func)\n self.cmd_names.append(cmd.name)\n return cmd\n return decorator\n\n@contextmanager\ndef nullcontext():\n yield\n\ndef makedir(path, remove=False, quiet=False):\n if os.path.exists(path) and remove:\n if not quiet:\n response = input(\n \"The {} already exists.\".format(path) + \\\n \"Do you want to delete it anyway. 
[Y/y/yes] or [N/n/no/others], default is N\\n\"\n )\n if str(response) in [\"Y\", \"y\", \"yes\"]:\n shutil.rmtree(path)\n else:\n print(\"exit!\")\n sys.exit(0)\n else:\n shutil.rmtree(path)\n if not os.path.isdir(path):\n os.makedirs(path)\n return path\n\ndef get_awnas_dir(env, name):\n # try to fetch from environment variable\n dir_ = os.environ.get(env, None)\n if dir_ is None:\n # if not in environment variable, return the default\n dir_ = os.path.join(_HOME_DIR, name)\n return makedir(dir_)\n\ndef flatten_list(lst):\n return functools.reduce(lambda s, l: s + list(l) \\\n if isinstance(l, (tuple, list)) else s + [l],\n lst, [])\n\ndef recur_apply(func, lst, depth=0, out_type=list):\n return out_type([recur_apply(func, item, depth-1, out_type)\n if isinstance(item, (tuple, list)) and depth > 0 \\\n else func(item) for item in lst])\n\nclass Ticker(object):\n def __init__(self, name):\n self.name = name\n self.total_time = 0.\n self.cur_time = time.time()\n self.logger = getLogger(\"ticker_{}\".format(name))\n\n def tick(self, message=\"\"):\n cur_time = time.time()\n elapsed = cur_time - self.cur_time\n self.logger.debug(\"Ticker %s: %s: %.6f s\", self.name, message, elapsed)\n self.total_time += elapsed\n self.cur_time = cur_time\n return elapsed\n\nclass OrderedStats(object):\n def __init__(self):\n self.stat_meters = None\n\n def __nonzero__(self):\n return self.stat_meters is not None\n\n __bool__ = __nonzero__\n\n def update(self, stats, n=1):\n if self.stat_meters is None:\n self.stat_meters = OrderedDict([(name, AverageMeter()) for name in stats])\n [self.stat_meters[name].update(v, n) for name, v in stats.items()]\n\n def avgs(self):\n if self.stat_meters is None:\n return None\n return OrderedDict((name, meter.avg) for name, meter in self.stat_meters.items())\n\n def items(self):\n return self.stat_meters.items() if self.stat_meters is not None else None\n\nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n\n def is_empty(self):\n return self.cnt == 0\n\n def reset(self):\n self.avg = 0.\n self.sum = 0.\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\nclass keydefaultdict(collections.defaultdict): #pylint: disable=invalid-name\n def __missing__(self, key):\n if self.default_factory is None:\n raise KeyError(key)\n ret = self[key] = self.default_factory(key) #pylint: disable=not-callable\n return ret\n\ndef tick(register_attr, device=None):\n def _timer(func):\n @functools.wraps(func)\n def method(self, *args, **kwargv):\n if device != \"cpu\":\n torch.cuda.synchronize(device=device)\n start = time.time()\n out = func(self, *args, **kwargv)\n if device != \"cpu\":\n torch.cuda.synchronize(device=device)\n elapse = time.time() - start\n elapse *= 1000\n object.__setattr__(self, register_attr, elapse)\n return out\n return method\n return _timer\n\n\n## --- math utils ---\ndef compute_returns(rewards, gamma, length=None):\n if not isinstance(rewards, collections.Sequence):\n assert length is not None\n _rewards = np.zeros((length,))\n _rewards[-1] = rewards\n else:\n _rewards = rewards\n return scipy.signal.lfilter([1], [1, -gamma], _rewards[::-1], axis=0)[::-1]\n\ndef softmax(arr):\n e_arr = np.exp(arr - np.max(arr, axis=-1, keepdims=True))\n return e_arr / np.sum(e_arr, axis=-1, keepdims=True)\n\n\n## --- Python 2/3 compatibility utils ---\nclass abstractclassmethod(classmethod):\n #pylint: disable=too-few-public-methods,invalid-name\n # for python2 compatibility\n __isabstractmethod__ = 
True\n\n def __init__(self, a_callable):\n a_callable.__isabstractmethod__ = True\n super(abstractclassmethod, self).__init__(a_callable)\n\ndef get_argspec(func):\n if sys.version_info.major == 3:\n # python 3\n sig = inspect.signature(func) #pylint: disable=no-member\n return OrderedDict([(n, param.default) for n, param in six.iteritems(sig.parameters)])\n\n sig = inspect.getargspec(func) #pylint: disable=deprecated-method\n return OrderedDict(list(zip(sig.args,\n [None] * (len(sig.args) - len(sig.defaults)) + list(sig.defaults))))\n\ndef get_default_argspec(func):\n if sys.version_info.major == 3:\n # python 3\n sig = inspect.signature(func) #pylint: disable=no-member\n return [(n, param.default) for n, param in six.iteritems(sig.parameters) \\\n if not param.default is param.empty]\n # python 2\n sig = inspect.getargspec(func) #pylint: disable=deprecated-method\n return list(reversed(list(zip(reversed(sig.args),\n reversed(sig.defaults)))))\n\ndef namedtuple_with_defaults(name, fields, defaults):\n if sys.version_info.major == 3 and (\n sys.version_info.minor > 7 or\n (sys.version_info.minor == 7 and sys.version_info.micro >= 6)):\n return namedtuple(name, fields, defaults=defaults)\n type_ = namedtuple(name, fields)\n if defaults:\n type_.__new__.__defaults__ = tuple(defaults)\n return type_\n\n## --- text utils ---\ndef add_text_prefix(text, prefix):\n lines = text.split(\"\\n\")\n return \"\\n\".join([prefix + line if line else line for line in lines])\n\ndef component_sample_config_str(comp_name, prefix, filter_funcs=None, cfg_name=None):\n if cfg_name is None:\n cfg_name = comp_name\n filter_funcs = filter_funcs or []\n all_text = prefix + \"## ---- Component {} ----\\n\".format(cfg_name)\n\n for type_name, cls in six.iteritems(RegistryMeta.all_classes(comp_name)):\n try:\n is_skip = any(not func(cls) for func in filter_funcs)\n except Exception as e: #pylint: disable=broad-except\n # some plugin class might be wrongly implemented, check here\n import traceback\n traceback.print_exc()\n getLogger(\"utils\")\\\n .warning(\"Skip %s: %s(%s) as exception occurs in checking. %s: %s\",\n comp_name, type_name, cls, e.__class__.__name__, str(e))\n if is_skip:\n continue\n\n all_text += prefix + \"# ---- Type {} ----\\n\".format(type_name)\n all_text += prefix + \"{}_type: {}\\n\".format(cfg_name, type_name)\n all_text += prefix + \"{}_cfg:\\n\".format(cfg_name)\n\n # write the default configuration\n config_str = cls.get_default_config_str()\n all_text += add_text_prefix(config_str, prefix + \" \")\n\n all_text += prefix + \"# ---- End Type {} ----\\n\".format(type_name)\n\n all_text += prefix + \"## ---- End Component {} ----\\n\".format(cfg_name)\n return all_text\n\n# text utils for deriving\ndef _dump_with_perf(rollout, dump_mode, of, index=None):\n reward = rollout.get_perf()\n index_str = \" Arch {}\".format(index) if index is not None else \"\"\n reward_str = \" (Reward {})\".format(reward) if reward is not None else \"\"\n of.write(\"# ----{}{} ----\\n\".format(index_str, reward_str))\n if rollout.perf:\n of.write(\"# Perfs: {}\\n\".format(\", \".join(\n [\"{}: {:.4f}\".format(perf_name, value)\n for perf_name, value in rollout.perf.items()])))\n _dump(rollout, dump_mode, of)\n\ndef _parse_derive_file(input_f):\n content = input_f.read()\n regexp_iter = re.finditer(r\"# ----(?P<archid> Arch \\d+)?\"\n r\"( \\(Reward (?P<reward>[0-9.]+)\\))? 
----\\n\"\n r\"(?P<perfstr># Perfs: [^\\n]+\\n)?\\n*- (?P<genotype>[^#]+)\",\n content)\n\n genotype_perf_dict = OrderedDict()\n for match in regexp_iter:\n genotype = yaml.load(io.StringIO(match.group(\"genotype\")))\n if match.group(\"perfstr\"):\n # all perfs are dumped\n perf_patterns = re.findall(r\"(\\w+): ([0-9.]+)[,\\n]\", match.group(\"perfstr\"))\n genotype_perf_dict[genotype] = {k: float(v) for k, v in perf_patterns}\n elif match.group(\"reward\"):\n # reward is dumped\n genotype_perf_dict[genotype] = {\"reward\": float(match.group(\"reward\"))}\n return genotype_perf_dict\n\ndef _dump(rollout, dump_mode, of):\n if dump_mode == \"list\":\n yaml.safe_dump([list(rollout.genotype._asdict().values())], of)\n elif dump_mode == \"str\":\n yaml.safe_dump([str(rollout.genotype)], of)\n else:\n raise Exception(\"Unexpected dump_mode: {}\".format(dump_mode))\n\n\n## --- schedule utils ---\ndef _assert_keys(dct, mandatory_keys, possible_keys, name):\n if mandatory_keys:\n expect(set(mandatory_keys).issubset(dct.keys()),\n \"{} schedule cfg must have keys: ({})\".format(name, \", \".join(mandatory_keys)))\n if possible_keys:\n addi_keys = set(dct.keys()).difference(possible_keys)\n expect(not addi_keys,\n \"{} schedule cfg cannot have keys: ({}); all possible keys: ({})\"\\\n .format(name, \", \".join(addi_keys), \", \".join(possible_keys)))\n\n_SUPPORTED_TYPES = {\"value\", \"mul\", \"add\"}\ndef check_schedule_cfg(schedule):\n \"\"\"\n Check the sanity of the schedule configuration.\n Currently supported type: mul, add, value.\n\n Rules: mul : [boundary / every], step, start, [optional: min, max, start_epoch]\n add : [boundary / every], step, start, [optional: min, max, start_epoch]\n value: boundary, value\n \"\"\"\n expect(\"type\" in schedule,\n \"Schedule config must have `type` specified: one in \"+\", \".join(_SUPPORTED_TYPES),\n ConfigException)\n type_ = schedule[\"type\"]\n expect(type_ in _SUPPORTED_TYPES,\n \"Supported schedule config type: \"+\", \".join(_SUPPORTED_TYPES),\n ConfigException)\n\n if type_ == \"value\":\n _assert_keys(schedule, [\"value\", \"boundary\"], None, \"value\")\n expect(len(schedule[\"value\"]) == len(schedule[\"boundary\"]),\n \"value schedule cfg `value` and `boundary` should be of the same length.\",\n ConfigException)\n expect(schedule[\"boundary\"][0] == 1,\n \"value schedule cfg must have `boundary` config start from 1.\", ConfigException)\n else: # mul/add\n _assert_keys(schedule, [\"step\", \"start\"],\n [\"type\", \"step\", \"start\", \"boundary\",\n \"every\", \"min\", \"max\", \"start_epoch\"], \"mul/add\")\n expect(\"boundary\" in schedule or \"every\" in schedule,\n \"{} schedule cfg must have one of `boundary` and `every` key existed.\".format(type_),\n ConfigException)\n expect(not (\"boundary\" in schedule and \"every\" in schedule),\n \"{} shcedule cfg cannot have `boundary` and `every` key in the mean time.\"\\\n .format(type_), ConfigException)\n\ndef get_schedule_value(schedule, epoch):\n \"\"\"\n See docstring of `check_schedule_cfg` for details.\n \"\"\"\n\n type_ = schedule[\"type\"]\n if type_ == \"value\":\n ind = list(np.where(epoch < np.array(schedule[\"boundary\"]))[0])\n if not ind: # if epoch is larger than the last boundary\n ind = len(schedule[\"boundary\"]) - 1\n else:\n ind = ind[0] - 1\n next_v = schedule[\"value\"][ind]\n else:\n min_ = schedule.get(\"min\", -np.inf)\n max_ = schedule.get(\"max\", np.inf)\n start_epoch = schedule.get(\"start_epoch\", 0)\n epoch = epoch - start_epoch\n if epoch <= 0:\n return 
schedule[\"start\"]\n if \"every\" in schedule:\n ind = (epoch - 1) // schedule[\"every\"]\n else: # \"boundary\" in schedule\n ind = list(np.where(epoch < np.array(schedule[\"boundary\"]))[0])\n if not ind: # if epoch is larger than the last boundary\n ind = len(schedule[\"boundary\"])\n else:\n ind = ind[0]\n if type_ == \"mul\":\n next_v = schedule[\"start\"] * schedule[\"step\"] ** ind\n else: # type_ == \"add\"\n next_v = schedule[\"start\"] + schedule[\"step\"] * ind\n next_v = max(min(next_v, max_), min_)\n return next_v\n\n\n## --- cache utils ---\ndef cache_results(cache_params, key_funcs, buffer_size):\n if callable(key_funcs):\n key_funcs = [key_funcs] * len(cache_params)\n def decorator(func):\n sig_dct = OrderedDict(get_argspec(func))\n cache_dict = OrderedDict()\n cache_hit_and_miss = [0, 0] # hit, miss\n @functools.wraps(func)\n def _inner_func(*args, **kwargs):\n params = copy.deepcopy(sig_dct)\n params.update(kwargs)\n for value, arg_name in zip(args, sig_dct):\n params[arg_name] = value\n key_tuple = []\n for name, key_func in zip(cache_params, key_funcs):\n key_tuple.append(key_func(params[name]))\n key_tuple = tuple(key_tuple)\n if key_tuple in cache_dict:\n cache_hit_and_miss[0] += 1\n return cache_dict[key_tuple]\n cache_hit_and_miss[1] += 1\n res = func(*args, **kwargs)\n cache_dict[key_tuple] = res\n if len(cache_dict) > buffer_size:\n cache_dict.popitem(last=False)\n return res\n _inner_func.cache_dict = cache_dict\n _inner_func.cache_hit_and_miss = cache_hit_and_miss\n return _inner_func\n return decorator\n\n\n## ---- thread utils ----\nclass LazyThreadLocal(six.moves._thread._local):\n def __init__(self, creator_map=None):\n super(LazyThreadLocal, self).__init__()\n if creator_map is not None:\n assert isinstance(creator_map, dict)\n self.creator_map = creator_map\n\n def __getattr__(self, name):\n if name in self.creator_map:\n value = self.creator_map[name]()\n setattr(self, name, value)\n return value\n raise AttributeError((\"LazyThreadlocal object do not have attribute named {}, \"\n \"also not specified in the lazy creator map.\").format(name))\n\n\ndef make_divisible(v, divisor, min_val=None):\n \"\"\"\n ref: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n \"\"\"\n if min_val is None:\n min_val = divisor\n new_v = max(min_val, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\n#---- OFA related utils ----\ndef get_sub_kernel(kernel, sub_kernel_size):\n original_size = kernel.shape[-1]\n assert original_size >= sub_kernel_size, \\\n \"sub kernel size should not be larger than origin kernel size\"\n center = original_size // 2\n width = sub_kernel_size // 2\n left = center - width\n right = left + sub_kernel_size\n return kernel[:, :, left:right, left:right].contiguous()\n\n\ndef _get_channel_mask(filters: torch.Tensor, num_channels: int, p=1):\n assert p in (0, 1)\n dim = (0, 2, 3) if p == 1 else (1, 2, 3)\n channel_order = filters.norm(p=1, dim=dim).argsort(descending=True)\n mask = channel_order[:num_channels].sort()[0]\n return mask\n\ndef _get_feature_mask(filters: torch.Tensor, num_features: int, p=1):\n assert p in (0, 1)\n dim = (0) if p == 1 else (1)\n feature_order = filters.norm(p=1, dim=dim).argsort(descending=True)\n mask = feature_order[:num_features].sort()[0]\n return mask\n\n\n#---- Detection Task Utils ----\ndef feature_level_to_stage_index(strides, offset=1):\n \"\"\"\n 
calculate the level of each stage feature map by stride\n \"\"\"\n levels = itertools.accumulate([offset] + list(strides), lambda x, y: x + y - 1)\n return {l: i for i, l in enumerate(levels, -1)}\n\ndef format_as_float(container_or_float, float_fmt):\n if isinstance(container_or_float, (list, tuple)):\n return \"[\" + \", \".join([format_as_float(value, float_fmt) for value in container_or_float])\\\n + \"]\"\n return float_fmt.format(container_or_float)\n", "# -*- coding: utf-8 -*-\nimport copy\nimport inspect\nfrom inspect import signature\nimport os\nimport pickle\nfrom collections import namedtuple\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.rnn as rnn_utils\nfrom torch import optim\n\nimport numpy as np\nimport yaml\n\ntry:\n from sklearn import linear_model\n from sklearn.neural_network import MLPRegressor\nexcept ImportError as e:\n from aw_nas.utils import getLogger\n getLogger(\"hardware\").warn(\n (\"Cannot import module hardware.utils: {}\\n\"\n \"Should install scikit-learn to make some hardware-related\"\n \" functionalities work\").format(e))\n\n\nfrom aw_nas.hardware.base import (\n BaseHardwarePerformanceModel,\n MixinProfilingSearchSpace,\n Preprocessor\n)\nfrom aw_nas.ops import get_op\n\nPrim_ = namedtuple(\n \"Prim\",\n [\"prim_type\", \"spatial_size\", \"C\", \"C_out\", \"stride\", \"affine\", \"kwargs\"],\n)\n\n\nclass Prim(Prim_):\n def __new__(cls, prim_type, spatial_size, C, C_out, stride, affine, **kwargs):\n position_params = [\"C\", \"C_out\", \"stride\", \"affine\"]\n prim_constructor = get_op(prim_type)\n prim_sig = signature(prim_constructor)\n params = prim_sig.parameters\n for name, param in params.items():\n if param.default != inspect._empty:\n if name in position_params:\n continue\n if kwargs.get(name) is None:\n kwargs[name] = param.default\n else:\n assert name in position_params or name in kwargs, \\\n \"{} is a non-default parameter which should be provided explicitly.\".format(\n name)\n\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n\n assert set(params.keys()) == set(\n position_params + list(kwargs.keys())),\\\n (\"The passed parameters are different from the formal parameter list of primitive \"\n \"type `{}`, expected {}, got {}\").format(\n prim_type,\n str(params.keys()),\n str(position_params + list(kwargs.keys()))\n )\n\n kwargs = tuple(\n sorted([(k, v) for k, v in kwargs.items()]))\n return super(Prim, cls).__new__(\n cls,\n prim_type,\n int(spatial_size),\n int(C),\n int(C_out),\n int(stride),\n affine,\n kwargs,\n )\n\n def _asdict(self):\n origin_dict = dict(super(Prim, self)._asdict())\n kwargs = origin_dict.pop(\"kwargs\")\n origin_dict.update(dict(kwargs))\n return origin_dict\n\n def __getnewargs_ex__(self):\n return tuple(), self._asdict()\n\ndef assemble_profiling_nets_from_file(fname,\n base_cfg_fname,\n image_size=224,\n sample=None,\n max_layers=20):\n with open(fname, \"r\") as f:\n prof_prims = yaml.load(f)\n with open(base_cfg_fname, \"r\") as f:\n base_cfg = yaml.load(f)\n return assemble_profiling_nets(prof_prims, base_cfg, image_size, sample,\n max_layers)\n\n\ndef sample_networks(mixin_search_space,\n base_cfg_template,\n num_sample,\n **kwargs):\n for _ in range(num_sample):\n rollout = mixin_search_space.random_sample()\n primitives = mixin_search_space.rollout_to_primitives(rollout,\n **kwargs)\n base_cfg_template[\"final_model_cfg\"][\"genotypes\"] = [\n p._asdict() for p in primitives\n ]\n yield copy.deepcopy(base_cfg_template)\n\ndef 
assemble_profiling_nets(profiling_primitives,\n base_cfg_template,\n num_sample=None,\n image_size=224,\n max_layers=20,\n fix_stem_layer=True):\n \"\"\"\n Args:\n profiling_primitives: (list of dict)\n possible keys: prim_type, spatial_size, C, C_out, stride, primitive_kwargs\n (Don't use dict and list that is unhashable. Use tuple instead: (key, value) )\n base_cfg_template: (dict) final configuration template\n image_size: (int) the inputs size\n sample: (int) the number of nets\n max_layers: (int) the number of max layers of each net (glue layers do not count)\n\n Returns:\n a generator of yaml configs\n\n This function assembles all profiling primitives into multiple networks, which takes a several steps:\n 1. Each network has a stride=2 layer as the first conv layer (like many convolution network, in order to reduce the size of feature map.)\n 2. Find a available primitive for current spatial_size and current channel number:\n a). If there is a primitive has exactly same channel number and spatial size with previous primitive, append it to genotype;\n b). else we select a primitive which has the same or smaller spatial size, and insert a glue layer between them to make the number of channels consistant.\n 3. Iterate profiling primitives until there is not available primitive or the number of genotype's layers exceeds the max layer.\n \"\"\"\n\n if num_sample is None:\n num_sample = np.inf\n\n # genotypes: \"[prim_type, *params], [] ...\"\n profiling_primitives = [prim for prim in profiling_primitives if prim[\"C\"] != 3]\n profiling_primitives = sorted(profiling_primitives,\n key=lambda x: (x[\"C\"], x[\"stride\"]))\n ith_arch = 0\n glue_layer = lambda spatial_size, C, C_out, stride=1: {\n \"prim_type\": \"conv_1x1\",\n \"spatial_size\": spatial_size,\n \"C\": C,\n \"C_out\": C_out,\n \"stride\": stride,\n \"affine\": True,\n }\n\n # use C as the index of df to accelerate query\n available_idx = list(range(len(profiling_primitives)))\n channel_to_idx = {}\n for i, prim in enumerate(profiling_primitives):\n channel_to_idx[prim[\"C\"]] = channel_to_idx.get(prim[\"C\"], []) + [i]\n channel_to_idx = {k: set(v) for k, v in channel_to_idx.items()}\n\n while len(available_idx) > 0 and ith_arch < num_sample:\n ith_arch += 1\n geno = []\n\n # the first conv layer reduing the size of feature map.\n sampled_prim = profiling_primitives[available_idx[0]]\n cur_channel = int(sampled_prim[\"C\"])\n if fix_stem_layer:\n first_cov_op = {\n \"prim_type\": \"conv_3x3\",\n \"spatial_size\": image_size,\n \"C\": 3,\n \"C_out\": cur_channel,\n \"stride\": 2,\n \"affine\": True,\n }\n geno.append(first_cov_op)\n \n cur_size = round(image_size / 2)\n\n for _ in range(max_layers):\n if len(available_idx) == 0:\n break\n try:\n # find layer which has exactly same channel number and spatial size with the previous one\n idx = channel_to_idx[cur_channel]\n if len(idx) == 0:\n raise ValueError\n for i in idx:\n sampled_prim = profiling_primitives[i]\n if sampled_prim[\"spatial_size\"] == cur_size:\n break\n else:\n raise ValueError\n except:\n # or find a layer which has arbitrary channel number but has smaller spatial size\n # we need to assure that spatial size decreases as the layer number (or upsample layer will be needed.)\n for i in available_idx:\n if profiling_primitives[i][\"spatial_size\"] <= cur_size:\n sampled_prim = profiling_primitives[i]\n break\n else:\n break\n\n out_channel = int(sampled_prim[\"C\"])\n spatial_size = int(sampled_prim[\"spatial_size\"])\n stride = int(round(cur_size / 
spatial_size))\n assert isinstance(\n stride, int) and stride > 0, \"stride: {stride}\".format(\n stride=stride)\n glue_conv = glue_layer(cur_size, cur_channel, out_channel, min(stride, 2))\n geno.append(glue_conv)\n\n if stride > 2:\n _stride = stride // 2\n _cur_size = int(round(cur_size / 2))\n _channel = out_channel\n while _stride > 1:\n glue_conv = glue_layer(_cur_size, _channel, _channel,\n 2)\n geno.append(glue_conv)\n _stride //= 2\n _cur_size //= 2\n\n cur_channel = int(sampled_prim[\"C_out\"])\n cur_size = int(\n round(sampled_prim[\"spatial_size\"] / sampled_prim[\"stride\"]))\n\n available_idx.remove(i)\n channel_to_idx[sampled_prim[\"C\"]].remove(i)\n\n geno.append(sampled_prim)\n\n base_cfg_template[\"final_model_cfg\"][\"genotypes\"] = geno\n yield copy.deepcopy(base_cfg_template)\n\n\nclass BlockSumPreprocessor(Preprocessor):\n NAME = \"block_sum\"\n\n def __init__(self, preprocessors=None, schedule_cfg=None):\n super().__init__(preprocessors, schedule_cfg)\n\n def __call__(self, unpreprocessed, **kwargs):\n for prof_net in unpreprocessed:\n for ith_prof in prof_net:\n block_sum = {}\n for prim in ith_prof[\"primitives\"]:\n for k, perf in prim[\"performances\"].items():\n block_sum[k] = block_sum.get(k, 0.) + perf\n for k, perf in block_sum.items():\n ith_prof[\"block_sum_{}\".format(k)] = block_sum[k]\n yield prof_net\n\n\nclass FlattenPreprocessor(Preprocessor):\n NAME = \"flatten\"\n\n def __init__(self, preprocessors=None, schedule_cfg=None):\n super().__init__(preprocessors, schedule_cfg)\n\n def __call__(self, unpreprocessed, **kwargs):\n for prof_net in unpreprocessed:\n for ith_prof in prof_net:\n yield ith_prof\n\n\nclass RemoveAnomalyPreprocessor(Preprocessor):\n NAME = \"remove_anomaly\"\n\n def __init__(self, preprocessors=None, schedule_cfg=None):\n super().__init__(preprocessors, schedule_cfg)\n\n def __call__(self, unpreprocessed, **kwargs):\n is_training = kwargs.get(\"is_training\", True)\n if not is_training:\n for net in unpreprocessed:\n yield net\n tolerance_std = kwargs.get(\"tolerance_std\", 0.1)\n for prof_net in unpreprocessed:\n # FIXME: assert every primitive has the performance keys.\n perf_keys = prof_net[0][\"primitives\"][0][\"performances\"].keys()\n block_sum_avg = {\n k: np.mean([\n ith_prof[\"block_sum_{}\".format(k)] for ith_prof in prof_net\n ])\n for k in perf_keys\n }\n filtered_net = []\n for ith_prof in prof_net:\n for k in perf_keys:\n if abs(ith_prof[\"block_sum_{}\".format(k)] -\n block_sum_avg[k]\n ) > block_sum_avg[k] * tolerance_std:\n break\n else:\n filtered_net += [ith_prof]\n yield filtered_net\n\n\nclass ExtractSumFeaturesPreprocessor(Preprocessor):\n NAME = \"extract_sum_features\"\n\n def __init__(self, preprocessors=None, schedule_cfg=None):\n super().__init__(preprocessors, schedule_cfg)\n\n def __call__(self, unpreprocessed, **kwargs):\n is_training = kwargs.get(\"is_training\", True)\n performance = kwargs.get(\"performance\", \"latency\")\n unpreprocessed = list(unpreprocessed)\n train_x = []\n train_y = []\n for prof_net in unpreprocessed:\n train_x += [[prof_net[\"block_sum_{}\".format(performance)]]]\n if is_training:\n train_y += [prof_net[\"overall_{}\".format(performance)]]\n train_x = np.array(train_x).reshape(-1, 1)\n if is_training:\n train_y = np.array(train_y).reshape(-1)\n return unpreprocessed, train_x, train_y\n return unpreprocessed, train_x\n\nclass ExtractLSTMFeaturesPreProcessor(Preprocessor):\n NAME = \"extract_lstm_features\"\n\n def __init__(self, preprocessors=None, schedule_cfg=None):\n 
super().__init__(preprocessors, schedule_cfg)\n\n def __call__(self, unpreprocessed, **kwargs):\n is_training = kwargs.get('is_training', True)\n perf_name = kwargs.get(\"performance\", 'latency')\n unpreprocessed = list(unpreprocessed)\n train_x = list()\n train_y = list()\n for prof_net in unpreprocessed:\n x_feature = list()\n for prim in prof_net['primitives']:\n cin = prim['C']\n cout = prim['C_out']\n exp = prim['expansion'] if 'expansion' in prim else 1\n k = prim['kernel_size'] if 'kernel_size' in prim else 3\n perf = prim['performances'][perf_name]\n size = prim['spatial_size']\n stride = prim['stride']\n x_feature.append([cin, cout, exp, k, perf, size, stride])\n train_x.append(x_feature)\n if is_training:\n train_y.append(prof_net['overall_{}'.format(perf_name)])\n if is_training:\n return unpreprocessed, train_x, train_y\n else:\n return unpreprocessed, train_x\n \nclass PaddingPreProcessor(Preprocessor):\n NAME = \"padding\"\n\n def __init__(self, preprocessors=None, schedule_cfg=None):\n super().__init__(preprocessors, schedule_cfg)\n\n def __call__(self, unpreprocessed, **kwargs):\n is_training = kwargs.get(\"is_training\", True)\n perf_name = kwargs.get('performance', 'latency')\n unpreprocessed = list(unpreprocessed)\n train_x = list()\n train_y = list()\n for prof_net in unpreprocessed:\n x_feature = list()\n for prim in prof_net['primitives']:\n x_feature.append(prim['performances'][perf_name])\n train_x.append(x_feature)\n if is_training:\n train_y.append(prof_net['overall_{}'.format(perf_name)])\n # pad train_x\n max_len = max([len(_) for _ in train_x])\n for x in train_x:\n if len(x) == max_len: continue\n x.extend([0] * (max_len - len(x)) )\n\n if is_training:\n return unpreprocessed, train_x, train_y\n else:\n return unpreprocessed, train_x\n\n\nclass TableBasedModel(BaseHardwarePerformanceModel):\n NAME = \"table\"\n\n def __init__(\n self,\n mixin_search_space,\n *,\n perf_name=\"latency\",\n preprocessors=(\"flatten\", ),\n prof_prims_cfg={},\n schedule_cfg=None,\n ):\n super(TableBasedModel, self).__init__(\n mixin_search_space,\n perf_name=perf_name,\n preprocessors=preprocessors,\n schedule_cfg=schedule_cfg,\n )\n self.prof_prims_cfg = prof_prims_cfg\n\n self._table = {}\n\n def _train(self, args):\n prof_nets = args\n for net in prof_nets:\n for prim in net.get(\"primitives\", []):\n perf = prim.pop(\"performances\")[self.perf_name]\n prim = Prim(**prim)\n self._table.setdefault(prim, []).append(perf)\n\n self._table = {k: np.mean(v) for k, v in self._table.items()}\n\n def predict(self, rollout, assemble_fn=sum):\n # return random.random()\n primitives = self.mixin_search_space.rollout_to_primitives(\n rollout, **self.prof_prims_cfg)\n perfs = []\n for prim in primitives:\n perf = self._table.get(prim)\n if perf is None:\n self.logger.warn(\n \"primitive %s is not found in the table, return default value 0.\",\n prim)\n perf = 0.\n perfs += [perf]\n return assemble_fn(perfs)\n\n def save(self, path):\n pickled_table = [(k._asdict(), v) for k, v in self._table.items()]\n with open(path, \"wb\") as wf:\n pickle.dump(\n {\n \"table\": pickled_table,\n }, wf)\n\n def load(self, path):\n with open(path, \"rb\") as fr:\n m = pickle.load(fr)\n self._table = {Prim(**k): v for k, v in m[\"table\"]}\n\n\nclass RegressionModel(TableBasedModel):\n NAME = \"regression\"\n\n def __init__(\n self,\n mixin_search_space,\n *,\n perf_name=\"latency\",\n preprocessors=(\"block_sum\", \"remove_anomaly\", \"flatten\",\n \"extract_sum_features\"),\n prof_prims_cfg={},\n 
schedule_cfg=None,\n ):\n super().__init__(mixin_search_space,\n perf_name=perf_name,\n preprocessors=preprocessors,\n prof_prims_cfg=prof_prims_cfg,\n schedule_cfg=schedule_cfg)\n self.regression_model = linear_model.LinearRegression()\n\n assert isinstance(mixin_search_space, MixinProfilingSearchSpace)\n\n def _train(self, args):\n prof_nets, train_x, train_y = args\n super()._train(prof_nets)\n return self.regression_model.fit(train_x, train_y)\n\n def predict(self, rollout):\n primitives = self.mixin_search_space.rollout_to_primitives(\n rollout, **self.prof_prims_cfg)\n perfs = super().predict(rollout, assemble_fn=lambda x: x)\n primitives = [p._asdict() for p in primitives]\n for prim, perf in zip(primitives, perfs):\n prim[\"performances\"] = {self.perf_name: perf}\n prof_nets = [[{\"primitives\": primitives}]]\n prof_nets, test_x = self.preprocessor(\n prof_nets, is_training=False, performance=self.perf_name)\n return float(self.regression_model.predict(test_x)[0])\n\n def save(self, path):\n pickled_table = [(k._asdict(), v) for k, v in self._table.items()]\n with open(path, \"wb\") as fw:\n pickle.dump(\n {\n \"table\": pickled_table,\n \"model\": self.regression_model\n }, fw)\n\n def load(self, path):\n with open(path, \"rb\") as fr:\n m = pickle.load(fr)\n self._table = {Prim(**k): v for k, v in m[\"table\"]}\n self.regression_model = m[\"model\"]\n\n\nclass MLPModel(TableBasedModel):\n NAME = 'mlp'\n\n def __init__(\n self,\n mixin_search_space,\n *,\n perf_name='latency',\n preprocessors=('block_sum','remove_anomaly','flatten','extract_sum_features'),\n prof_prims_cfg={},\n schedule_cfg=None,\n ):\n\n super().__init__(\n mixin_search_space,\n perf_name=perf_name,\n preprocessors=preprocessors,\n prof_prims_cfg=prof_prims_cfg,\n schedule_cfg=schedule_cfg\n ) \n\n self.mlp_model = MLPRegressor(\n solver='adam',\n alpha=1e-4,\n hidden_layer_sizes=(100,100,100),\n random_state=1,\n max_iter=10000\n )\n \n def _train(self, args):\n prof_nets, train_x, train_y = args\n super()._train(prof_nets)\n return self.mlp_model.fit(train_x, train_y)\n\n def predict(self, rollout):\n primitives = self.mixin_search_space.rollout_to_primitives(\n rollout, **self.prof_prims_cfg)\n perfs = super().predict(rollout, assemble_fn=lambda x: x)\n primitives = [p._asdict() for p in primitives]\n for prim, perf in zip(primitives, perfs):\n prim[\"performances\"] = {self.perf_name: perf}\n prof_nets = [[{\"primitives\": primitives}]]\n prof_nets, test_x = self.preprocessor(\n prof_nets, is_training=False, performance=self.perf_name)\n return float(self.mlp_model.predict(test_x)[0])\n\n def save(self, path):\n pickled_table = [(k._asdict(), v) for k, v in self._table.items()]\n with open(path, \"wb\") as fw:\n pickle.dump(\n {\n \"table\": pickled_table,\n \"model\": self.mlp_model\n }, fw)\n\n def load(self, path):\n with open(path, \"rb\") as fr:\n m = pickle.load(fr)\n self._table = {Prim(**k): v for k, v in m[\"table\"]}\n self.mlp_model = m[\"model\"]\n\n\n\n\nclass LSTM(nn.Module):\n def __init__(self, input_size=1, hidden_layer_size=100, output_size=1, device=\"cpu\"):\n super().__init__()\n self.hidden_layer_size = hidden_layer_size\n self.device = device\n self.lstm = nn.LSTM(input_size, hidden_layer_size, batch_first=True).to(self.device)\n self.linear = nn.Linear(hidden_layer_size, output_size).to(self.device)\n self.hidden_cell = (torch.zeros(1, 1, self.hidden_layer_size).to(self.device),\n torch.zeros(1, 1, self.hidden_layer_size).to(self.device))\n \n\n def forward(self, seq, bs=1):\n 
self.hidden_cell = (torch.zeros(1, bs, self.hidden_layer_size).to(self.device),\n torch.zeros(1, bs, self.hidden_layer_size).to(self.device))\n seq = rnn_utils.pack_sequence([torch.tensor(s).reshape(-1, 1).to(self.device) for s in seq], enforce_sorted=False)\n lstm_out, self.hidden_cell = self.lstm(seq, self.hidden_cell)\n lstm_out, index = rnn_utils.pad_packed_sequence(lstm_out)\n lstm_out = lstm_out.permute([1, 0, 2])\n select = torch.zeros(lstm_out.shape[:2]).scatter_(1, index.reshape(-1, 1) - 1, 1).to(torch.bool).to(self.device)\n lstm_out = lstm_out[select]\n predictions = self.linear(lstm_out)\n return predictions[:, -1]\n\n def predict(self, input_seqs):\n return self.forward(input_seqs, bs=len(input_seqs)).cpu().detach().numpy()\n\n def fit(self, train_X, train_y, epochs=3000, bs=128):\n loss_function = nn.MSELoss()\n optimizer = torch.optim.Adam(self.parameters(), lr=0.001)\n for i in range(epochs):\n for j in range(len(train_X) // bs + 1):\n seq = train_X[j * bs: (j + 1) * bs]\n label = train_y[j * bs: (j + 1) * bs]\n optimizer.zero_grad()\n pred = self.forward(seq, bs=bs)\n loss = loss_function(pred, torch.tensor(label).to(pred.device))\n loss.backward()\n optimizer.step()\n\n if i % 5 == 0:\n print(\"epoch: {:3}, loss: {:10.5f}\".format(\n i, loss.item()))\n return self \n\n\nclass LSTMModel(TableBasedModel):\n NAME = \"lstm\"\n\n def __init__(\n self,\n mixin_search_space,\n *,\n perf_name=\"latency\",\n preprocessors=(\"block_sum\", \"remove_anomaly\", \"flatten\",\n \"extract_lstm_features\"),\n prof_prims_cfg={},\n schedule_cfg=None,\n ):\n super().__init__(mixin_search_space,\n perf_name=perf_name,\n preprocessors=preprocessors,\n prof_prims_cfg=prof_prims_cfg,\n schedule_cfg=schedule_cfg)\n gpu = torch.cuda.current_device()\n self.device = 'cuda:' + str(gpu)\n self.lstm_model = LSTM(1, 100, 1, device=self.device)\n assert isinstance(mixin_search_space, MixinProfilingSearchSpace) \n\n def _train(self, args):\n prof_nets, train_x, train_y = args\n # build Prim -> performance look-up table\n super()._train(prof_nets)\n return self.lstm_model.fit(train_x, train_y)\n\n\n def predict(self, rollout):\n primitives = self.mixin_search_space.rollout_to_primitives(\n rollout, **self.prof_prims_cfg)\n perfs = super().predict(rollout, assemble_fn=lambda x: x)\n primitives = [p._asdict() for p in primitives]\n for prim, perf in zip(primitives, perfs):\n prim[\"performances\"] = {self.perf_name: perf}\n prof_nets = [[{\"primitives\": primitives}]]\n prof_nets, test_x = self.preprocessor(\n prof_nets, is_training=False, performance=self.perf_name)\n return float(self.lstm_model.predict(test_x)[0])\n\n def save(self, path):\n pickled_table = [(k._asdict(), v) for k, v in self._table.items()]\n with open(path, \"wb\") as fw:\n pickle.dump(\n {\n \"table\": pickled_table,\n \"model\": self.lstm_model\n }, fw)\n\n def load(self, path):\n with open(path, \"rb\") as fr:\n m = pickle.load(fr)\n self._table = {Prim(**k): v for k, v in m[\"table\"]}\n self.lstm_model = m[\"model\"].to(self.device)\n\n\n\ndef iterate(prof_prim_dir):\n for _dir in os.listdir(prof_prim_dir):\n cur_dir = os.path.join(prof_prim_dir, _dir)\n if not os.path.isdir(cur_dir):\n continue\n prof_net = []\n for f in os.listdir(cur_dir):\n if not f.endswith(\"yaml\"):\n continue\n with open(os.path.join(cur_dir, f), \"r\") as fr:\n prof_net += [yaml.load(fr)]\n yield prof_net\n", "# -*- coding: utf-8 -*-\n# pylint: disable=invalid-name\n\"\"\"\nMultiShotEvaluator: Curve fitting to estimate reward at targeted FLOPs\nCopyright 
(c) 2019 Xuefei Ning, Junbo Zhao\n\"\"\"\n\nimport abc\nimport copy\nimport collections\nimport math\n\nimport numpy as np\n\nfrom scipy.optimize import curve_fit\nfrom aw_nas.common import BaseRollout\nfrom aw_nas.evaluator.base import BaseEvaluator\nfrom aw_nas.weights_manager.base import BaseWeightsManager\n\n\nclass BaseMultiShotEvaluator(BaseEvaluator):\n def __init__(\n self,\n dataset,\n weights_manager,\n objective,\n rollout_type,\n sub_evaluators=[],\n schedule_cfg=None,\n ):\n super(BaseMultiShotEvaluator, self).__init__(\n dataset, weights_manager, objective, rollout_type, schedule_cfg=schedule_cfg\n )\n self.sub_evaluator_cfgs = sub_evaluators\n self.num_sub_evaluators = len(self.sub_evaluator_cfgs)\n self.sub_evaluators = []\n for sub_eva_cfg in self.sub_evaluator_cfgs:\n wm_type = sub_eva_cfg[\"sub_weights_manager_type\"]\n wm_cfg = sub_eva_cfg[\"sub_weights_manager_cfg\"]\n wm_device = sub_eva_cfg.get(\"device\", weights_manager.device)\n sub_wm = BaseWeightsManager.get_class_(wm_type)(\n weights_manager.search_space, device=wm_device, **wm_cfg\n )\n eva_type = sub_eva_cfg[\"sub_evaluator_type\"]\n eva_cfg = sub_eva_cfg[\"sub_evaluator_cfg\"]\n eva_ckpt_path = sub_eva_cfg.get(\"ckpt_path\", None)\n sub_eva = BaseEvaluator.get_class_(eva_type)(\n dataset, sub_wm, objective, **eva_cfg\n )\n if eva_ckpt_path is not None:\n sub_eva.load(eva_ckpt_path)\n self.sub_evaluators.append(sub_eva)\n\n @classmethod\n def supported_data_types(cls):\n # just return all possible data types\n # since this is a hyper-evaluator\n return [\"image\", \"sequence\"]\n\n @classmethod\n def supported_rollout_types(cls):\n # just return all possible rollout types\n # since this is a hyper-evaluator\n return list(BaseRollout.all_classes_().keys())\n\n def evaluate_rollouts(\n self,\n rollouts,\n is_training,\n portion=None,\n eval_batches=None,\n return_candidate_net=False,\n callback=None,\n ):\n sub_eva_perfs = []\n for sub_eva in self.sub_evaluators:\n rollouts = sub_eva.evaluate_rollouts(\n rollouts,\n is_training,\n portion=portion,\n eval_batches=eval_batches,\n return_candidate_net=False,\n callback=callback,\n )\n r_perfs = [copy.deepcopy(r.perf) for r in rollouts]\n sub_eva_perfs.append(r_perfs)\n r_comb_perfs = [\n self._combine_multi_perfs(r_perfs) for r_perfs in zip(*sub_eva_perfs)\n ]\n for i, rollout in enumerate(rollouts):\n rollout.multi_perf = [copy.deepcopy(sub_eva_perfs[j][i]) for j in range(self.num_sub_evaluators)]\n for rollout, r_comb_perf in zip(rollouts, r_comb_perfs):\n # use update instead of assignment, to keep track of the `predicted_score` field\n # that is set by predictor-based controller\n rollout.perf.update(r_comb_perf)\n return rollouts\n\n def update_rollouts(self, rollouts):\n pass\n\n def update_evaluator(self, controller):\n pass\n\n def save(self, path):\n for i_eva, sub_eva in enumerate(self.sub_evaluators):\n sub_eva.save(\"{}_sub{}\".format(path, i_eva))\n\n def load(self, path):\n for i_eva, sub_eva in enumerate(self.sub_evaluators):\n sub_eva.load(\"{}_sub{}\".format(path, i_eva))\n\n # ---- helper methods ----\n @abc.abstractmethod\n def _combine_multi_perfs(self, perfs):\n res = collections.OrderedDict()\n # An unmeaningful example\n res[\"reward\"], res[\"acc_clean\"], res[\"acc_adv\"], res[\"flops\"] = (\n 0.5 * (perfs[0][\"acc_clean\"] + perfs[0][\"acc_adv\"]),\n perfs[0][\"acc_clean\"],\n perfs[0][\"acc_adv\"],\n perfs[0][\"flops\"],\n )\n return res\n\n\nclass MultiEvaluator(BaseMultiShotEvaluator):\n NAME = \"multi_shot_evaluator\"\n\n def 
__init__(\n self,\n dataset,\n weights_manager,\n objective,\n rollout_type,\n sub_evaluators=[],\n target_flops=1500.0e6,\n fit_function_type=\"log_power\",\n schedule_cfg=None,\n ):\n super(MultiEvaluator, self).__init__(\n dataset,\n weights_manager,\n objective,\n rollout_type,\n sub_evaluators,\n schedule_cfg,\n )\n self.target_flops = float(target_flops)\n self.fit_function_type = fit_function_type\n self.fit_function = getattr(self, fit_function_type)\n assert callable(self.fit_function)\n\n # ---- candidate functions ----\n @staticmethod\n def pow2(x, c, a):\n return c - a * x ** (-a)\n\n @staticmethod\n def pow3(x, c, a, alpha):\n return c - a * x ** (-alpha)\n\n @staticmethod\n def vapor_pressure(x, a, b, c):\n return a + b / x + c * np.log(x)\n\n @staticmethod\n def log_log_linear(x, a, b):\n return np.log(a * np.log(x) + b)\n\n @staticmethod\n def ilog2(x, c, a):\n return c - a / np.log(x)\n\n @staticmethod\n def log_power(x, a, b, c):\n return a / (1 + pow(x, b) * math.exp(c))\n\n # ---- helper methods ----\n def _combine_multi_perfs(self, perfs):\n acc_cleans, acc_advs, flops = zip(\n *[(perf[\"acc_clean\"], perf[\"acc_adv\"], perf[\"flops\"]) for perf in perfs]\n )\n\n reward_ = [\n 0.5 * (acc_clean + acc_adv)\n for acc_clean, acc_adv in zip(acc_cleans, acc_advs)\n ]\n acc_cleans, acc_advs, flops, reward_ = (\n np.array(acc_cleans),\n np.array(acc_advs),\n np.array(flops),\n np.array(reward_),\n )\n\n target_acc_clean = self.fit(\n self.fit_function, flops, acc_cleans, self.target_flops\n )\n target_acc_adv = self.fit(self.fit_function, flops, acc_advs, self.target_flops)\n\n target_reward = self.fit(self.fit_function, flops, reward_, self.target_flops)\n res = collections.OrderedDict()\n res[\"reward\"], res[\"acc_clean\"], res[\"acc_adv\"], res[\"flops\"] = (\n target_reward,\n target_acc_clean,\n target_acc_adv,\n self.target_flops,\n )\n return res\n\n @staticmethod\n def fit(func, flops, rewards, target_flops):\n popt, _ = curve_fit(\n func,\n flops,\n rewards,\n maxfev=500000000,\n bounds=([0.0, -np.inf, -np.inf], [np.inf, 0, np.inf]),\n )\n target_reward = func(target_flops, *popt)\n return target_reward\n" ]
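The MultiEvaluator code above extrapolates sub-evaluator performance to a target FLOPs budget by curve fitting. Below is a minimal standalone sketch of that pattern, assuming scipy is available; the (flops, reward) points and the 1.5 GFLOPs target are made-up values for illustration, not results from the repository.

# Sketch of the FLOPs-to-reward extrapolation pattern used by MultiEvaluator above.
# The data points below are assumed, for illustration only.
import math
import numpy as np
from scipy.optimize import curve_fit

def log_power(x, a, b, c):
    # same functional form as MultiEvaluator.log_power
    return a / (1 + pow(x, b) * math.exp(c))

flops = np.array([200.0e6, 600.0e6, 1000.0e6])   # assumed sub-network FLOPs
reward = np.array([0.62, 0.71, 0.75])            # assumed rewards at those FLOPs

popt, _ = curve_fit(log_power, flops, reward, maxfev=500000,
                    bounds=([0.0, -np.inf, -np.inf], [np.inf, 0, np.inf]))
print(log_power(1500.0e6, *popt))                # estimated reward at the assumed 1.5 GFLOPs target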
[ [ "torch.nn.BatchNorm1d", "torch.cat", "torch.nn.ModuleList", "torch.nn.Linear", "torch.split", "torch.stack", "numpy.argsort" ], [ "torch.nn.CrossEntropyLoss", "torch.utils.data.distributed.DistributedSampler", "numpy.random.seed", "torch.utils.data.DataLoader", "torch.device" ], [ "torch.cuda.synchronize", "numpy.max", "scipy.signal.lfilter", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "torch.cuda.current_device", "torch.nn.LSTM", "torch.zeros", "torch.tensor", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "numpy.mean", "sklearn.linear_model.LinearRegression", "numpy.array", "torch.nn.MSELoss", "sklearn.neural_network.MLPRegressor" ], [ "numpy.log", "numpy.array", "scipy.optimize.curve_fit" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
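The aw_nas utils module in the record above defines check_schedule_cfg and get_schedule_value for epoch-indexed hyperparameter schedules (types value/mul/add). A small usage sketch follows, assuming the helpers are importable from aw_nas.utils; the config values are illustrative only.

# Usage sketch for the schedule helpers defined in the aw_nas utils above.
from aw_nas.utils import check_schedule_cfg, get_schedule_value  # assumed install location

# "mul" schedule: start at 0.1 and multiply by `step` every 10 epochs
schedule = {"type": "mul", "start": 0.1, "step": 0.5, "every": 10}
check_schedule_cfg(schedule)  # raises ConfigException if the config is malformed

for epoch in (1, 11, 25):
    print(epoch, get_schedule_value(schedule, epoch))
# expected values: 0.1, 0.05, 0.025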
UST-QuAntiL/qhana
[ "bf499d072dcc37f81efec1b8e17b7d5460db7a04", "bf499d072dcc37f81efec1b8e17b7d5460db7a04" ]
[ "qhana/backend/elementComparer.py", "qhana/backend/clustering.py" ]
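elementComparer.py, whose source follows in the code field below, implements the Wu-Palmer similarity over a taxonomy graph. Here is a self-contained toy sketch of the same measure, assuming only networkx; the three-node taxonomy is invented for illustration.

# Toy sketch of the Wu-Palmer measure implemented in qhana/backend/elementComparer.py below.
import networkx as nx

taxonomy = nx.DiGraph()
taxonomy.add_edges_from([("root", "clothing"), ("clothing", "hat"), ("clothing", "coat")])

def wu_palmer(graph, first, second):
    undirected = graph.to_undirected()
    lca = nx.lowest_common_ancestor(graph, first, second)   # lowest common ancestor in the DAG
    root = [n for n, d in graph.in_degree() if d == 0][0]   # node without incoming edges
    d1 = nx.shortest_path_length(undirected, first, lca)
    d2 = nx.shortest_path_length(undirected, second, lca)
    d3 = nx.shortest_path_length(undirected, lca, root)
    return 0.0 if d1 + d2 + 2 * d3 == 0 else 2 * d3 / (d1 + d2 + 2 * d3)

print(wu_palmer(taxonomy, "hat", "coat"))  # 0.5: the ancestor "clothing" sits one edge below the root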
[ "from abc import ABCMeta\nfrom abc import abstractmethod\nfrom typing import Any\nimport enum\nimport networkx as nx\nfrom networkx import Graph\nfrom qhana.backend.taxonomie import Taxonomie\nfrom qhana.backend.logger import Logger\nimport numpy as np\nfrom qhana.backend.logger import Logger\nimport os\nimport json\nimport math\nfrom qhana.backend.timer import Timer\n\n\"\"\" \nDefines an enum to list up all available element comparer\n\"\"\"\nclass ElementComparerType(enum.Enum):\n wuPalmer = \"wuPalmer\"\n timeTanh = \"timeTanh\"\n\n \"\"\"\n Returns the name of the given ElementComparerType.\n \"\"\"\n @staticmethod\n def get_name(elementComparerType) -> str:\n name = \"\"\n if elementComparerType == ElementComparerType.wuPalmer:\n name += \"WuPalmer\"\n elif elementComparerType == ElementComparerType.timeTanh:\n name += \"TimeTanh\"\n else:\n Logger.error(\"No name for element comparer \\\"\" + str(elementComparerType) + \"\\\" specified\")\n raise ValueError(\"No name for element comparer \\\"\" + str(elementComparerType) + \"\\\" specified\")\n return name\n\n \"\"\"\n Returns the description of the given ElementComparerType.\n \"\"\"\n @staticmethod\n def get_description(elementComparerType) -> str:\n description = \"\"\n if elementComparerType == ElementComparerType.wuPalmer:\n description += \"Compares two elements based on a taxonomy \" \\\n + \"using the wu palmer similarity measure.\"\n elif elementComparerType == ElementComparerType.timeTanh:\n description += \"Compares two timecodes using the tanh function: \" \\\n + \"tanh(abs(a-b) / 7200). We normalize this function to 7200 seconds.\"\n else:\n Logger.error(\"No description for element comparer \\\"\" + str(elementComparerType) + \"\\\" specified\")\n raise ValueError(\"No description for element comparer \\\"\" + str(elementComparerType) + \"\\\" specified\")\n return description\n\n\"\"\" \nRepresents the abstract element comprarer base class\n\"\"\"\nclass ElementComparer(metaclass=ABCMeta):\n \"\"\" \n Returns the comparison value of first and second\n element based on the giben base\n \"\"\"\n @abstractmethod\n def compare(self, first: Any, second: Any, base: Any) -> float:\n pass\n\n \"\"\"\n Creates a full cache on file, i.e. 
calculates all pariwise similarities\n and safes the result in a file\n \"\"\"\n @abstractmethod\n def create_cache(self, base: Any) -> None:\n pass\n\"\"\" \nRepresents the factory to create an element comparer\n\"\"\"\nclass ElementComparerFactory:\n \"\"\"\n Static method for creating an element comparer\n \"\"\"\n @staticmethod\n def create(type: ElementComparerType) -> ElementComparer:\n if type == ElementComparerType.wuPalmer:\n return WuPalmer()\n elif type == ElementComparerType.timeTanh:\n return TimeTanh()\n else:\n raise Exception(\"Unknown type of element comparer\")\n return\n\n\"\"\"\nRepresents the conventional wu palmer similarity measure\n\"\"\"\nclass WuPalmer(ElementComparer):\n\n \"\"\"\n Constructor.\n \"\"\"\n def __init__(self):\n self.cache = None\n return\n\n \"\"\"\n Applies try to use cache first if available, if not,\n run compare_inner\n \"\"\"\n def compare(self, first: str, second: str, base: Taxonomie) -> float:\n # check if cache is available\n cache = dict()\n if self.cache is None:\n if os.path.isdir(\"cache\") != False:\n fileName = \"cache/\" + base.name + \".json\"\n if os.path.isfile(fileName) != False:\n self.cache = self.__loads_json(fileName)\n cache = self.cache\n else:\n cache = self.cache\n\n if (first, second) in cache:\n if (first, second) in cache:\n return self.cache[(first, second)]\n\n return self.compare_inner(first, second, base)\n\n \"\"\"\n Applies wu palmer similarity measure on two taxonomie elements\n \"\"\"\n def compare_inner(self, first: str, second: str, base: Taxonomie) -> float:\n # Get directed graph\n d_graph = base.graph\n\n # Get undirected graph\n ud_graph = d_graph.to_undirected()\n\n # Get lowest reachable node from both \n lowest_common_ancestor = nx.algorithms.lowest_common_ancestors.lowest_common_ancestor(d_graph, first, second)\n\n # Get root of graph\n root = [n for n,d in d_graph.in_degree() if d == 0][0]\n\n # Count edges - weight is 1 per default\n d1 = nx.algorithms.shortest_paths.generic.shortest_path_length(ud_graph, first, lowest_common_ancestor)\n d2 = nx.algorithms.shortest_paths.generic.shortest_path_length(ud_graph, second, lowest_common_ancestor)\n d3 = nx.algorithms.shortest_paths.generic.shortest_path_length(ud_graph, lowest_common_ancestor, root)\n\n # if first and second, both is the root\n if d1 + d2 + 2 * d3 == 0.0:\n return 0.0\n\n return 2 * d3 / (d1 + d2 + 2 * d3)\n\n \"\"\"\n Serializes a dict object with 2-tuples as key to json file\n \"\"\"\n def __dump_json(self, dic, fileName) -> None:\n with open(fileName, \"w\") as f:\n k = dic.keys()\n v = dic.values()\n k1 = [str(i) for i in k]\n json.dump(json.dumps(dict(zip(*[k1,v]))),f)\n \n \"\"\"\n Deserializes a json file to a dict object with 2-tuples as key\n \"\"\"\n def __loads_json(self, fileName) -> dict:\n with open(fileName, \"r\") as f:\n data = json.load(f)\n dic = json.loads(data)\n k = dic.keys()\n v = dic.values()\n k1 = [eval(i) for i in k]\n return dict(zip(*[k1,v]))\n\n \"\"\"\n Creates the cache for WuPalmer similarity, i.e. 
calculates pairwise values for\n taxonomy entries\n \"\"\"\n def create_cache(self, base: Taxonomie) -> None:\n fileName = \"cache/\" + base.name + \".json\"\n\n # check if cache already exist\n if os.path.isfile(fileName) and os.path.exists(fileName):\n Logger.debug(\"Cache for \" + base.name + \" already exist\")\n return\n\n # format ((first taxonomy entry, second taxonomy entry), value)\n cache = dict()\n\n amount = int(math.pow(len(base.graph.nodes()), 2))\n index = 1\n everyNSteps = 100\n\n timer: Timer = Timer()\n timer.start()\n\n for first in base.graph.nodes():\n for second in base.graph.nodes():\n cache[(first, second)] = self.compare_inner(first, second, base)\n index += 1\n if index % everyNSteps == 0:\n Logger.debug(str(index) + \" from \" + str(amount))\n\n if os.path.isdir(\"cache\") == False:\n os.mkdir(\"cache\")\n\n self.__dump_json(cache, fileName)\n\n timer.stop()\n\n return\n\n\"\"\"\nRepresents a timecode comparer using the tanh function.\n\"\"\"\nclass TimeTanh(ElementComparer):\n \"\"\"\n Applies the tanh function for comparing timecodes.\n \"\"\"\n def compare(self, first: int, second: int, base: Any) -> float:\n return np.tanh(np.abs( (first - second)) / 7200.0)\n ", "from abc import ABCMeta\nfrom abc import abstractmethod\nfrom typing import Any\nimport enum\nimport numpy as np\nfrom qhana.backend.logger import Logger, LogLevel\nfrom sklearn.cluster import OPTICS, KMeans\nfrom sklearn_extra.cluster import KMedoids\nfrom qhana.backend.entity import Costume\nfrom typing import List\nfrom qhana.backend.quantumKMeans import NegativeRotationQuantumKMeans, DestructiveInterferenceQuantumKMeans, \\\n StatePreparationQuantumKMeans, PositiveCorrelationQuantumKmeans\n\nfrom qiskit import Aer\nfrom qiskit.circuit.library import TwoLocal\nfrom qiskit.optimization.applications.ising import max_cut\nfrom qiskit.aqua.algorithms import VQE\nfrom qiskit.aqua.components.optimizers import SPSA\nfrom qiskit.aqua import QuantumInstance\nfrom qiskit.optimization.applications.ising.common import sample_most_likely\nfrom qiskit import IBMQ\n\nimport networkx as nx\nfrom qhana.backend.classicNaiveMaxCutSolver import ClassicNaiveMaxCutSolver\nfrom qhana.backend.sdpMaxCutSolver import SdpMaxCutSolver\nfrom qhana.backend.bmMaxCutSolver import BmMaxCutSolver\nfrom qhana.backend.timer import Timer\n\n\"\"\"\nEnums for Clustertyps\n\"\"\"\nclass ClusteringType(enum.Enum):\n optics = 0 # OPTICS =:Ordering Points To Identify the Clustering Structure\n vqeMaxCut = 1 # MaxCut Quantenalgorithmus based on VQE \n classicNaiveMaxCut = 2 # Naive classical implementation\n sdpMaxCut = 3 # Semidefinite Programming implementation\n bmMaxCut = 4 # Bureir-Monteiro implementation\n negativeRotationQuantumKMeans = 5 # Negative Rotation Quantum K Means\n destructiveInterferenceQuantumKMeans = 6 # Destructive Interference Quantum K Means\n ClassicalKMeans = 7 # a classical scikit implementation of K means\n StatePreparationQuantumKMeans = 8 # an own implementation of a quantum k means\n PositiveCorrelationQuantumKMeans = 9\n ClassicalKMedoids = 10\n\n @staticmethod\n def get_name(clusteringTyp) -> str:\n name = \"\"\n if clusteringTyp == ClusteringType.optics :\n name = \"optics\"\n elif clusteringTyp == ClusteringType.vqeMaxCut :\n name = \"qaoaMaxCut\"\n elif clusteringTyp == ClusteringType.classicNaiveMaxCut:\n name = \"classicNaiveMaxCut\"\n elif clusteringTyp == ClusteringType.sdpMaxCut:\n name = \"sdpMaxCut\"\n elif clusteringTyp == ClusteringType.bmMaxCut:\n name = \"bmMaxCut\"\n elif clusteringTyp == 
ClusteringType.negativeRotationQuantumKMeans:\n name = \"negativeRotationQuantumKMeans\"\n elif clusteringTyp == ClusteringType.destructiveInterferenceQuantumKMeans:\n name = \"destructiveInterferenceQuantumKMeans\"\n elif clusteringTyp == ClusteringType.ClassicalKMeans:\n name = \"classicalKMeans\"\n elif clusteringTyp == ClusteringType.StatePreparationQuantumKMeans:\n name = \"statePreparationQuantumKMeans\"\n elif clusteringTyp == ClusteringType.PositiveCorrelationQuantumKMeans:\n name = \"positiveCorrelationQuantumKMeans\"\n elif clusteringTyp == ClusteringType.ClassicalKMedoids:\n name = \"classicalKMedoids\"\n else:\n Logger.error(\"No name for clustering \\\"\" + str(clusteringTyp) + \"\\\" specified\")\n raise ValueError(\"No name for clustering \\\"\" + str(clusteringTyp) + \"\\\" specified\")\n return name\n\n \"\"\"\n Returns the description of the given ScalingType.\n \"\"\"\n @staticmethod\n def get_description(clusteringTyp) -> str:\n description = \"\"\n if clusteringTyp == ClusteringType.optics:\n description = (\"Ordering points to identify the clustering structure (OPTICS)\" \n + \" is an algorithm for finding density-based clusters\"\n + \" in spatial data\")\n elif clusteringTyp == ClusteringType.vqeMaxCut:\n description = (\"MaxCut Quantum Algorithm based on QAOA\")\n elif clusteringTyp == ClusteringType.classicNaiveMaxCut:\n description = (\"Classical naive implemented MaxCut algorithm\")\n elif clusteringTyp == ClusteringType.sdpMaxCut:\n description = (\"Semidefinite Programming solver for MaxCut\")\n elif clusteringTyp == ClusteringType.bmMaxCut:\n description = (\"Bureir-Monteiro solver for MaxCut\")\n elif clusteringTyp == ClusteringType.negativeRotationQuantumKMeans:\n description = (\"Negative Rotation Quantum K Means\")\n elif clusteringTyp == ClusteringType.destructiveInterferenceQuantumKMeans:\n description = (\"Destructive Interference Quantum K Means\")\n elif clusteringTyp == ClusteringType.ClassicalKMeans:\n description = (\"Classical K Means\")\n elif clusteringTyp == ClusteringType.StatePreparationQuantumKMeans:\n description = (\"State Preparation Quantum K Means\")\n elif clusteringTyp == ClusteringType.PositiveCorrelationQuantumKMeans:\n description = (\"Positive Correlation Quantum K Means\")\n elif clusteringTyp == ClusteringType.ClassicalKMedoids:\n description = (\"Classical K Medoids\")\n else:\n Logger.error(\"No description for clustering \\\"\" + str(clusteringTyp) + \"\\\" specified\")\n raise ValueError(\"No description for clustering \\\"\" + str(clusteringTyp) + \"\\\" specified\")\n return description\n\n\nclass Clustering(metaclass=ABCMeta):\n\n def __init__(self):\n self.keep_cluster_mapping = False\n\n \"\"\"\n Interface for Clustering Object\n \"\"\"\n @abstractmethod\n def create_cluster(self, position_matrix : np.matrix , similarity_matrix : np.matrix) -> np.matrix:\n pass\n \n @abstractmethod\n def get_param_list(self) -> list:\n pass\n\n @abstractmethod\n def set_param_list(self, params: list = []) -> np.matrix:\n pass\n\n @abstractmethod\n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n def get_keep_cluster_mapping(self):\n return self.keep_cluster_mapping\n\n def set_keep_cluster_mapping(self, keep_cluster_mapping):\n self.keep_cluster_mapping = keep_cluster_mapping\n return\n\n\"\"\" \nRepresents the factory to create an scaling object\n\"\"\"\nclass ClusteringFactory:\n \n @staticmethod\n def create(type: ClusteringType) -> Clustering:\n if type == ClusteringType.optics:\n return 
Optics()\n elif type == ClusteringType.vqeMaxCut:\n return VQEMaxCut()\n elif type == ClusteringType.classicNaiveMaxCut:\n return ClassicNaiveMaxCut()\n elif type == ClusteringType.sdpMaxCut:\n return SdpMaxCut()\n elif type == ClusteringType.bmMaxCut:\n return BmMaxCut()\n elif type == ClusteringType.negativeRotationQuantumKMeans:\n return NegativeRotationQuantumKMeansClustering()\n elif type == ClusteringType.destructiveInterferenceQuantumKMeans:\n return DestructiveInterferenceQuantumKMeansClustering()\n elif type == ClusteringType.ClassicalKMeans:\n return ClassicalKMeans()\n elif type == ClusteringType.StatePreparationQuantumKMeans:\n return StatePreparationQuantumKMeansClustering()\n elif type == ClusteringType.PositiveCorrelationQuantumKMeans:\n return PositiveCorrelationQuantumKMeansClustering()\n elif type == ClusteringType.ClassicalKMedoids:\n return ClassicalKMedoids()\n else:\n Logger.error(\"Unknown type of clustering. The application will quit know.\")\n raise Exception(\"Unknown type of clustering.\")\n\n\n\"\"\"\noptics\n\"\"\"\nclass Optics(Clustering):\n \"\"\"\n OPTICS Referenz : https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html\n\n OPTICS (Ordering Points To Identify the Clustering Structure), closely related to DBSCAN, finds\n core sample of high density and expands clusters from them [R2c55e37003fe-1]. Unlike DBSCAN, k-\n eeps cluster hierarchy for a variable neighborhood radius. Better suited for usage on large da-\n tasets than the current sklearn implementation of DBSCAN.\n Clusters are then extracted using a DBSCAN-like method (cluster_method = ‘dbscan’) or an automa-\n tic technique proposed in [R2c55e37003fe-1] (cluster_method = ‘xi’).\n This implementation deviates from the original OPTICS by first performing k-nearest-neighborhood\n searches on all points to identify core sizes, then computing only the distances to unprocessed \n points when constructing the cluster order. Note that we do not employ a heap to manage the exp-\n ansion candidates, so the time complexity will be O(n^2). Read more in the User Guide.\n\n Parameters:\n min_samples: int > 1 or float between 0 and 1 (default=5)\n The number of samples in a neighborhood for a point to be considered as a\n core point. Also, up and down steep regions can’t have more then min_sam-\n ples consecutive non-steep points. Expressed as an absolute number or a \n fraction of the number of samples (rounded to be at least 2).\n\n max_eps: float, optional (default=np.inf)\n The maximum distance between two samples for one to be considered as in t-\n he neighborhood of the other. Default value of np.inf will identify clust-\n ers across all scales; reducing max_eps will result in shorter run times.\n\n metric: str or callable, optional (default=’minkowski’)\n Metric to use for distance computation. Any metric from scikit-learn or \n scipy.spatial.distance can be used.\n\n If metric is a callable function, it is called on each pair of instances \n (rows) and the resulting value recorded. The callable should take two arr-\n ays as input and return one value indicating the distance between them. \n This works for Scipy’s metrics, but is less efficient than passing the me-\n tric name as a string. 
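# --- Illustrative usage sketch, not part of the original module -------------
# How a caller obtains and runs a clustering backend through the factory
# above. The data below is a tiny hypothetical placeholder; note that the
# OPTICS wrapper clusters on the position matrix only and ignores the
# similarity matrix argument.
import numpy as np

clustering = ClusteringFactory.create(ClusteringType.optics)
positions = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1], [0.1, 0.1], [0.05, 0.05],
                      [5.0, 5.0], [5.1, 5.0], [5.0, 5.1], [5.1, 5.1], [5.05, 5.05]])
similarities = np.zeros((10, 10))          # unused by the OPTICS wrapper
labels = clustering.create_cluster(positions, similarities)
# labels is an integer array; OPTICS marks unclustered points with -1
# ----------------------------------------------------------------------------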
If metric is “precomputed”, X is assumed to be a \n distance matrix and must be square.\n\n Valid values for metric are:\n\n from scikit-learn: [‘cityblock’, ‘cosine’, ‘euclidean’, ‘l1’, ‘l2’, \n ‘manhattan’]\n\n from scipy.spatial.distance: [‘braycurtis’, ‘canberra’, ‘chebyshev’, \n ‘correlation’, ‘dice’, ‘hamming’, ‘jaccard’, ‘kulsinski’, ‘mahalanobis’, \n ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, ‘sokalmichener’,\n ‘sokalsneath’, ‘sqeuclidean’, ‘yule’]\n\n See the documentation for scipy.spatial.distance for details on these metrics.\n\n p: int, optional (default=2)\n Parameter for the Minkowski metric from sklearn.metrics.pairwise_distances. \n When p = 1, this is equivalent to using manhattan_distance (l1), and euclidea-\n n_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n metric_params: dict, optional (default=None)\n Additional keyword arguments for the metric function.\n\n cluster_method: str, optional (default=’xi’)\n The extraction method used to extract clusters using the calculated reachability\n and ordering. Possible values are “xi” and “dbscan”.\n\n eps: float, optional (default=None)\n The maximum distance between two samples for one to be considered as in the neig-\n hborhood of the other. By default it assumes the same value as max_eps. Used only \n when cluster_method='dbscan'.\n\n xi: float, between 0 and 1, optional (default=0.05)\n Determines the minimum steepness on the reachability plot that constitutes a clu-\n ster boundary. For example, an upwards point in the reachability plot is defined \n by the ratio from one point to its successor being at most 1-xi. Used only when \n cluster_method='xi'.\n\n predecessor_correction:bool, optional (default=True)\n Correct clusters according to the predecessors calculated by OPTICS [R2c55e37003fe-2].\n This parameter has minimal effect on most datasets. Used only when cluster_method='xi'.\n\n min_cluster_size: int > 1 or float between 0 and 1 (default=None)\n Minimum number of samples in an OPTICS cluster, expressed as an absolute number or a \n fraction of the number of samples (rounded to be at least 2). If None, the value of \n min_samples is used instead. Used only when cluster_method='xi'.\n\n algorithm: {‘auto’, ‘ball_tree’, ‘kd_tree’, ‘brute’}, optional\n Algorithm used to compute the nearest neighbors:\n\n ‘ball_tree’ will use BallTree\n ‘kd_tree’ will use KDTree\n ‘brute’ will use a brute-force search.\n ‘auto’ will attempt to decide the most appropriate algorithm based on the values passed\n to fit method. (default)\n\n Note: fitting on sparse input will override the setting of this parameter, using brute \n force.\n\n leaf_size: int, optional (default=30)\n Leaf size passed to BallTree or KDTree. This can affect the speed of the construction \n and query, as well as the memory required to store the tree. The optimal value depends on\n the nature of the problem.\n\n n_jobs: int or None, optional (default=None)\n The number of parallel jobs to run for neighbors search. None means 1 unless in a \n joblib.parallel_backend context. -1 means using all processors. 
See Glossary for more det-\n ails.\n \"\"\"\n def __init__(\n self,\n min_samples: float = 5, # float between 0 and 1 else int\n max_eps: float = np.inf,\n metric: str = 'minkowski',\n p: int = 2, #only when the minkowski metric is choosen\n metric_params: dict = None, # additional keywords for the metric function\n cluster_method: str = 'xi',\n eps: float = None, # by default it assumes the same value as max_eps (Only used when cluster_method='dbscan')\n xi: float = 0.05, # only between 0 and 1 (Only used when cluster_method='xi')\n predecessor_correction: bool = True, # only used when cluster_method='xi'\n min_cluster_size: float = None, # float between 0 and 1 else int\n algorithm: str = 'auto',\n leaf_size: int = 30, # only for BallTree or KDTree\n n_jobs: int = None # -1 mean using all processors\n ):\n super().__init__()\n if min_samples <= 1 and min_samples >= 0:\n self.__min_samples: float = min_samples\n elif min_samples > 1:\n self.__min_samples: int = round(min_samples)\n else:\n Logger.error(\"min_samples is smaller than 0.\")\n raise Exception(\"min_samples is smaller than 0\")\n self.__max_eps: float = max_eps\n self.__metric: str = metric\n self.__p: int = p\n self.__metric_params: dict = None\n self.__cluster_method: str = cluster_method\n self.__eps: float = eps\n if xi >= 0 and xi <= 1:\n self.__xi : float = xi \n else:\n Logger.warning(\"xi is not between 0 and 1. Default Value was set! xi = 0.05\")\n self.__xi : float = 0.05\n self.__predecessor_correction: bool = predecessor_correction\n self.__algorithm: str = algorithm\n self.__leaf_size: int = leaf_size\n self.__n_jobs: int = n_jobs\n if min_cluster_size == None or (min_cluster_size >= 0 and min_cluster_size <= 1):\n self.__min_cluster_size : float = min_cluster_size \n else:\n Logger.warning(\"min is not between 0 and 1 or None. Default Value was set! min_cluster_size = None\")\n self.__min_cluster_size : float = None\n \n try:\n self.__cluster_instance: OPTICS = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n\n # sklearn.cluster._optics.OPTICS\n\n def __create_optics_cluster_instance(self) -> OPTICS :\n if self.__min_samples < 0:\n Logger.error(\"min_samples is smaller than 0.\")\n raise Exception(\"min_samples is smaller than 0\")\n elif self.__min_samples > 1:\n self.__min_samples = round(self.__min_samples)\n \n if self.__cluster_method != \"xi\" and self.__cluster_method != \"dbscan\":\n Logger.error(\"Not valid cluster_method.\")\n raise Exception(\"Not valid cluster_method.\")\n \n if self.__min_cluster_size != None and self.__min_cluster_size < 0 and self.__min_cluster_size > 1 :\n Logger.warning(\"min is not between 0 and 1 or None. Default Value was set! min_cluster_size = None\")\n self.__min_cluster_size : float = None\n \n if self.__algorithm != \"auto\" and self.__algorithm != \"ball_tree\" and self.__algorithm != \"kd_tree\" and self.__algorithm != \"brute\":\n Logger.error(\"Not valid algorithm method.\")\n raise Exception(\"Not valid algorithm method.\")\n\n if self.__cluster_method == \"xi\":\n if self.__xi > 1 and self.__xi < 0:\n Logger.warning(\"xi is not between 0 and 1. Default Value was set! 
xi = 0.05\")\n self.__xi : float = 0.05\n\n if self.__algorithm == \"ball_tree\" or self.__algorithm == \"kd_tree\":\n\n if self.__metric == \"minkowski\":\n # xi, ball algorithm , minkowski\n return OPTICS( min_samples = self.__min_samples,\n max_eps = self.__max_eps,\n metric = self.__metric,\n p = self.__p,\n metric_params = self.__metric_params,\n cluster_method = self.__cluster_method,\n xi = self.__xi,\n predecessor_correction = self.__predecessor_correction,\n min_cluster_size = self.__min_cluster_size,\n algorithm = self.__algorithm,\n leaf_size = self.__leaf_size,\n n_jobs = self.__n_jobs\n )\n else:\n # xi, ball algorithm , not minkowski\n return OPTICS( min_samples = self.__min_samples,\n max_eps = self.__max_eps,\n metric = self.__metric,\n metric_params = self.__metric_params,\n cluster_method = self.__cluster_method,\n xi = self.__xi,\n predecessor_correction = self.__predecessor_correction,\n min_cluster_size = self.__min_cluster_size,\n algorithm = self.__algorithm,\n leaf_size = self.__leaf_size,\n n_jobs = self.__n_jobs\n )\n else:\n if self.__metric == \"minkowski\":\n # xi, not ball algorithm, minkowski\n return OPTICS( min_samples = self.__min_samples,\n max_eps = self.__max_eps,\n metric = self.__metric,\n p = self.__p,\n metric_params = self.__metric_params,\n cluster_method = self.__cluster_method,\n xi = self.__xi,\n predecessor_correction = self.__predecessor_correction,\n min_cluster_size = self.__min_cluster_size,\n algorithm = self.__algorithm,\n n_jobs = self.__n_jobs\n )\n else:\n # xi, not ball algorithm , not minkowski\n return OPTICS( min_samples = self.__min_samples,\n max_eps = self.__max_eps,\n metric = self.__metric,\n metric_params = self.__metric_params,\n cluster_method = self.__cluster_method,\n xi = self.__xi,\n predecessor_correction = self.__predecessor_correction,\n min_cluster_size = self.__min_cluster_size,\n algorithm = self.__algorithm,\n n_jobs = self.__n_jobs\n )\n\n\n elif self.__cluster_method == \"dbscan\":\n if self.__algorithm == \"ball_tree\" or self.__algorithm == \"ball_tree\":\n\n if self.__metric == \"minkowski\":\n # dbscan, ball algorithm , minkowski\n return OPTICS( min_samples = self.__min_samples,\n max_eps = self.__max_eps,\n metric = self.__metric,\n p = self.__p,\n metric_params = self.__metric_params,\n cluster_method = self.__cluster_method,\n eps = self.__eps,\n min_cluster_size = self.__min_cluster_size,\n algorithm = self.__algorithm,\n leaf_size = self.__leaf_size,\n n_jobs = self.__n_jobs\n )\n else:\n # dbscan, ball algorithm , not minkowski\n return OPTICS( min_samples = self.__min_samples,\n max_eps = self.__max_eps,\n metric = self.__metric,\n metric_params = self.__metric_params,\n cluster_method = self.__cluster_method,\n eps = self.__eps,\n min_cluster_size = self.__min_cluster_size,\n algorithm = self.__algorithm,\n leaf_size = self.__leaf_size,\n n_jobs = self.__n_jobs\n )\n\n else:\n if self.__metric == \"minkowski\":\n # dbscan, not ball algorithm, minkowski\n return OPTICS( min_samples = self.__min_samples,\n max_eps = self.__max_eps,\n metric = self.__metric,\n p = self.__p,\n metric_params = self.__metric_params,\n cluster_method = self.__cluster_method,\n eps = self.__eps,\n min_cluster_size = self.__min_cluster_size,\n algorithm = self.__algorithm,\n n_jobs = self.__n_jobs\n )\n else:\n # dbscan, not ball algorithm , not minkowski\n return OPTICS( min_samples = self.__min_samples,\n max_eps = self.__max_eps,\n metric = self.__metric,\n metric_params = self.__metric_params,\n cluster_method = 
self.__cluster_method,\n eps = self.__eps,\n min_cluster_size = self.__min_cluster_size,\n algorithm = self.__algorithm,\n n_jobs = self.__n_jobs\n )\n\n def create_cluster(self, position_matrix : np.matrix, similarity_matrix : np.matrix ) -> np.matrix:\n try:\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n\n try: \n self.__cluster_instance.fit(position_matrix)\n return self.__cluster_instance.labels_ \n except Exception as error:\n Logger.error(\"An Exception occurs by clustering the postion_matrix: \" + str(error))\n raise Exception(\"Exception occurs in Method create_cluster by clustering the positon_matrix.\") \n \n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n # getter methodes\n def get_min_samples(self) -> float:\n return self.__min_samples\n \n def get_max_eps(self) -> float:\n return self.__max_eps\n \n def get_metric(self) -> str:\n return self.__metric \n \n def get_p(self) -> int:\n return self.__p\n \n def get_metric_params(self) -> dict:\n return self.__metric_params\n \n def get_cluster_method(self) -> str:\n return self.__cluster_method\n \n def get_eps(self) -> float:\n return self.__eps\n \n def get_xi(self) -> float:\n return self.__xi\n \n def get_predecessor_correction(self) -> bool:\n return self.__predecessor_correction \n \n def get_min_cluster_size(self) -> float:\n return self.__min_cluster_size\n \n def get_algorithm(self) -> str:\n return self.__algorithm\n \n def get_leaf_size(self) -> int:\n return self.__leaf_size\n \n def get_n_jobs(self) -> int:\n return self.__n_jobs\n \n def get_cluster_instance(self) -> OPTICS:\n return self.__cluster_instance\n\n # setter methodes\n def set_min_samples(self, min_samples: float = 5) -> None:\n try:\n self.__min_samples = min_samples\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n\n def set_max_eps(self, max_eps: float = np.inf) -> None:\n try:\n self.__max_eps = max_eps\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_metric(self, metric: str = 'minkowski') -> None:\n try:\n self.__metric = metric\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_p(self, p: int = 2) -> None:\n try:\n self.__p = p\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_metric_params(self, metric_params: dict = None) -> None:\n try:\n self.__metric_params = metric_params\n self.__cluster_instance = 
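# --- Design-note sketch, not part of the original module --------------------
# The if/else ladder in __create_optics_cluster_instance() only decides which
# optional keyword arguments are forwarded to sklearn's OPTICS. The same
# selection can be expressed by assembling a kwargs dict once; this is a
# hedged alternative sketch, not the module's actual implementation.
from sklearn.cluster import OPTICS

def build_optics(min_samples=5, max_eps=float("inf"), metric="minkowski", p=2,
                 metric_params=None, cluster_method="xi", eps=None, xi=0.05,
                 predecessor_correction=True, min_cluster_size=None,
                 algorithm="auto", leaf_size=30, n_jobs=None) -> OPTICS:
    kwargs = dict(min_samples=min_samples, max_eps=max_eps, metric=metric,
                  metric_params=metric_params, cluster_method=cluster_method,
                  min_cluster_size=min_cluster_size, algorithm=algorithm,
                  n_jobs=n_jobs)
    if metric == "minkowski":
        kwargs["p"] = p                       # p only applies to the minkowski metric
    if cluster_method == "xi":
        kwargs["xi"] = xi
        kwargs["predecessor_correction"] = predecessor_correction
    else:                                     # cluster_method == "dbscan"
        kwargs["eps"] = eps
    if algorithm in ("ball_tree", "kd_tree"):
        kwargs["leaf_size"] = leaf_size       # leaf_size only applies to tree algorithms
    return OPTICS(**kwargs)
# ----------------------------------------------------------------------------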
self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_cluster_method(self, cluster_method: str = 'xi') -> None:\n try:\n self.__cluster_method = cluster_method\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_eps(self, eps: float = None) -> None:\n try:\n self.__eps = eps\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_xi(self, xi: float = 0.05) -> None:\n try:\n self.__xi = xi\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_predecessor_correction(self, predecessor_correction: bool = True) -> None:\n try:\n self.__predecessor_correction = predecessor_correction\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_min_cluster_size(self, min_cluster_size: float = None) -> None:\n try:\n self.__min_cluster_size = min_cluster_size\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_algorithm(self, algorithm: str = 'auto') -> None:\n try:\n self.__algorithm = algorithm\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_leaf_size(self, leaf_size: int = 30) -> None:\n try:\n self.__leaf_size = leaf_size\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n \n def set_n_jobs(self, n_jobs: int = None) -> None:\n try:\n self.__n_jobs = n_jobs\n self.__cluster_instance = self.__create_optics_cluster_instance()\n except Exception as error:\n Logger.error(\"An Exception occurs by OPTICS initialization: \" + str(error))\n raise Exception(\"Exception occurs in Method __create_optics_instance by OPTICS initialization.\")\n\n # setter and getter params\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select 
(options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"OPTICS\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName ,\"header\"))\n\n parameter_minSamples = self.get_min_samples()\n description_minSamples = \"int > 1 or float between 0 and 1 (default=5)\"\\\n +\"The number of samples in a neighborhood for a point to be considered as a \"\\\n +\"core point. Also, up and down steep regions can’t have more then min_samples \"\\\n +\"consecutive non-steep points. Expressed as an absolute number or a \" \\\n +\"fraction of the number of samples (rounded to be at least 2).\"\n params.append((\"minSamples\", \"min Samples\" ,description_minSamples, parameter_minSamples, \"number\", 1 , 1 ))\n \n parameter_maxEps = self.get_max_eps()\n description_maxEps = \"float, optional (default=np.inf) \"\\\n +\"The maximum distance between two samples for one to be considered as in \"\\\n +\"the neighborhood of the other. Default value of np.inf will identify clusters \"\\\n +\"across all scales; reducing max_eps will result in shorter run times.\"\n params.append((\"maxEps\", \"max Epsilon\" ,description_maxEps, parameter_maxEps, \"text\" ))\n \n parameter_metric = self.get_metric()\n description_metric = \"str or callable, optional (default=’minkowski’) \"\\\n +\"Metric to use for distance computation. Any metric from scikit-learn or \"\\\n +\"scipy.spatial.distance can be used.\"\n params.append((\"metric\", \" Metric\" ,description_metric, parameter_metric , \"select\" , ('precomputed','minkowski','cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan')))\n \n parameter_p = self.get_p()\n description_p = \"int, optional (default=2) \"\\\n +\"Parameter for the Minkowski metric from sklearn.metrics.pairwise_distances. \"\\\n +\"When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean\"\\\n +\"_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\"\n params.append((\"p\" , \"Parameter p for minkowski\" ,description_p, parameter_p, \"number\" , 1,1 ))\n \n parameter_cluster_method = self.get_cluster_method()\n description_cluster_method = \"str, optional (default=’xi’) \"\\\n +\"The extraction method used to extract clusters using the calculated reachability \"\\\n +\"and ordering. Possible values are “xi” and “dbscan”.\"\n params.append((\"cluster_method\",\"Cluster Method\",description_cluster_method, parameter_cluster_method, \"select\" , (\"xi\" , \"dbscan\")))\n \n parameter_eps = self.get_eps()\n description_eps = \"float, optional (default=None) \"\\\n +\"The maximum distance between two samples for one to be considered as in the \"\\\n +\"neighborhood of the other. By default it assumes the same value as max_eps. Used only \"\\\n +\"when cluster_method='dbscan'.\"\n params.append((\"eps\", \"Epsilon\",description_eps, parameter_eps , \"text\"))\n \n parameter_xi = self.get_xi()\n description_xi = \"float, between 0 and 1, optional (default=0.05) \"\\\n +\"Determines the minimum steepness on the reachability plot that constitutes a cluster \"\\\n +\"boundary. For example, an upwards point in the reachability plot is defined \" \\\n +\"by the ratio from one point to its successor being at most 1-xi. 
Used only when \"\\\n +\"cluster_method='xi'.\"\n params.append((\"xi\",\"Xi\" ,description_xi, parameter_xi, \"number\" , 0, 0.001))\n \n parameter_predecessor_correction = self.get_predecessor_correction()\n description_predecessor_correction = \"bool, optional (default=True) \"\\\n +\"Correct clusters according to the predecessors calculated by OPTICS [R2c55e37003fe-2]. \"\\\n +\"This parameter has minimal effect on most datasets. Used only when cluster_method='xi'.\"\n params.append((\"predecessor_correction\", \"Predecessor Correction\" ,description_predecessor_correction, parameter_predecessor_correction, \"checkbox\"))\n \n parameter_min_cluster_size = self.get_min_cluster_size()\n description_min_cluster_size = \"int > 1 or float between 0 and 1 (default=None) \"\\\n +\"Minimum number of samples in an OPTICS cluster, expressed as an absolute number or a \"\\\n +\"fraction of the number of samples (rounded to be at least 2). If None, the value of \"\\\n +\"min_samples is used instead. Used only when cluster_method='xi'.\"\n params.append((\"min_cluster_size\",\"Min Cluster Size\",description_min_cluster_size, parameter_min_cluster_size, \"text\"))\n \n parameter_algorithm = self.get_algorithm()\n description_algorithm = \"{‘auto’, ‘ball_tree’, ‘kd_tree’, ‘brute’}, optional \"\\\n +\"Algorithm used to compute the nearest neighbors: \"\\\n +\"‘ball_tree’ will use BallTree \"\\\n +\"‘kd_tree’ will use KDTree \"\\\n +\"‘brute’ will use a brute-force search. \"\\\n +\"‘auto’ will attempt to decide the most appropriate algorithm based on the values passed \"\\\n +\"to fit method. (default)\"\n params.append((\"algorithm\",\"Algorithm\" ,description_algorithm, parameter_algorithm , \"select\" , ('auto', 'ball_tree', 'kd_tree', 'brute')))\n \n parameter_leaf_size = self.get_leaf_size()\n description_leaf_size = \"int, optional (default=30) \"\\\n +\"Leaf size passed to BallTree or KDTree. This can affect the speed of the construction \"\\\n +\"and query, as well as the memory required to store the tree. 
The optimal value depends on \"\\\n +\"the nature of the problem.\"\n params.append((\"leaf_size\",\"Leaf Size\" ,description_leaf_size, parameter_leaf_size, \"number\", 0 , 1))\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n\n def set_param_list(self, params: list = []) -> np.matrix:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n for param in params:\n if param[0] == \"minSamples\":\n self.set_min_samples(param[3])\n elif param[0] == \"maxEps\":\n self.set_max_eps(param[3])\n elif param[0] == \"metric\":\n self.set_metric(param[3])\n elif param[0] == \"p\":\n self.set_p(param[3])\n elif param[0] == \"cluster_method\":\n self.set_cluster_method(param[3])\n elif param[0] == \"eps\":\n self.set_eps(param[3])\n elif param[0] == \"xi\":\n self.set_xi(param[3])\n elif param[0] == \"predecessor_correction\":\n self.set_predecessor_correction(param[3])\n elif param[0] == \"min_cluster_size\":\n self.set_min_cluster_size(param[3])\n elif param[0] == \"algorithm\":\n self.set_algorithm(param[3])\n elif param[0] == \"leaf_size\":\n self.set_leaf_size(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n\n\nclass QuantumBackends(enum.Enum):\n custom_ibmq = \"custom_ibmq\"\n aer_statevector_simulator = \"aer_statevector_simulator\"\n aer_qasm_simulator = \"aer_qasm_simulator\"\n ibmq_qasm_simulator = \"ibmq_qasm_simulator\"\n ibmq_16_melbourne = \"ibmq_16_melbourne\"\n ibmq_armonk = \"ibmq_armonk\"\n ibmq_5_yorktown = \"ibmq_5_yorktown\"\n ibmq_ourense = \"ibmq_ourense\"\n ibmq_vigo = \"ibmq_vigo\"\n ibmq_valencia = \"ibmq_valencia\"\n ibmq_athens = \"ibmq_athens\"\n ibmq_santiago = \"ibmq_santiago\"\n\n @staticmethod\n def get_quantum_backend(backendEnum, ibmqToken = None, customBackendName = None):\n backend = None\n if backendEnum.name.startswith(\"aer\"):\n # Use local AER backend\n aerBackendName = backendEnum.name[4:]\n backend = Aer.get_backend(aerBackendName)\n elif backendEnum.name.startswith(\"ibmq\"):\n # Use IBMQ backend\n provider = IBMQ.enable_account(ibmqToken)\n backend = provider.get_backend(backendEnum.name)\n elif backendEnum.name.startswith(\"custom_ibmq\"):\n provider = IBMQ.enable_account(ibmqToken)\n backend = provider.get_backend(customBackendName)\n else:\n Logger.error(\"Unknown quantum backend specified!\")\n return backend\n\n\nclass VQEMaxCut(Clustering):\n def __init__(self,\n number_of_clusters = 1,\n max_trials: int = 1,\n reps: int = 1,\n entanglement: str = 'linear',\n backend = QuantumBackends.aer_statevector_simulator,\n ibmq_token: str = \"\",\n ibmq_custom_backend: str = \"\"):\n super().__init__()\n self.__number_of_clusters = number_of_clusters\n self.__max_trials = max_trials\n self.__reps = reps\n self.__entanglement = entanglement\n self.__backend = backend\n self.__ibmq_token = ibmq_token\n self.__ibmq_custom_backend = ibmq_custom_backend\n \n def create_cluster(self, position_matrix : np.matrix , similarity_matrix : np.matrix) -> np.matrix:\n if self.__number_of_clusters == 1:\n return self.__vqeAlgorithmus(similarity_matrix)\n else:\n # 
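# --- Illustrative sketch, not part of the original module -------------------
# Resolving a local simulator through the QuantumBackends helper defined
# above: the "aer_" prefix is stripped, so "aer_qasm_simulator" is looked up
# via Aer.get_backend("qasm_simulator"). No IBMQ token is needed for the
# local Aer backends.
backend = QuantumBackends.get_quantum_backend(QuantumBackends.aer_qasm_simulator)
# -> the local Aer backend registered as "qasm_simulator"
# ----------------------------------------------------------------------------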
rekursiv Algorithmus for more than two clusters\n label = np.ones(similarity_matrix.shape[0])\n label.astype(np.int)\n label_all = np.zeros(similarity_matrix.shape[0])\n label_all.astype(np.int)\n label = self.__rekursivAlgorithmus(self.__number_of_clusters, similarity_matrix, label , label_all , 1)\n #print(\"Done\")\n return label.astype(np.int)\n\n def __rekursivAlgorithmus(self, iteration : int, similarity_matrix: np.matrix , label: np.matrix , label_all: np.matrix , category: int) -> np.matrix:\n # rekursiv Algorithmus for more than two clusters\n if iteration == 0:\n return label\n else:\n if len(label) == 1 or len(label) == 0:\n return label\n new_label = self.__vqeAlgorithmus(similarity_matrix)\n \n z = -1\n check_label = np.ones(len(label))\n check_label.astype(np.int)\n for i in range(len(label)):\n check_label[i] = label[i]\n for i in range(len(label)):\n if check_label[i] == category:\n z = z+1\n label_all[i] = label_all[i] + new_label[z]*pow(2,iteration-1)\n label[i] = new_label[z]\n Logger.normal(\"label after \"+str(iteration) + \" iteration :\" + str(label_all))\n\n # ones: rekursion only with ones labels in new label\n ones = self.__split_Matrix(similarity_matrix,new_label,1)\n self.__rekursivAlgorithmus(iteration-1, ones, label , label_all , 1)\n \n # change label for the zero cluster\n z = -1\n for i in range(len(label)):\n if check_label[i] == 1:\n z = z+1\n if new_label[z] == 0:\n label[i] = 1\n else: \n label[i] = 0\n else : \n label[i] = 0\n \n #zeros: rekursion only with zero labels in new label\n zeros = self.__split_Matrix(similarity_matrix,new_label,0)\n\n self.__rekursivAlgorithmus(iteration-1, zeros, label , label_all , 1) \n return label_all\n\n def __split_Matrix(self, similarity_matrix : np.matrix , label : np.matrix , category : int) -> np.matrix:\n # split the similarity matrix in one smaller matrix. 
These matrix contains only similarities with the right label\n npl = 0\n for i in range(len(label)):\n if label[i] == category:\n npl = npl+1\n\n NSM = np.zeros((npl,npl))\n s = -1\n t = -1\n for i in range(len(label)):\n if label[i] == category:\n s += 1\n t = -1\n for j in range(len(label)):\n if label[j] == category:\n t += 1\n NSM[s,t] = similarity_matrix[i,j]\n return NSM\n\n def __vqeAlgorithmus(self, similarity_matrix: np.matrix) -> np.matrix:\n if similarity_matrix.any() == np.zeros((similarity_matrix.shape)).any():\n label = np.zeros(similarity_matrix.shape[0])\n return label.astype(np.int)\n qubitOp, offset = max_cut.get_operator(similarity_matrix)\n seed = 10598\n\n backend = QuantumBackends.get_quantum_backend(self.__backend, self.__ibmq_token, self.__ibmq_custom_backend)\n\n quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed)\n\n spsa = SPSA(max_trials=self.__max_trials)\n ry = TwoLocal(qubitOp.num_qubits, 'ry', 'cz', reps=self.__reps, entanglement=self.__entanglement)\n vqe = VQE(qubitOp, ry, spsa, quantum_instance=quantum_instance)\n\n # run VQE\n result = vqe.run(quantum_instance)\n\n # print results\n x = sample_most_likely(result.eigenstate)\n print('energy:', result.eigenvalue.real)\n print('time:', result.optimizer_time)\n print('max-cut objective:', result.eigenvalue.real + offset)\n print('solution:', max_cut.get_graph_solution(x))\n print('solution objective:', max_cut.max_cut_value(x, similarity_matrix))\n solution = max_cut.get_graph_solution(x)\n return solution.astype(np.int) \n\n # getter and setter methodes\n def get_number_of_clusters(self) -> int:\n return self.__number_of_clusters\n def get_max_trials(self) -> int:\n return self.__max_trials\n def get_reps(self) -> int:\n return self.__reps\n def get_entanglement(self) -> str:\n return self.__entanglement\n def get_backend(self) -> str:\n return self.__backend\n def get_ibmq_token(self) -> str:\n return self.__ibmq_token\n def get_ibmq_custom_backend(self):\n return self.__ibmq_custom_backend\n\n def set_number_of_clusters(self, number_of_clusters : int = 1) -> None:\n if isinstance(number_of_clusters, int) and number_of_clusters > 0:\n self.__number_of_clusters = number_of_clusters\n def set_max_trials(self, max_trials : int = 1) -> None:\n if isinstance(max_trials, int) and max_trials > 0:\n self.__max_trials = max_trials\n def set_reps(self, reps = 1 ) -> int:\n if isinstance(reps, int) and reps > 0:\n self.__reps = reps\n def set_entanglement(self, entanglement : str = 'linear') -> str:\n self.__entanglement = entanglement\n def set_backend(self, backend: str = QuantumBackends.aer_statevector_simulator) -> None:\n self.__backend = backend\n def set_ibmq_token(self, ibmq_token: str = \"\") -> None:\n self.__ibmq_token = ibmq_token\n def set_ibmq_custom_backend(self, ibmq_custom_backend: str = \"\"):\n self.__ibmq_custom_backend = ibmq_custom_backend\n\n #getter and setter params\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"QAOA MaxCut\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName ,\"header\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=1)\"\\\n +\"2**x Clusters would be generated\"\n 
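# --- Illustrative sketch, not part of the original module -------------------
# The recursive MaxCut scheme above composes one 0/1 bisection per level into
# the final label: at remaining depth d the current cut contributes
# new_label * 2**(d-1) to label_all, so number_of_clusters = k yields up to
# 2**k distinct labels. A toy trace with k = 2 and four points, using
# hypothetical cut outcomes:
level2_cut = [1, 1, 0, 0]        # first bisection, contributes bit 2**1
level1_cut_ones = [1, 0]         # second bisection of the "1" half, bit 2**0
level1_cut_zeros = [0, 1]        # second bisection of the "0" half, bit 2**0

labels = [
    level2_cut[0] * 2 + level1_cut_ones[0],    # point 0 -> 3
    level2_cut[1] * 2 + level1_cut_ones[1],    # point 1 -> 2
    level2_cut[2] * 2 + level1_cut_zeros[0],   # point 2 -> 0
    level2_cut[3] * 2 + level1_cut_zeros[1],   # point 3 -> 1
]
print(labels)   # [3, 2, 0, 1] -- up to 2**2 = 4 clusters
# ----------------------------------------------------------------------------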
params.append((\"numberClusters\", \"Number of Clusters\" ,description_number_of_clusters, parameter_number_of_clusters, \"number\", 1 , 1 ))\n \n parameter_max_trials = self.get_max_trials()\n description_max_trials = \"int > 0 (default 1) \"\\\n +\"For Simultaneous Perturbation Stochastic Approximation (SPSA) optimizer:\"\\\n +\"Maximum number of iterations to perform.\"\n params.append((\"maxTrials\", \"max Trials\" ,description_max_trials, parameter_max_trials,\"number\", 1 , 1 ))\n \n parameter_reps = self.get_reps()\n description_reps = \"int > 0 (default 1) \"\\\n +\"For The two-local circuit:\"\\\n +\"Specifies how often a block consisting of a rotation layer and entanglement layer is repeated.\"\n params.append((\"reps\" , \"Reps\" ,description_reps, parameter_reps, \"number\" , 1,1 ))\n\n parameter_entanglement = self.get_entanglement()\n description_entanglement = \"str default('linear') \"\\\n +\"A set of default entanglement strategies is provided:\"\\\n +\"'full' entanglement is each qubit is entangled with all the others.\"\\\n +\"'linear' entanglement is qubit i entangled with qubit i+1, for all i∈{0,1,...,n−2}, where n is the total number of qubits.\"\\\n +\"'circular' entanglement is linear entanglement but with an additional entanglement of the first and last qubit before the linear part.\"\\\n +\"'sca' (shifted-circular-alternating) entanglement is a generalized and modified version of the proposed circuit 14 in Sim et al.. \"\\\n +\"It consists of circular entanglement where the ‘long’ entanglement connecting the first with the last qubit is shifted by one each block.\"\\\n +\"Furthermore the role of control and target qubits are swapped every block (therefore alternating).\"\n\n params.append((\"entanglement\", \"Entanglement\" ,description_entanglement, parameter_entanglement , \"select\" , ('full','linear','circlular', 'sca')))\n\n parameter_backend = self.get_backend().value\n description_backend = \"Enum default(aer_statevector_simulator) \"\\\n + \" A list of possible backends. 
aer is a local simulator and ibmq are backends provided by IBM.\"\\\n + \" When using (custom_ibmq), a custom ibmq backend can be specified.\"\n params.append((\"quantumBackend\", \"QuantumBackend\", description_backend, parameter_backend, \"select\", [qb.value for qb in QuantumBackends]))\n\n parameter_ibmq_custom_backend = self.get_ibmq_custom_backend()\n description_ibmq_custom_backend = \"str default(\\\"\\\") \"\\\n + \" The name of a custom backend of ibmq.\"\n params.append((\"ibmqCustomBackend\", \"IBMQ-Custom-Backend\", description_ibmq_custom_backend, str(parameter_ibmq_custom_backend), \"text\", \"\", \"\"))\n\n parameter_ibmq_token = self.get_ibmq_token()\n description_ibmq_token = \"str default(\\\"\\\") \"\\\n + \" The token of an account accessing the IBMQ online service.\"\n params.append((\"ibmqToken\", \"IBMQ-Token\", description_ibmq_token, parameter_ibmq_token, \"text\", \"\", \"\"))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n \n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n self.set_number_of_clusters(param[3])\n elif param[0] == \"maxTrials\":\n self.set_max_trials(param[3])\n elif param[0] == \"reps\":\n self.set_reps(param[3])\n elif param[0] == \"entanglement\":\n self.set_entanglement(param[3])\n elif param[0] == \"quantumBackend\":\n self.set_backend(QuantumBackends[param[3]])\n elif param[0] == \"ibmqToken\":\n self.set_ibmq_token(param[3])\n elif param[0] == \"ibmqCustomBackend\":\n self.set_ibmq_custom_backend(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n\n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass ClassicNaiveMaxCut(Clustering):\n def __init__(self, number_of_clusters = 1):\n super().__init__()\n self.__number_of_clusters = number_of_clusters\n return\n \n def create_cluster(self, position_matrix : np.matrix , similarity_matrix : np.matrix) -> np.matrix:\n if self.__number_of_clusters == 1:\n return self.__classicNaiveMaxCutAlgo(similarity_matrix)\n else:\n # rekursiv Algorithmus for more than two clusters\n label = np.ones(similarity_matrix.shape[0])\n label.astype(np.int)\n label_all = np.zeros(similarity_matrix.shape[0])\n label_all.astype(np.int)\n label = self.__rekursivAlgorithmus(self.__number_of_clusters, similarity_matrix, label , label_all , 1)\n #print(\"Done\")\n return label.astype(np.int)\n\n def __rekursivAlgorithmus(self, iteration : int, similarity_matrix: np.matrix , label: np.matrix , label_all: np.matrix , category: int) -> np.matrix:\n # rekursiv Algorithmus for more than two clusters\n if iteration == 0:\n return label\n else:\n if len(label) == 1 or len(label) == 0:\n return label\n new_label = self.__classicNaiveMaxCutAlgo(similarity_matrix)\n \n z = -1\n check_label = np.ones(len(label))\n check_label.astype(np.int)\n for i in range(len(label)):\n check_label[i] = label[i]\n for i in range(len(label)):\n if check_label[i] == category:\n z = z+1\n label_all[i] = label_all[i] + new_label[z]*pow(2,iteration-1)\n label[i] = new_label[z]\n Logger.normal(\"label after \"+str(iteration) + \" iteration :\" + str(label_all))\n\n # ones: rekursion 
only with ones labels in new label\n ones = self.__split_Matrix(similarity_matrix,new_label,1)\n self.__rekursivAlgorithmus(iteration-1, ones, label , label_all , 1)\n \n # change label for the zero cluster\n z = -1\n for i in range(len(label)):\n if check_label[i] == 1:\n z = z+1\n if new_label[z] == 0:\n label[i] = 1\n else: \n label[i] = 0\n else : \n label[i] = 0\n \n #zeros: rekursion only with zero labels in new label\n zeros = self.__split_Matrix(similarity_matrix,new_label,0)\n\n self.__rekursivAlgorithmus(iteration-1, zeros, label , label_all , 1) \n return label_all\n\n def __split_Matrix(self, similarity_matrix : np.matrix , label : np.matrix , category : int) -> np.matrix:\n # split the similarity matrix in one smaller matrix. These matrix contains only similarities with the right label\n npl = 0\n for i in range(len(label)):\n if label[i] == category:\n npl = npl+1\n\n NSM = np.zeros((npl,npl))\n s = -1\n t = -1\n for i in range(len(label)):\n if label[i] == category:\n s += 1\n t = -1\n for j in range(len(label)):\n if label[j] == category:\n t += 1\n NSM[s,t] = similarity_matrix[i,j]\n return NSM\n\n def __create_graph(self, similarity_matrix: np.matrix) -> nx.Graph:\n probSize = similarity_matrix.shape[0]\n graph = nx.Graph()\n\n for i in range(0, probSize):\n for j in range(0, probSize):\n if i != j:\n graph.add_edge(i, j, weight = similarity_matrix[i][j])\n \n return graph\n\n def __classicNaiveMaxCutAlgo(self, similarity_matrix: np.matrix) -> np.matrix:\n label = np.zeros(similarity_matrix.shape[0])\n\n if similarity_matrix.any() == np.zeros((similarity_matrix.shape)).any():\n return label.astype(np.int)\n\n # Create classic naive max cut solver\n graph = self.__create_graph(similarity_matrix)\n\n # Solve\n\n check: Timer = Timer() \n check.start()\n\n solver = ClassicNaiveMaxCutSolver(graph)\n (cutValue, cutEdges) = solver.solve()\n\n # Remove the max cut edges\n for (u, v) in cutEdges:\n graph.remove_edge(u, v)\n\n # Plot the graphs\n #from matplotlib import pyplot as plt\n #pos = nx.spring_layout(graph)\n #nx.draw(graph, pos)\n #labels = nx.get_edge_attributes(graph, 'weight')\n #nx.draw_networkx_edge_labels(graph, pos, edge_labels = labels)\n #plt.savefig(\"CuttedGraph.png\", format=\"PNG\")\n #plt.clf()\n\n # define element 0 (left side of first cut) is cluster 0\n element0 = cutEdges[0][0]\n label[element0] = 0\n\n for node in graph.nodes():\n # if node has path to element 0, then cluster 0\n # if not then cluster 1\n if nx.algorithms.shortest_paths.generic.has_path(graph, element0, node):\n label[node] = 0\n else:\n label[node] = 1\n\n check.stop()\n\n # print results\n print('solution:', str(cutEdges))\n print('solution objective:', str(cutValue))\n\n return label.astype(np.int)\n\n # getter and setter methodes\n def get_number_of_clusters(self) -> int:\n return self.__number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters : int = 2) -> None:\n if isinstance(number_of_clusters, int) and number_of_clusters > 0:\n self.__number_of_clusters = number_of_clusters\n\n #getter and setter params\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"Classic Naive MaxCut\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName ,\"header\"))\n\n parameter_number_of_clusters = 
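# --- Design-note sketch, not part of the original module --------------------
# After the cut edges are removed, the per-node has_path() loop above assigns
# cluster 0 to every node still connected to one endpoint of the first cut
# edge. The same labelling can be read off the connected component in one
# pass; shown here as a hedged alternative, not the module's implementation.
import networkx as nx
import numpy as np

def labels_from_cut(graph: nx.Graph, cut_edges) -> np.ndarray:
    g = graph.copy()
    g.remove_edges_from(cut_edges)
    anchor = cut_edges[0][0]                      # side of the cut called cluster 0
    side0 = nx.node_connected_component(g, anchor)
    labels = np.zeros(g.number_of_nodes(), dtype=int)
    for node in g.nodes():                        # nodes are 0..n-1 in this module
        labels[node] = 0 if node in side0 else 1
    return labels
# ----------------------------------------------------------------------------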
self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\"\\\n +\"2**x Clusters would be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\" ,description_number_of_clusters, parameter_number_of_clusters, \"number\", 1 , 1 ))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n \n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n self.set_number_of_clusters(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n \n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass SdpMaxCut(Clustering):\n def __init__(self, number_of_clusters = 1):\n super().__init__()\n self.__number_of_clusters = number_of_clusters\n return\n \n def create_cluster(self, position_matrix : np.matrix , similarity_matrix : np.matrix) -> np.matrix:\n if self.__number_of_clusters == 1:\n return self.__sdpMaxCutAlgo(similarity_matrix)\n else:\n # rekursiv Algorithmus for more than two clusters\n label = np.ones(similarity_matrix.shape[0])\n label.astype(np.int)\n label_all = np.zeros(similarity_matrix.shape[0])\n label_all.astype(np.int)\n label = self.__rekursivAlgorithmus(self.__number_of_clusters, similarity_matrix, label , label_all , 1)\n #print(\"Done\")\n return label.astype(np.int)\n\n def __rekursivAlgorithmus(self, iteration : int, similarity_matrix: np.matrix , label: np.matrix , label_all: np.matrix , category: int) -> np.matrix:\n # rekursiv Algorithmus for more than two clusters\n if iteration == 0:\n return label\n else:\n if len(label) == 1 or len(label) == 0:\n return label\n new_label = self.__sdpMaxCutAlgo(similarity_matrix)\n \n z = -1\n check_label = np.ones(len(label))\n check_label.astype(np.int)\n for i in range(len(label)):\n check_label[i] = label[i]\n for i in range(len(label)):\n if check_label[i] == category:\n z = z+1\n label_all[i] = label_all[i] + new_label[z]*pow(2,iteration-1)\n label[i] = new_label[z]\n Logger.normal(\"label after \"+str(iteration) + \" iteration :\" + str(label_all))\n\n # ones: rekursion only with ones labels in new label\n ones = self.__split_Matrix(similarity_matrix,new_label,1)\n self.__rekursivAlgorithmus(iteration-1, ones, label , label_all , 1)\n \n # change label for the zero cluster\n z = -1\n for i in range(len(label)):\n if check_label[i] == 1:\n z = z+1\n if new_label[z] == 0:\n label[i] = 1\n else: \n label[i] = 0\n else : \n label[i] = 0\n \n #zeros: rekursion only with zero labels in new label\n zeros = self.__split_Matrix(similarity_matrix,new_label,0)\n\n self.__rekursivAlgorithmus(iteration-1, zeros, label , label_all , 1) \n return label_all\n\n def __split_Matrix(self, similarity_matrix : np.matrix , label : np.matrix , category : int) -> np.matrix:\n # split the similarity matrix in one smaller matrix. 
These matrix contains only similarities with the right label\n npl = 0\n for i in range(len(label)):\n if label[i] == category:\n npl = npl+1\n\n NSM = np.zeros((npl,npl))\n s = -1\n t = -1\n for i in range(len(label)):\n if label[i] == category:\n s += 1\n t = -1\n for j in range(len(label)):\n if label[j] == category:\n t += 1\n NSM[s,t] = similarity_matrix[i,j]\n return NSM\n\n def __create_graph(self, similarity_matrix: np.matrix) -> nx.Graph:\n probSize = similarity_matrix.shape[0]\n graph = nx.Graph()\n\n for i in range(0, probSize):\n for j in range(0, probSize):\n if i != j:\n graph.add_edge(i, j, weight = similarity_matrix[i][j])\n \n return graph\n\n def __sdpMaxCutAlgo(self, similarity_matrix: np.matrix) -> np.matrix:\n label = np.zeros(similarity_matrix.shape[0])\n\n if similarity_matrix.any() == np.zeros((similarity_matrix.shape)).any():\n return label.astype(np.int)\n\n # Create sdp max cut solver\n graph = self.__create_graph(similarity_matrix)\n\n # Solve\n\n check: Timer = Timer() \n check.start()\n\n solver = SdpMaxCutSolver(graph)\n (cutValue, cutEdges) = solver.solve()\n\n # Remove the max cut edges\n for (u, v) in cutEdges:\n graph.remove_edge(u, v)\n\n # Plot the graphs\n #from matplotlib import pyplot as plt\n #pos = nx.spring_layout(graph)\n #nx.draw(graph, pos)\n #labels = nx.get_edge_attributes(graph, 'weight')\n #nx.draw_networkx_edge_labels(graph, pos, edge_labels = labels)\n #plt.savefig(\"CuttedGraph.png\", format=\"PNG\")\n #plt.clf()\n\n # define element 0 (left side of first cut) is cluster 0\n element0 = cutEdges[0][0]\n label[element0] = 0\n\n for node in graph.nodes():\n # if node has path to element 0, then cluster 0\n # if not then cluster 1\n if nx.algorithms.shortest_paths.generic.has_path(graph, element0, node):\n label[node] = 0\n else:\n label[node] = 1\n\n check.stop()\n\n # print results\n print('solution:', str(cutEdges))\n print('solution objective:', str(cutValue))\n\n return label.astype(np.int)\n\n # getter and setter methodes\n def get_number_of_clusters(self) -> int:\n return self.__number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters : int = 2) -> None:\n if isinstance(number_of_clusters, int) and number_of_clusters > 0:\n self.__number_of_clusters = number_of_clusters\n\n #getter and setter params\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"Semidefinite Programming MaxCut\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName ,\"header\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\"\\\n +\"2**x Clusters would be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\" ,description_number_of_clusters, parameter_number_of_clusters, \"number\", 1 , 1 ))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n \n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n 
self.set_number_of_clusters(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n \n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass BmMaxCut(Clustering):\n def __init__(self, number_of_clusters = 1):\n super().__init__()\n self.__number_of_clusters = number_of_clusters\n return\n \n def create_cluster(self, position_matrix : np.matrix , similarity_matrix : np.matrix) -> np.matrix:\n if self.__number_of_clusters == 1:\n return self.__bmMaxCutAlgo(similarity_matrix)\n else:\n # rekursiv Algorithmus for more than two clusters\n label = np.ones(similarity_matrix.shape[0])\n label.astype(np.int)\n label_all = np.zeros(similarity_matrix.shape[0])\n label_all.astype(np.int)\n label = self.__rekursivAlgorithmus(self.__number_of_clusters, similarity_matrix, label , label_all , 1)\n #print(\"Done\")\n return label.astype(np.int)\n\n def __rekursivAlgorithmus(self, iteration : int, similarity_matrix: np.matrix , label: np.matrix , label_all: np.matrix , category: int) -> np.matrix:\n # rekursiv Algorithmus for more than two clusters\n if iteration == 0:\n return label\n else:\n if len(label) == 1 or len(label) == 0:\n return label\n new_label = self.__bmMaxCutAlgo(similarity_matrix)\n \n z = -1\n check_label = np.ones(len(label))\n check_label.astype(np.int)\n for i in range(len(label)):\n check_label[i] = label[i]\n for i in range(len(label)):\n if check_label[i] == category:\n z = z+1\n label_all[i] = label_all[i] + new_label[z]*pow(2,iteration-1)\n label[i] = new_label[z]\n Logger.normal(\"label after \"+str(iteration) + \" iteration :\" + str(label_all))\n\n # ones: rekursion only with ones labels in new label\n ones = self.__split_Matrix(similarity_matrix,new_label,1)\n self.__rekursivAlgorithmus(iteration-1, ones, label , label_all , 1)\n \n # change label for the zero cluster\n z = -1\n for i in range(len(label)):\n if check_label[i] == 1:\n z = z+1\n if new_label[z] == 0:\n label[i] = 1\n else: \n label[i] = 0\n else : \n label[i] = 0\n \n #zeros: rekursion only with zero labels in new label\n zeros = self.__split_Matrix(similarity_matrix,new_label,0)\n\n self.__rekursivAlgorithmus(iteration-1, zeros, label , label_all , 1) \n return label_all\n\n def __split_Matrix(self, similarity_matrix : np.matrix , label : np.matrix , category : int) -> np.matrix:\n # split the similarity matrix in one smaller matrix. 
These matrix contains only similarities with the right label\n npl = 0\n for i in range(len(label)):\n if label[i] == category:\n npl = npl+1\n\n NSM = np.zeros((npl,npl))\n s = -1\n t = -1\n for i in range(len(label)):\n if label[i] == category:\n s += 1\n t = -1\n for j in range(len(label)):\n if label[j] == category:\n t += 1\n NSM[s,t] = similarity_matrix[i,j]\n return NSM\n\n def __create_graph(self, similarity_matrix: np.matrix) -> nx.Graph:\n probSize = similarity_matrix.shape[0]\n graph = nx.Graph()\n\n for i in range(0, probSize):\n for j in range(0, probSize):\n if i != j:\n graph.add_edge(i, j, weight = similarity_matrix[i][j])\n \n return graph\n\n def __bmMaxCutAlgo(self, similarity_matrix: np.matrix) -> np.matrix:\n label = np.zeros(similarity_matrix.shape[0])\n\n if similarity_matrix.any() == np.zeros((similarity_matrix.shape)).any():\n return label.astype(np.int)\n\n # Create sdp max cut solver\n graph = self.__create_graph(similarity_matrix)\n\n # Solve\n\n check: Timer = Timer() \n check.start()\n\n solver = BmMaxCutSolver(graph)\n (cutValue, cutEdges) = solver.solve()\n\n # Remove the max cut edges\n for (u, v) in cutEdges:\n graph.remove_edge(u, v)\n\n # Plot the graphs\n #from matplotlib import pyplot as plt\n #pos = nx.spring_layout(graph)\n #nx.draw(graph, pos)\n #labels = nx.get_edge_attributes(graph, 'weight')\n #nx.draw_networkx_edge_labels(graph, pos, edge_labels = labels)\n #plt.savefig(\"CuttedGraph.png\", format=\"PNG\")\n #plt.clf()\n\n # define element 0 (left side of first cut) is cluster 0\n element0 = cutEdges[0][0]\n label[element0] = 0\n\n for node in graph.nodes():\n # if node has path to element 0, then cluster 0\n # if not then cluster 1\n if nx.algorithms.shortest_paths.generic.has_path(graph, element0, node):\n label[node] = 0\n else:\n label[node] = 1\n\n check.stop()\n\n # print results\n print('solution:', str(cutEdges))\n print('solution objective:', str(cutValue))\n\n return label.astype(np.int)\n\n # getter and setter methodes\n def get_number_of_clusters(self) -> int:\n return self.__number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters : int = 2) -> None:\n if isinstance(number_of_clusters, int) and number_of_clusters > 0:\n self.__number_of_clusters = number_of_clusters\n\n #getter and setter params\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"Bureir-Monteiro solver for MaxCut\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName ,\"header\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\"\\\n +\"2**x Clusters would be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\" ,description_number_of_clusters, parameter_number_of_clusters, \"number\", 1 , 1 ))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n \n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n 
self.set_number_of_clusters(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n \n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass NegativeRotationQuantumKMeansClustering(Clustering):\n def __init__(self, \n number_of_clusters = 2,\n max_qubits = 2,\n shots_each = 100,\n max_runs = 10,\n relative_residual_amount = 5,\n backend = QuantumBackends.aer_statevector_simulator,\n ibmq_token = \"\",\n ibmq_custom_backend = \"\"):\n super().__init__()\n self.clusterAlgo = NegativeRotationQuantumKMeans()\n\n self.number_of_clusters = number_of_clusters\n self.max_qubits = max_qubits\n self.shots_each = shots_each\n self.max_runs = max_runs\n self.relative_residual_amount = relative_residual_amount\n self.backend = backend\n self.ibmq_token = ibmq_token\n self.ibmq_custom_backend = ibmq_custom_backend\n return\n \n def create_cluster(self, position_matrix : np.matrix , similarity_matrix : np.matrix) -> np.matrix:\n self.clusterAlgo.set_number_of_clusters(self.get_number_of_clusters())\n self.clusterAlgo.set_max_qubits(self.get_max_qubits())\n self.clusterAlgo.set_shots_each(self.get_shots_each())\n self.clusterAlgo.set_max_runs(self.get_max_runs())\n self.clusterAlgo.set_relative_residual_amount(self.get_relative_residual_amount())\n\n qBackend = QuantumBackends.get_quantum_backend(self.backend, self.ibmq_token, self.ibmq_custom_backend)\n\n self.clusterAlgo.set_backend(qBackend)\n\n label = np.zeros(similarity_matrix.shape[0])\n\n if similarity_matrix.any() == np.zeros((similarity_matrix.shape)).any():\n return label.astype(np.int)\n\n # run\n clusterMapping = self.clusterAlgo.Run(position_matrix)\n\n # write result into labels\n for i in range(0, len(label)):\n label[i] = int(clusterMapping[i])\n\n return label.astype(np.int)\n\n #getter and setter params\n def get_number_of_clusters(self):\n return self.number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters):\n self.number_of_clusters = number_of_clusters\n return\n\n def get_max_qubits(self):\n return self.max_qubits\n\n def set_max_qubits(self, max_qubits):\n self.max_qubits = max_qubits\n return\n\n def get_shots_each(self):\n return self.shots_each\n\n def set_shots_each(self, shots_each):\n self.shots_each = shots_each\n return\n\n def get_max_runs(self):\n return self.max_runs\n\n def set_max_runs(self, max_runs):\n self.max_runs = max_runs\n return\n\n def get_relative_residual_amount(self):\n return self.relative_residual_amount\n\n def set_relative_residual_amount(self, relative_residual_amount):\n self.relative_residual_amount = relative_residual_amount\n return\n\n def get_backend(self):\n return self.backend\n\n def set_backend(self, backend):\n self.backend = backend\n return\n\n def get_ibmq_token(self):\n return self.ibmq_token\n\n def set_ibmq_token(self, ibmq_token):\n self.ibmq_token = ibmq_token\n return\n\n def get_ibmq_custom_backend(self):\n return self.ibmq_custom_backend\n\n def set_ibmq_custom_backend(self, ibmq_custom_backend):\n self.ibmq_custom_backend = ibmq_custom_backend\n return\n\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"Negative Rotation Quantum KMeans\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName 
,\"header\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\"\\\n +\": k Clusters will be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\" , description_number_of_clusters, parameter_number_of_clusters, \"number\", 1 , 1 ))\n \n parameter_max_qubits = self.get_max_qubits()\n description_max_qubits = \"int > 0 (default 2): \"\\\n +\"The amount of qubits that are used for executing the circuits.\"\n params.append((\"maxQubits\", \"max qubits\" ,description_max_qubits,parameter_max_qubits,\"number\", 1 , 1 ))\n\n parameter_shots_each = self.get_shots_each()\n description_shots_each = \"int > 0 (default 100): \"\\\n +\"The amount of shots for each circuit run.\"\n params.append((\"shotsEach\", \"shots each\" ,description_shots_each,parameter_shots_each,\"number\", 1 , 1 ))\n\n parameter_max_runs = self.get_max_runs()\n description_max_runs = \"int > 0 (default 10): \"\\\n +\"The amount of k mean iteration runs.\"\n params.append((\"maxRuns\", \"max runs\" ,description_max_runs,parameter_max_runs,\"number\", 1 , 1 ))\n\n parameter_relative_residual = self.get_relative_residual_amount()\n description_relative_residual = \"int > 0 (default 5): \"\\\n +\"The amount in percentage of how many data points can change their label between\" \\\n + \"two runs. The default is 5, i.e. when less then 5% of the data points change\" \\\n + \"their label, we consider this as converged\"\n params.append((\"relativeResidual\", \"relative residual amount\" ,description_relative_residual,parameter_relative_residual,\"number\", 1 , 1 ))\n\n parameter_backend = self.get_backend().value\n description_backend = \"Enum default(aer_statevector_simulator): \"\\\n + \" A list of possible backends. 
aer is a local simulator and ibmq are backends provided by IBM.\"\n params.append((\"quantumBackend\", \"QuantumBackend\", description_backend, parameter_backend, \"select\", [qb.value for qb in QuantumBackends]))\n\n parameter_ibmq_custom_backend = self.get_ibmq_custom_backend()\n description_ibmq_custom_backend = \"str default(\\\"\\\") \"\\\n + \" The name of a custom backend of ibmq.\"\n params.append((\"ibmqCustomBackend\", \"IBMQ-Custom-Backend\", description_ibmq_custom_backend, str(parameter_ibmq_custom_backend), \"text\", \"\", \"\"))\n\n parameter_ibmq_token = self.get_ibmq_token()\n description_ibmq_token = \"str default(\\\"\\\") \"\\\n + \" The token of an account accessing the IBMQ online service.\"\n params.append((\"ibmqToken\", \"IBMQ-Token\", description_ibmq_token, parameter_ibmq_token, \"text\", \"\", \"\"))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n \n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n self.set_number_of_clusters(param[3])\n elif param[0] == \"maxQubits\":\n self.set_max_qubits(param[3])\n elif param[0] == \"shotsEach\":\n self.set_shots_each(param[3])\n elif param[0] == \"maxRuns\":\n self.set_max_runs(param[3])\n elif param[0] == \"relativeResidual\":\n self.set_relative_residual_amount(param[3])\n elif param[0] == \"quantumBackend\":\n self.set_backend(QuantumBackends[param[3]])\n elif param[0] == \"ibmqToken\":\n self.set_ibmq_token(param[3])\n elif param[0] == \"ibmqCustomBackend\":\n self.set_ibmq_custom_backend(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n\n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass DestructiveInterferenceQuantumKMeansClustering(Clustering):\n def __init__(self, \n number_of_clusters = 2,\n max_qubits = 2,\n shots_each = 100,\n max_runs = 10,\n relative_residual_amount = 5,\n backend = QuantumBackends.aer_statevector_simulator,\n ibmq_token = \"\",\n ibmq_custom_backend = \"\"):\n super().__init__()\n self.clusterAlgo = DestructiveInterferenceQuantumKMeans()\n\n self.number_of_clusters = number_of_clusters\n self.max_qubits = max_qubits\n self.shots_each = shots_each\n self.max_runs = max_runs\n self.relative_residual_amount = relative_residual_amount\n self.backend = backend\n self.ibmq_token = ibmq_token\n self.ibmq_custom_backend = ibmq_custom_backend\n return\n \n def create_cluster(self, position_matrix : np.matrix , similarity_matrix : np.matrix) -> np.matrix:\n self.clusterAlgo.set_number_of_clusters(self.get_number_of_clusters())\n self.clusterAlgo.set_max_qubits(self.get_max_qubits())\n self.clusterAlgo.set_shots_each(self.get_shots_each())\n self.clusterAlgo.set_max_runs(self.get_max_runs())\n self.clusterAlgo.set_relative_residual_amount(self.get_relative_residual_amount())\n\n qBackend = QuantumBackends.get_quantum_backend(self.backend, self.ibmq_token, self.ibmq_custom_backend)\n\n self.clusterAlgo.set_backend(qBackend)\n\n label = np.zeros(similarity_matrix.shape[0])\n\n if similarity_matrix.any() == np.zeros((similarity_matrix.shape)).any():\n return label.astype(np.int)\n\n # run\n clusterMapping = 
self.clusterAlgo.Run(position_matrix)\n\n # write result into labels\n for i in range(0, len(label)):\n label[i] = int(clusterMapping[i])\n\n return label.astype(np.int)\n\n #getter and setter params\n def get_number_of_clusters(self):\n return self.number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters):\n self.number_of_clusters = number_of_clusters\n return\n\n def get_max_qubits(self):\n return self.max_qubits\n\n def set_max_qubits(self, max_qubits):\n self.max_qubits = max_qubits\n return\n\n def get_shots_each(self):\n return self.shots_each\n\n def set_shots_each(self, shots_each):\n self.shots_each = shots_each\n return\n\n def get_max_runs(self):\n return self.max_runs\n\n def set_max_runs(self, max_runs):\n self.max_runs = max_runs\n return\n\n def get_relative_residual_amount(self):\n return self.relative_residual_amount\n\n def set_relative_residual_amount(self, relative_residual_amount):\n self.relative_residual_amount = relative_residual_amount\n return\n\n def get_backend(self):\n return self.backend\n\n def set_backend(self, backend):\n self.backend = backend\n return\n\n def get_ibmq_token(self):\n return self.ibmq_token\n\n def set_ibmq_token(self, ibmq_token):\n self.ibmq_token = ibmq_token\n return\n\n def get_ibmq_custom_backend(self):\n return self.ibmq_custom_backend\n\n def set_ibmq_custom_backend(self, ibmq_custom_backend):\n self.ibmq_custom_backend = ibmq_custom_backend\n return\n\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"Destructive Interference Quantum KMeans\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName ,\"header\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\"\\\n +\": k Clusters will be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\" , description_number_of_clusters, parameter_number_of_clusters, \"number\", 1 , 1 ))\n \n parameter_max_qubits = self.get_max_qubits()\n description_max_qubits = \"int > 0 (default 2): \"\\\n +\"The amount of qubits that are used for executing the circuits.\"\n params.append((\"maxQubits\", \"max qubits\" ,description_max_qubits,parameter_max_qubits,\"number\", 1 , 1 ))\n\n parameter_shots_each = self.get_shots_each()\n description_shots_each = \"int > 0 (default 100): \"\\\n +\"The amount of shots for each circuit run.\"\n params.append((\"shotsEach\", \"shots each\" ,description_shots_each,parameter_shots_each,\"number\", 1 , 1 ))\n\n parameter_max_runs = self.get_max_runs()\n description_max_runs = \"int > 0 (default 10): \"\\\n +\"The amount of k mean iteration runs.\"\n params.append((\"maxRuns\", \"max runs\" ,description_max_runs,parameter_max_runs,\"number\", 1 , 1 ))\n\n parameter_relative_residual = self.get_relative_residual_amount()\n description_relative_residual = \"int > 0 (default 5): \"\\\n +\"The amount in percentage of how many data points can change their label between\" \\\n + \"two runs. The default is 5, i.e. 
when less then 5% of the data points change\" \\\n + \"their label, we consider this as converged\"\n params.append((\"relativeResidual\", \"relative residual amount\" ,description_relative_residual,parameter_relative_residual,\"number\", 1 , 1 ))\n\n parameter_backend = self.get_backend().value\n description_backend = \"Enum default(aer_statevector_simulator): \"\\\n + \" A list of possible backends. aer is a local simulator and ibmq are backends provided by IBM.\"\n params.append((\"quantumBackend\", \"QuantumBackend\", description_backend, parameter_backend, \"select\", [qb.value for qb in QuantumBackends]))\n\n parameter_ibmq_custom_backend = self.get_ibmq_custom_backend()\n description_ibmq_custom_backend = \"str default(\\\"\\\") \"\\\n + \" The name of a custom backend of ibmq.\"\n params.append((\"ibmqCustomBackend\", \"IBMQ-Custom-Backend\", description_ibmq_custom_backend, str(parameter_ibmq_custom_backend), \"text\", \"\", \"\"))\n\n parameter_ibmq_token = self.get_ibmq_token()\n description_ibmq_token = \"str default(\\\"\\\") \"\\\n + \" The token of an account accessing the IBMQ online service.\"\n params.append((\"ibmqToken\", \"IBMQ-Token\", description_ibmq_token, parameter_ibmq_token, \"text\", \"\", \"\"))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n \n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n self.set_number_of_clusters(param[3])\n elif param[0] == \"maxQubits\":\n self.set_max_qubits(param[3])\n elif param[0] == \"shotsEach\":\n self.set_shots_each(param[3])\n elif param[0] == \"maxRuns\":\n self.set_max_runs(param[3])\n elif param[0] == \"relativeResidual\":\n self.set_relative_residual_amount(param[3])\n elif param[0] == \"quantumBackend\":\n self.set_backend(QuantumBackends[param[3]])\n elif param[0] == \"ibmqToken\":\n self.set_ibmq_token(param[3])\n elif param[0] == \"ibmqCustomBackend\":\n self.set_ibmq_custom_backend(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n\n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass StatePreparationQuantumKMeansClustering(Clustering):\n def __init__(self, \n number_of_clusters = 2,\n max_qubits = 2,\n shots_each = 100,\n max_runs = 10,\n relative_residual_amount = 5,\n backend = QuantumBackends.aer_statevector_simulator,\n ibmq_token = \"\",\n ibmq_custom_backend = \"\"):\n super().__init__()\n self.clusterAlgo = StatePreparationQuantumKMeans()\n\n self.number_of_clusters = number_of_clusters\n self.max_qubits = max_qubits\n self.shots_each = shots_each\n self.max_runs = max_runs\n self.relative_residual_amount = relative_residual_amount\n self.backend = backend\n self.ibmq_token = ibmq_token\n self.ibmq_custom_backend = ibmq_custom_backend\n return\n \n def create_cluster(self, position_matrix : np.matrix , similarity_matrix : np.matrix) -> np.matrix:\n self.clusterAlgo.set_number_of_clusters(self.get_number_of_clusters())\n self.clusterAlgo.set_max_qubits(self.get_max_qubits())\n self.clusterAlgo.set_shots_each(self.get_shots_each())\n self.clusterAlgo.set_max_runs(self.get_max_runs())\n 
self.clusterAlgo.set_relative_residual_amount(self.get_relative_residual_amount())\n\n qBackend = QuantumBackends.get_quantum_backend(self.backend, self.ibmq_token, self.ibmq_custom_backend)\n\n self.clusterAlgo.set_backend(qBackend)\n\n label = np.zeros(similarity_matrix.shape[0])\n\n if similarity_matrix.any() == np.zeros((similarity_matrix.shape)).any():\n return label.astype(np.int)\n\n # run\n clusterMapping = self.clusterAlgo.Run(position_matrix)\n\n # write result into labels\n for i in range(0, len(label)):\n label[i] = int(clusterMapping[i])\n\n return label.astype(np.int)\n\n #getter and setter params\n def get_number_of_clusters(self):\n return self.number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters):\n self.number_of_clusters = number_of_clusters\n return\n\n def get_max_qubits(self):\n return self.max_qubits\n\n def set_max_qubits(self, max_qubits):\n self.max_qubits = max_qubits\n return\n\n def get_shots_each(self):\n return self.shots_each\n\n def set_shots_each(self, shots_each):\n self.shots_each = shots_each\n return\n\n def get_max_runs(self):\n return self.max_runs\n\n def set_max_runs(self, max_runs):\n self.max_runs = max_runs\n return\n\n def get_relative_residual_amount(self):\n return self.relative_residual_amount\n\n def set_relative_residual_amount(self, relative_residual_amount):\n self.relative_residual_amount = relative_residual_amount\n return\n\n def get_backend(self):\n return self.backend\n\n def set_backend(self, backend):\n self.backend = backend\n return\n\n def get_ibmq_token(self):\n return self.ibmq_token\n\n def set_ibmq_token(self, ibmq_token):\n self.ibmq_token = ibmq_token\n return\n\n def get_ibmq_custom_backend(self):\n return self.ibmq_custom_backend\n\n def set_ibmq_custom_backend(self, ibmq_custom_backend):\n self.ibmq_custom_backend = ibmq_custom_backend\n return\n\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"State Preparation Quantum KMeans\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName ,\"header\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\"\\\n +\": k Clusters will be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\" , description_number_of_clusters, parameter_number_of_clusters, \"number\", 1 , 1 ))\n \n parameter_max_qubits = self.get_max_qubits()\n description_max_qubits = \"int > 0 (default 2): \"\\\n +\"The amount of qubits that are used for executing the circuits.\"\n params.append((\"maxQubits\", \"max qubits\" ,description_max_qubits,parameter_max_qubits,\"number\", 1 , 1 ))\n\n parameter_shots_each = self.get_shots_each()\n description_shots_each = \"int > 0 (default 100): \"\\\n +\"The amount of shots for each circuit run.\"\n params.append((\"shotsEach\", \"shots each\" ,description_shots_each,parameter_shots_each,\"number\", 1 , 1 ))\n\n parameter_max_runs = self.get_max_runs()\n description_max_runs = \"int > 0 (default 10): \"\\\n +\"The amount of k mean iteration runs.\"\n params.append((\"maxRuns\", \"max runs\" ,description_max_runs,parameter_max_runs,\"number\", 1 , 1 ))\n\n parameter_relative_residual = self.get_relative_residual_amount()\n description_relative_residual = \"int > 0 (default 5): \"\\\n 
+\"The amount in percentage of how many data points can change their label between\" \\\n + \"two runs. The default is 5, i.e. when less then 5% of the data points change\" \\\n + \"their label, we consider this as converged\"\n params.append((\"relativeResidual\", \"relative residual amount\" ,description_relative_residual,parameter_relative_residual,\"number\", 1 , 1 ))\n\n parameter_backend = self.get_backend().value\n description_backend = \"Enum default(aer_statevector_simulator): \"\\\n + \" A list of possible backends. aer is a local simulator and ibmq are backends provided by IBM.\"\n params.append((\"quantumBackend\", \"QuantumBackend\", description_backend, parameter_backend, \"select\", [qb.value for qb in QuantumBackends]))\n\n parameter_ibmq_custom_backend = self.get_ibmq_custom_backend()\n description_ibmq_custom_backend = \"str default(\\\"\\\") \"\\\n + \" The name of a custom backend of ibmq.\"\n params.append((\"ibmqCustomBackend\", \"IBMQ-Custom-Backend\", description_ibmq_custom_backend, str(parameter_ibmq_custom_backend), \"text\", \"\", \"\"))\n\n parameter_ibmq_token = self.get_ibmq_token()\n description_ibmq_token = \"str default(\\\"\\\") \"\\\n + \" The token of an account accessing the IBMQ online service.\"\n params.append((\"ibmqToken\", \"IBMQ-Token\", description_ibmq_token, parameter_ibmq_token, \"text\", \"\", \"\"))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n \n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n self.set_number_of_clusters(param[3])\n elif param[0] == \"maxQubits\":\n self.set_max_qubits(param[3])\n elif param[0] == \"shotsEach\":\n self.set_shots_each(param[3])\n elif param[0] == \"maxRuns\":\n self.set_max_runs(param[3])\n elif param[0] == \"relativeResidual\":\n self.set_relative_residual_amount(param[3])\n elif param[0] == \"quantumBackend\":\n self.set_backend(QuantumBackends[param[3]])\n elif param[0] == \"ibmqToken\":\n self.set_ibmq_token(param[3])\n elif param[0] == \"ibmqCustomBackend\":\n self.set_ibmq_custom_backend(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n\n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass PositiveCorrelationQuantumKMeansClustering(Clustering):\n def __init__(self,\n number_of_clusters=2,\n shots_each=100,\n max_runs=10,\n relative_residual_amount=5,\n backend=QuantumBackends.aer_statevector_simulator,\n ibmq_token=\"\",\n ibmq_custom_backend=\"\"):\n super().__init__()\n self.clusterAlgo = PositiveCorrelationQuantumKmeans()\n\n self.number_of_clusters = number_of_clusters\n self.shots_each = shots_each\n self.max_runs = max_runs\n self.relative_residual_amount = relative_residual_amount\n self.backend = backend\n self.ibmq_token = ibmq_token\n self.ibmq_custom_backend = ibmq_custom_backend\n return\n\n def create_cluster(self, position_matrix: np.matrix, similarity_matrix: np.matrix) -> np.matrix:\n qBackend = QuantumBackends.get_quantum_backend(self.backend, self.ibmq_token, self.ibmq_custom_backend)\n\n label = np.zeros(similarity_matrix.shape[0])\n\n if similarity_matrix.any() == 
np.zeros((similarity_matrix.shape)).any():\n return label.astype(np.int)\n\n # run\n clusterMapping = self.clusterAlgo.fit(position_matrix, self.number_of_clusters, self.max_runs, self.relative_residual_amount, qBackend, self.shots_each)\n\n # write result into labels\n for i in range(0, len(label)):\n label[i] = int(clusterMapping[i])\n\n return label.astype(np.int)\n\n # getter and setter params\n def get_number_of_clusters(self):\n return self.number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters):\n self.number_of_clusters = number_of_clusters\n return\n\n def get_shots_each(self):\n return self.shots_each\n\n def set_shots_each(self, shots_each):\n self.shots_each = shots_each\n return\n\n def get_max_runs(self):\n return self.max_runs\n\n def set_max_runs(self, max_runs):\n self.max_runs = max_runs\n return\n\n def get_relative_residual_amount(self):\n return self.relative_residual_amount\n\n def set_relative_residual_amount(self, relative_residual_amount):\n self.relative_residual_amount = relative_residual_amount\n return\n\n def get_backend(self):\n return self.backend\n\n def set_backend(self, backend):\n self.backend = backend\n return\n\n def get_ibmq_token(self):\n return self.ibmq_token\n\n def set_ibmq_token(self, ibmq_token):\n self.ibmq_token = ibmq_token\n return\n\n def get_ibmq_custom_backend(self):\n return self.ibmq_custom_backend\n\n def set_ibmq_custom_backend(self, ibmq_custom_backend):\n self.ibmq_custom_backend = ibmq_custom_backend\n return\n\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"Positive Correlation Quantum KMeans\"\n params.append((\"name\", \"ClusterTyp\", \"Name of choosen Clustering Type\", clusteringTypeName, \"header\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\" \\\n + \": k Clusters will be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\", description_number_of_clusters,\n parameter_number_of_clusters, \"number\", 1, 1))\n\n parameter_shots_each = self.get_shots_each()\n description_shots_each = \"int > 0 (default 100): \" \\\n + \"The amount of shots for each circuit run.\"\n params.append((\"shotsEach\", \"shots each\", description_shots_each, parameter_shots_each, \"number\", 1, 1))\n\n parameter_max_runs = self.get_max_runs()\n description_max_runs = \"int > 0 (default 10): \" \\\n + \"The amount of k mean iteration runs.\"\n params.append((\"maxRuns\", \"max runs\", description_max_runs, parameter_max_runs, \"number\", 1, 1))\n\n parameter_relative_residual = self.get_relative_residual_amount()\n description_relative_residual = \"int > 0 (default 5): \" \\\n + \"The amount in percentage of how many data points can change their label between\" \\\n + \"two runs. The default is 5, i.e. when less then 5% of the data points change\" \\\n + \"their label, we consider this as converged\"\n params.append((\"relativeResidual\", \"relative residual amount\", description_relative_residual,\n parameter_relative_residual, \"number\", 1, 1))\n\n parameter_backend = self.get_backend().value\n description_backend = \"Enum default(aer_statevector_simulator): \" \\\n + \" A list of possible backends. 
aer is a local simulator and ibmq are backends provided by IBM.\"\n params.append((\"quantumBackend\", \"QuantumBackend\", description_backend, parameter_backend, \"select\",\n [qb.value for qb in QuantumBackends]))\n\n parameter_ibmq_custom_backend = self.get_ibmq_custom_backend()\n description_ibmq_custom_backend = \"str default(\\\"\\\") \" \\\n + \" The name of a custom backend of ibmq.\"\n params.append((\"ibmqCustomBackend\", \"IBMQ-Custom-Backend\", description_ibmq_custom_backend,\n str(parameter_ibmq_custom_backend), \"text\", \"\", \"\"))\n\n parameter_ibmq_token = self.get_ibmq_token()\n description_ibmq_token = \"str default(\\\"\\\") \" \\\n + \" The token of an account accessing the IBMQ online service.\"\n params.append((\"ibmqToken\", \"IBMQ-Token\", description_ibmq_token, parameter_ibmq_token, \"text\", \"\", \"\"))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n\n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n self.set_number_of_clusters(param[3])\n elif param[0] == \"shotsEach\":\n self.set_shots_each(param[3])\n elif param[0] == \"maxRuns\":\n self.set_max_runs(param[3])\n elif param[0] == \"relativeResidual\":\n self.set_relative_residual_amount(param[3])\n elif param[0] == \"quantumBackend\":\n self.set_backend(QuantumBackends[param[3]])\n elif param[0] == \"ibmqToken\":\n self.set_ibmq_token(param[3])\n elif param[0] == \"ibmqCustomBackend\":\n self.set_ibmq_custom_backend(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n\n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass ClassicalKMeans(Clustering):\n def __init__(\n self,\n number_of_clusters = 2,\n max_runs = 10,\n relative_residual_amount = 5\n ):\n super().__init__()\n self.number_of_clusters = number_of_clusters\n self.max_runs = max_runs\n self.relative_residual_amount = relative_residual_amount\n return\n\n def create_cluster(self, position_matrix : np.matrix, similarity_matrix : np.matrix ) -> np.matrix:\n n_clusters = self.get_number_of_clusters()\n random_state = 0\n max_iter = self.get_max_runs()\n tol = self.get_relative_residual_amount() / 100.0\n kmeansOutput = KMeans(\n n_clusters=n_clusters, \n random_state=random_state,\n max_iter=max_iter,\n tol=tol).fit(position_matrix)\n return kmeansOutput.labels_.astype(np.int)\n\n #getter and setter params\n def get_number_of_clusters(self):\n return self.number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters):\n self.number_of_clusters = number_of_clusters\n return\n\n def get_max_runs(self):\n return self.max_runs\n\n def set_max_runs(self, max_runs):\n self.max_runs = max_runs\n return\n\n def get_relative_residual_amount(self):\n return self.relative_residual_amount\n\n def set_relative_residual_amount(self, relative_residual_amount):\n self.relative_residual_amount = relative_residual_amount\n return\n\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min steps)/select (options) /checkbox() / text )\n \"\"\"\n params = 
[]\n clusteringTypeName = \"Classical KMeans\"\n params.append((\"name\", \"ClusterTyp\" ,\"Name of choosen Clustering Type\", clusteringTypeName ,\"header\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\"\\\n +\": k Clusters will be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\" , description_number_of_clusters, parameter_number_of_clusters, \"number\", 1 , 1 ))\n \n parameter_max_runs = self.get_max_runs()\n description_max_runs = \"int > 0 (default 10): \"\\\n +\"The amount of k mean iteration runs.\"\n params.append((\"maxRuns\", \"max runs\" ,description_max_runs,parameter_max_runs,\"number\", 1 , 1 ))\n\n parameter_relative_residual = self.get_relative_residual_amount()\n description_relative_residual = \"int > 0 (default 5): \"\\\n +\"The amount in percentage of how many data points can change their label between\" \\\n + \"two runs. The default is 5, i.e. when less then 5% of the data points change\" \\\n + \"their label, we consider this as converged\"\n params.append((\"relativeResidual\", \"relative residual amount\" ,description_relative_residual,parameter_relative_residual,\"number\", 1 , 1 ))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping,\n parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n \n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n self.set_number_of_clusters(param[3])\n elif param[0] == \"maxRuns\":\n self.set_max_runs(param[3])\n elif param[0] == \"relativeResidual\":\n self.set_relative_residual_amount(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n\n def d2_plot(self, last_sequenz: List[int] , costumes: List[Costume] ) -> None:\n pass\n\n\nclass ClassicalKMedoids(Clustering):\n def __init__(\n self,\n number_of_clusters=2,\n max_runs=10\n ):\n super().__init__()\n self.number_of_clusters = number_of_clusters\n self.max_runs = max_runs\n self.method = 'alternate'\n self.init = 'build'\n return\n\n def create_cluster(self, position_matrix: np.matrix, similarity_matrix: np.matrix) -> np.matrix:\n n_clusters = self.get_number_of_clusters()\n random_state = 0\n method = self.get_method()\n init = self.get_init()\n max_iter = self.get_max_runs()\n kmedoidsOutput = KMedoids(\n n_clusters=n_clusters,\n method=method,\n init=init,\n random_state=random_state,\n max_iter=max_iter).fit(similarity_matrix)\n return kmedoidsOutput.labels_.astype(np.int)\n\n # getter and setter params\n def get_number_of_clusters(self):\n return self.number_of_clusters\n\n def set_number_of_clusters(self, number_of_clusters):\n self.number_of_clusters = number_of_clusters\n return\n\n def get_max_runs(self):\n return self.max_runs\n\n def set_max_runs(self, max_runs):\n self.max_runs = max_runs\n return\n\n def set_method(self, method):\n self.method = method\n return\n\n def get_method(self):\n return self.method\n\n def set_init(self, init):\n self.init = init\n return\n\n def get_init(self):\n return self.init\n\n def get_param_list(self) -> list:\n \"\"\"\n # each tuple has informations as follows\n # (pc_name[0] , showedname[1] , description[2] , actual value[3] , input type[4] ,\n # [5] number(min 
steps)/select (options) /checkbox() / text )\n \"\"\"\n params = []\n clusteringTypeName = \"Classical KMedoids\"\n params.append((\"name\", \"ClusterTyp\", \"Name of choosen Clustering Type\", clusteringTypeName, \"header\"))\n\n parameter_init = self.get_init()\n description_init = \"string (default=build)\" \\\n + \"Specify medoid initialization method. ‘random’ selects n_clusters \" \\\n \"elements from the dataset. ‘heuristic’ picks the n_clusters points with the \" \\\n \"smallest sum distance to every other point. ‘k-medoids++’ follows an \" \\\n \"approach based on k-means++_, and in general, gives initial medoids which \" \\\n \"are more separated than those generated by the other methods. ‘build’ is a \" \\\n \"greedy initialization of the medoids used in the original PAM algorithm. \" \\\n \"Often ‘build’ is more efficient but slower than other initializations on \" \\\n \"big datasets and it is also very non-robust, if there are outliers in the \" \\\n \"dataset, use another initialization. \"\n params.append((\"init\", \"Initialization\", description_init,\n parameter_init, \"text\", \"\", \"\"))\n\n parameter_method = self.get_method()\n description_method = \"string (default=alternate)\" \\\n + \"Which algorithm to use. ‘alternate’ is faster while ‘pam’ is more accurate.\"\n params.append((\"method\", \"Method\", description_method,\n parameter_method, \"text\", \"\", \"\"))\n\n parameter_number_of_clusters = self.get_number_of_clusters()\n description_number_of_clusters = \"int > 0 (default=2)\" \\\n + \": k Clusters will be generated\"\n params.append((\"numberClusters\", \"Number of Clusters\", description_number_of_clusters,\n parameter_number_of_clusters, \"number\", 1, 1))\n\n parameter_max_runs = self.get_max_runs()\n description_max_runs = \"int > 0 (default 10): \" \\\n + \"The amount of k medoids iteration runs.\"\n params.append((\"maxRuns\", \"max runs\", description_max_runs, parameter_max_runs, \"number\", 1, 1))\n\n parameter_keep_cluster_mapping = self.get_keep_cluster_mapping()\n description_keep_cluster_mapping = \"bool (default False): \" \\\n + \"If True, keeps the cluster mapping when re-calculating.\"\n params.append((\"keepClusterMapping\", \"keep cluster mapping\", description_keep_cluster_mapping, parameter_keep_cluster_mapping, \"checkbox\"))\n\n return params\n\n def set_param_list(self, params: list = []) -> np.matrix:\n for param in params:\n if param[0] == \"numberClusters\":\n self.set_number_of_clusters(param[3])\n elif param[0] == \"maxRuns\":\n self.set_max_runs(param[3])\n elif param[0] == \"init\":\n self.set_init(param[3])\n elif param[0] == \"method\":\n self.set_method(param[3])\n elif param[0] == \"keepClusterMapping\":\n self.set_keep_cluster_mapping(param[3])\n\n def d2_plot(self, last_sequenz: List[int], costumes: List[Costume]) -> None:\n pass\n" ]
[ [ "numpy.abs" ], [ "numpy.zeros", "sklearn.cluster.OPTICS", "sklearn.cluster.KMeans", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhangxinzhou/play_game
[ "854448f8416b2d3f98bb2c3ed0f7d834a61593de" ]
[ "opencv_learn/charpter12/demo_12.06.py" ]
[ "import cv2\nimport numpy as np\n\no = cv2.imread(\"contours.bmp\")\ngray = cv2.cvtColor(o, cv2.COLOR_BGR2GRAY)\nret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\ncontours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\ncv2.imshow(\"original\", o)\nn = len(contours)\ncontoursImg = []\nfor i in range(n):\n temp = np.zeros(o.shape, np.uint8)\n contoursImg.append(temp)\n contoursImg[i] = cv2.drawContours(contoursImg[i], contours, i, (255, 255, 255), 3)\n if cv2.contourArea(contours[i]) > 12000:\n cv2.imshow(\"contours[\" + str(i) + \"]\", contoursImg[i])\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bstriner/gym-learning-to-learn
[ "4cd93bf7a306255771a32e0d97b3d705b2666656" ]
[ "gym_learning_to_learn/envs/base_env.py" ]
[ "from gym import Env\nfrom gym.utils import seeding\nfrom gym import spaces\nimport numpy as np\nimport keras.backend as K\n\n\nclass BaseEnv(Env):\n metadata = {'render.modes': ['human', 'ansi']}\n\n def __init__(self, action_mapping):\n self._seed()\n self.verbose = 0\n self.viewer = None\n self.batch_size = 32\n self.optimizer = None\n self.model = None\n self.current_step = 0\n self.action_mapping = action_mapping\n self.action_space = action_mapping.action_space\n bounds = float('inf')\n self.observation_space = spaces.Box(-bounds, bounds, (4,))\n self.viewer = None\n self.best = None\n self.evaluate_test = False\n Env.__init__(self)\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def create_model(self):\n pass\n\n def create_optimizer(self):\n pass\n\n def loss_scale(self, loss):\n return -np.log(loss)\n\n def _step(self, action):\n self.action_mapping.step(self.optimizer, action)\n loss_before = self.losses(self.data_val)\n if self.best is None:\n self.best = loss_before\n self.model.fit(self.data_train[0], self.data_train[1],\n validation_data=(self.data_val[0], self.data_val[1]),\n nb_epoch=1, verbose=self.verbose, batch_size=self.batch_size)\n loss_after = self.losses(self.data_val)\n self.current_step += 1\n observation = self._observation()\n if (loss_after > 1e10) or (not np.all(np.isfinite(observation))):\n print(\"Episode terminated due to NaN loss. Loss: {}, Obs: {}, Lr: {}\".format(loss_after, observation,\n K.get_value(\n self.optimizer.lr)))\n observation[0] = -1\n observation[1] = -1\n reward = np.float32(-10000)\n return observation, reward, True, {}\n # reward = (self.best - loss_after)\n # eps = 1e-8\n # reward = np.float32((1.0 / (eps + loss_after)))\n reward = self.loss_scale(loss_after)\n if self.verbose:\n print(\"LR: {}, Reward: {}, Loss: {}\".format(K.get_value(self.optimizer.lr), reward, loss_after))\n # reward = -loss_after\n assert np.all(np.isfinite(reward))\n if loss_after < self.best:\n self.best = loss_after\n done = self.current_step > self.max_steps\n # print(\"Step: {}\".format(observation))\n info = {}\n if self.evaluate_test:\n info[\"test_loss\"] = self.losses(self.data_test)\n return observation, reward, done, info\n\n def set_evaluate_test(self, evaluate_test):\n self.evaluate_test = evaluate_test\n\n def losses(self, data):\n loss = self.model.evaluate(data[0], data[1], verbose=self.verbose, batch_size=self.batch_size)\n return loss\n\n def _observation(self):\n # eps = 1e-8\n loss_train = self.loss_scale(self.losses(self.data_train))\n loss_val = self.loss_scale(self.losses(self.data_val))\n lr = K.get_value(self.optimizer.lr)\n nllr = -np.log(lr)\n ret = np.array([loss_train, loss_val, nllr, self.current_step])\n # assert np.all(np.isfinite(ret)), \"Lr: {}, Inf: {}\".format(lr, ret)\n return ret\n\n def observation_names(self):\n return [\"loss_train\", \"loss_val\", \"nl_lr\", \"step\"]\n\n def _reset(self):\n self.create_model()\n self.current_step = 0\n self.best = None\n observation = self._observation()\n return observation\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n if mode == 'human':\n print(self._observation())\n elif mode == \"ansi\":\n return \"Observation: {}\\n\".format(self._observation())\n else:\n raise NotImplementedError(\"mode not supported: {}\".format(mode))\n" ]
[ [ "numpy.log", "numpy.array", "numpy.isfinite", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cocolord/SegmenTron
[ "940015dc35614c15bd303f91f611878efbab8796", "940015dc35614c15bd303f91f611878efbab8796" ]
[ "segmentron/models/danet.py", "segmentron/models/ccnet.py" ]
[ "from __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .segbase import SegBaseModel\nfrom .model_zoo import MODEL_REGISTRY\nfrom ..modules import _FCNHead, PAM_Module, CAM_Module\n\n__all__ = ['DANet']\n\n\n@MODEL_REGISTRY.register()\nclass DANet(SegBaseModel):\n r\"\"\"DANet model from the paper `\"Dual Attention Network for Scene Segmentation\"\n <https://arxiv.org/abs/1809.02983.pdf>`\n \"\"\"\n def __init__(self):\n super(DANet, self).__init__()\n self.head = DANetHead(2048, self.nclass)\n if self.aux:\n self.auxlayer = _FCNHead(728, self.nclass)\n self.__setattr__('decoder', ['head', 'auxlayer'] if self.aux else ['head'])\n\n def forward(self, x):\n imsize = x.size()[2:]\n _, _, c3, c4 = self.encoder(x)\n\n x = self.head(c4)\n x = list(x)\n x[0] = F.interpolate(x[0], imsize, mode='bilinear', align_corners=True)\n x[1] = F.interpolate(x[1], imsize, mode='bilinear', align_corners=True)\n x[2] = F.interpolate(x[2], imsize, mode='bilinear', align_corners=True)\n\n outputs = list()\n outputs.append(x[0])\n outputs.append(x[1])\n outputs.append(x[2])\n\n return tuple(outputs)\n\n\nclass DANetHead(nn.Module):\n def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d):\n super(DANetHead, self).__init__()\n inter_channels = in_channels // 4\n self.conv5a = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU())\n \n self.conv5c = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU())\n\n self.sa = PAM_Module(inter_channels)\n self.sc = CAM_Module(inter_channels)\n self.conv51 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU())\n self.conv52 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU())\n\n self.conv6 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(512, out_channels, 1))\n self.conv7 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(512, out_channels, 1))\n\n self.conv8 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(512, out_channels, 1))\n\n def forward(self, x):\n feat1 = self.conv5a(x)\n sa_feat = self.sa(feat1)\n sa_conv = self.conv51(sa_feat)\n sa_output = self.conv6(sa_conv)\n\n feat2 = self.conv5c(x)\n sc_feat = self.sc(feat2)\n sc_conv = self.conv52(sc_feat)\n sc_output = self.conv7(sc_conv)\n\n feat_sum = sa_conv+sc_conv\n \n sasc_output = self.conv8(feat_sum)\n\n output = [sasc_output]\n output.append(sa_output)\n output.append(sc_output)\n return tuple(output)\n\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .segbase import SegBaseModel\nfrom .model_zoo import MODEL_REGISTRY\nfrom ..modules import _FCNHead\nfrom ..modules.cc_attention import CrissCrossAttention\nfrom ..config import cfg\n\n@MODEL_REGISTRY.register()\nclass CCNet(SegBaseModel):\n r\"\"\"CCNet\n Reference:\n Zilong Huang, et al. 
\"CCNet: Criss-Cross Attention for Semantic Segmentation.\"\n arXiv preprint arXiv:1811.11721 (2018).\n \"\"\"\n\n def __init__(self):\n super(CCNet, self).__init__()\n self.head = _CCHead(self.nclass, norm_layer=self.norm_layer)\n if self.aux:\n self.auxlayer = _FCNHead(1024, self.nclass, norm_layer=self.norm_layer)\n\n self.__setattr__('decoder', ['head', 'auxlayer'] if self.aux else ['head'])\n\n def forward(self, x):\n size = x.size()[2:]\n _, _, c3, c4 = self.base_forward(x)\n outputs = list()\n x = self.head(c4)\n x = F.interpolate(x, size, mode='bilinear', align_corners=True)\n outputs.append(x)\n\n if self.aux:\n auxout = self.auxlayer(c3)\n auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)\n outputs.append(auxout)\n return tuple(outputs)\n\n\nclass _CCHead(nn.Module):\n def __init__(self, nclass, norm_layer=nn.BatchNorm2d):\n super(_CCHead, self).__init__()\n self.rcca = _RCCAModule(2048, 512, norm_layer)\n self.out = nn.Conv2d(512, nclass, 1)\n\n def forward(self, x):\n x = self.rcca(x)\n x = self.out(x)\n return x\n\n\nclass _RCCAModule(nn.Module):\n def __init__(self, in_channels, out_channels, norm_layer):\n super(_RCCAModule, self).__init__()\n self.recurrence = cfg.MODEL.CCNET.RECURRENCE\n inter_channels = in_channels // 4\n self.conva = nn.Sequential(\n nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU(True))\n self.cca = CrissCrossAttention(inter_channels)\n self.convb = nn.Sequential(\n nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU(True))\n\n self.bottleneck = nn.Sequential(\n nn.Conv2d(in_channels + inter_channels, out_channels, 3, padding=1, bias=False),\n norm_layer(out_channels),\n nn.Dropout2d(0.1))\n\n def forward(self, x):\n out = self.conva(x)\n for i in range(self.recurrence):\n out = self.cca(out)\n out = self.convb(out)\n out = torch.cat([x, out], dim=1)\n out = self.bottleneck(out)\n\n return out\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.Dropout2d", "torch.nn.functional.interpolate" ], [ "torch.nn.Dropout2d", "torch.cat", "torch.nn.Conv2d", "torch.nn.functional.interpolate", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thisch/unyt
[ "27894c1edc275205a9ad2e0d9f47d11241e1f5c3" ]
[ "unyt/tests/test_units.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nTest symbolic unit handling.\n\n\n\n\n\"\"\"\n\n# -----------------------------------------------------------------------------\n# Copyright (c) 2018, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the LICENSE file, distributed with this software.\n# -----------------------------------------------------------------------------\n\n\nimport numpy as np\nfrom numpy.testing import (\n assert_almost_equal,\n assert_allclose,\n assert_array_almost_equal_nulp,\n assert_equal,\n)\nimport operator\nimport pickle\nimport pytest\nfrom sympy import Symbol\n\nfrom unyt.testing import assert_allclose_units\nfrom unyt.unit_registry import UnitRegistry\nfrom unyt.dimensions import (\n mass,\n length,\n time,\n temperature,\n energy,\n magnetic_field_cgs,\n magnetic_field_mks,\n power,\n rate,\n)\nfrom unyt.exceptions import InvalidUnitOperation, UnitsNotReducible, UnitConversionError\nfrom unyt.unit_object import default_unit_registry, Unit, UnitParseError\nfrom unyt.unit_systems import cgs_unit_system, UnitSystem\nfrom unyt._unit_lookup_table import (\n default_unit_symbol_lut,\n name_alternatives,\n unit_prefixes,\n)\nimport unyt.unit_symbols as unit_symbols\nfrom unyt._physical_ratios import (\n m_per_pc,\n sec_per_year,\n m_per_km,\n m_per_mpc,\n mass_sun_kg,\n)\n\n\ndef test_no_conflicting_symbols():\n \"\"\"\n Check unit symbol definitions for conflicts.\n\n \"\"\"\n full_set = set(default_unit_symbol_lut.keys())\n\n # go through all possible prefix combos\n for symbol in default_unit_symbol_lut.keys():\n if default_unit_symbol_lut[symbol][4]:\n keys = unit_prefixes.keys()\n else:\n keys = [symbol]\n for prefix in keys:\n new_symbol = \"%s%s\" % (prefix, symbol)\n\n # test if we have seen this symbol\n assert new_symbol not in full_set, \"Duplicate symbol: %s\" % new_symbol\n\n full_set.add(new_symbol)\n\n\ndef test_dimensionless():\n \"\"\"\n Create dimensionless unit and check attributes.\n\n \"\"\"\n u1 = Unit()\n\n assert u1.is_dimensionless\n assert u1.expr == 1\n assert u1.base_value == 1\n assert u1.dimensions == 1\n assert u1 != \"hello!\"\n assert (u1 == \"hello\") is False\n\n u2 = Unit(\"\")\n\n assert u2.is_dimensionless\n assert u2.expr == 1\n assert u2.base_value == 1\n assert u2.dimensions == 1\n\n assert_equal(u1.latex_repr, \"\")\n assert_equal(u2.latex_repr, \"\")\n\n\ndef test_create_from_string():\n \"\"\"\n Create units with strings and check attributes.\n\n \"\"\"\n\n u1 = Unit(\"kg * m**2 * s**-2\")\n assert u1.dimensions == energy\n assert u1.base_value == 1.0\n\n # make sure order doesn't matter\n u2 = Unit(\"m**2 * s**-2 * kg\")\n assert u2.dimensions == energy\n assert u2.base_value == 1.0\n\n # Test rationals\n u3 = Unit(\"kg**0.5 * m**-0.5 * s**-1\")\n assert u3.dimensions == magnetic_field_cgs\n assert u3.base_value == 1.0\n\n # sqrt functions\n u4 = Unit(\"sqrt(kg)/sqrt(m)/s\")\n assert u4.dimensions == magnetic_field_cgs\n assert u4.base_value == 1.0\n\n # commutative sqrt function\n u5 = Unit(\"sqrt(kg/m)/s\")\n assert u5.dimensions == magnetic_field_cgs\n assert u5.base_value == 1.0\n\n # nonzero CGS conversion factor\n u6 = Unit(\"Msun/pc**3\")\n assert u6.dimensions == mass / length ** 3\n assert_array_almost_equal_nulp(\n np.array([u6.base_value]), np.array([mass_sun_kg / m_per_pc ** 3])\n )\n\n with pytest.raises(UnitParseError):\n Unit(\"m**m\")\n with pytest.raises(UnitParseError):\n Unit(\"m**g\")\n with pytest.raises(UnitParseError):\n Unit(\"m+g\")\n with 
pytest.raises(UnitParseError):\n Unit(\"m-g\")\n with pytest.raises(UnitParseError):\n Unit(\"hello!\")\n with pytest.raises(UnitParseError):\n Unit(\"True\")\n with pytest.raises(UnitParseError):\n Unit(\"else\")\n with pytest.raises(UnitParseError):\n Unit(\"hello(37)\")\n with pytest.raises(UnitParseError):\n Unit(\"hello(foo=37)\")\n\n cm = Unit(\"cm\")\n data = 1 * cm\n\n assert Unit(data) == cm\n assert Unit(b\"cm\") == cm\n\n\ndef test_create_from_expr():\n \"\"\"\n Create units from sympy Exprs and check attributes.\n\n \"\"\"\n pc_mks = m_per_pc\n yr_mks = sec_per_year\n\n # Symbol expr\n s1 = Symbol(\"pc\", positive=True)\n s2 = Symbol(\"yr\", positive=True)\n # Mul expr\n s3 = s1 * s2\n # Pow expr\n s4 = s1 ** 2 * s2 ** (-1)\n\n u1 = Unit(s1)\n u2 = Unit(s2)\n u3 = Unit(s3)\n u4 = Unit(s4)\n\n assert u1.expr == s1\n assert u2.expr == s2\n assert u3.expr == s3\n assert u4.expr == s4\n\n assert_allclose_units(u1.base_value, pc_mks, 1e-12)\n assert_allclose_units(u2.base_value, yr_mks, 1e-12)\n assert_allclose_units(u3.base_value, pc_mks * yr_mks, 1e-12)\n assert_allclose_units(u4.base_value, pc_mks ** 2 / yr_mks, 1e-12)\n\n assert u1.dimensions == length\n assert u2.dimensions == time\n assert u3.dimensions == length * time\n assert u4.dimensions == length ** 2 / time\n\n\ndef test_create_with_duplicate_dimensions():\n \"\"\"\n Create units with overlapping dimensions. Ex: km/Mpc.\n\n \"\"\"\n\n u1 = Unit(\"J * s**-1\")\n u2 = Unit(\"km/s/Mpc\")\n km_mks = m_per_km\n Mpc_mks = m_per_mpc\n\n assert u1.base_value == 1\n assert u1.dimensions == power\n\n assert_allclose_units(u2.base_value, km_mks / Mpc_mks, 1e-12)\n assert u2.dimensions == rate\n\n\ndef test_create_new_symbol():\n \"\"\"\n Create unit with unknown symbol.\n\n \"\"\"\n u1 = Unit(\"abc\", base_value=42, dimensions=(mass / time))\n\n assert u1.expr == Symbol(\"abc\", positive=True)\n assert u1.base_value == 42\n assert u1.dimensions == mass / time\n\n u1 = Unit(\"abc\", base_value=42, dimensions=length ** 3)\n\n assert u1.expr == Symbol(\"abc\", positive=True)\n assert u1.base_value == 42\n assert u1.dimensions == length ** 3\n\n u1 = Unit(\"abc\", base_value=42, dimensions=length * (mass * length))\n\n assert u1.expr == Symbol(\"abc\", positive=True)\n assert u1.base_value == 42\n assert u1.dimensions == length ** 2 * mass\n\n with pytest.raises(UnitParseError):\n Unit(\"abc\", base_value=42, dimensions=length ** length)\n with pytest.raises(UnitParseError):\n Unit(\"abc\", base_value=42, dimensions=length ** (length * length))\n with pytest.raises(UnitParseError):\n Unit(\"abc\", base_value=42, dimensions=length - mass)\n with pytest.raises(UnitParseError):\n Unit(\"abc\", base_value=42, dimensions=length + mass)\n\n\ndef test_create_fail_on_unknown_symbol():\n \"\"\"\n Fail to create unit with unknown symbol, without base_value and dimensions.\n\n \"\"\"\n with pytest.raises(UnitParseError):\n Unit(Symbol(\"jigawatts\"))\n\n\ndef test_create_fail_on_bad_symbol_type():\n \"\"\"\n Fail to create unit with bad symbol type.\n\n \"\"\"\n with pytest.raises(UnitParseError):\n Unit([1]) # something other than Expr and str\n\n\ndef test_create_fail_on_bad_dimensions_type():\n \"\"\"\n Fail to create unit with bad dimensions type.\n\n \"\"\"\n with pytest.raises(UnitParseError):\n Unit(\"a\", base_value=1, dimensions=\"(mass)\")\n\n\ndef test_create_fail_on_dimensions_content():\n \"\"\"\n Fail to create unit with bad dimensions expr.\n\n \"\"\"\n a = Symbol(\"a\")\n with pytest.raises(UnitParseError):\n Unit(\"a\", 
base_value=1, dimensions=a)\n\n\ndef test_create_fail_on_base_value_type():\n \"\"\"\n Fail to create unit with bad base_value type.\n\n \"\"\"\n with pytest.raises(UnitParseError):\n Unit(\"a\", base_value=\"a\", dimensions=(mass / time))\n\n\ndef test_string_representation():\n \"\"\"\n Check unit string representation.\n\n \"\"\"\n pc = Unit(\"pc\")\n Myr = Unit(\"Myr\")\n speed = pc / Myr\n dimensionless = Unit()\n\n assert str(pc) == \"pc\"\n assert str(Myr) == \"Myr\"\n assert str(speed) == \"pc/Myr\"\n assert repr(speed) == \"pc/Myr\"\n assert str(dimensionless) == \"dimensionless\"\n assert repr(dimensionless) == \"(dimensionless)\"\n\n\ndef test_multiplication():\n \"\"\"\n Multiply two units.\n\n \"\"\"\n msun_mks = mass_sun_kg\n pc_mks = m_per_pc\n\n # Create symbols\n msun_sym = Symbol(\"Msun\", positive=True)\n pc_sym = Symbol(\"pc\", positive=True)\n s_sym = Symbol(\"s\", positive=True)\n\n # Create units\n u1 = Unit(\"Msun\")\n u2 = Unit(\"pc\")\n\n # Mul operation\n u3 = u1 * u2\n\n assert u3.expr == msun_sym * pc_sym\n assert_allclose_units(u3.base_value, msun_mks * pc_mks, 1e-12)\n assert u3.dimensions == mass * length\n\n # Pow and Mul operations\n u4 = Unit(\"pc**2\")\n u5 = Unit(\"Msun * s\")\n\n u6 = u4 * u5\n\n assert u6.expr == pc_sym ** 2 * msun_sym * s_sym\n assert_allclose_units(u6.base_value, pc_mks ** 2 * msun_mks, 1e-12)\n assert u6.dimensions == length ** 2 * mass * time\n\n\ndef test_division():\n \"\"\"\n Divide two units.\n\n \"\"\"\n pc_mks = m_per_pc\n km_mks = m_per_km\n\n # Create symbols\n pc_sym = Symbol(\"pc\", positive=True)\n km_sym = Symbol(\"km\", positive=True)\n s_sym = Symbol(\"s\", positive=True)\n\n # Create units\n u1 = Unit(\"pc\")\n u2 = Unit(\"km * s\")\n\n u3 = u1 / u2\n\n assert u3.expr == pc_sym / (km_sym * s_sym)\n assert_allclose_units(u3.base_value, pc_mks / km_mks, 1e-12)\n assert u3.dimensions == 1 / time\n\n\ndef test_power():\n \"\"\"\n Take units to some power.\n\n \"\"\"\n from sympy import nsimplify\n\n pc_mks = m_per_pc\n mK_mks = 1e-3\n u1_dims = mass * length ** 2 * time ** -3 * temperature ** 4\n u1 = Unit(\"kg * pc**2 * s**-3 * mK**4\")\n\n u2 = u1 ** 2\n\n assert u2.dimensions == u1_dims ** 2\n assert_allclose_units(u2.base_value, (pc_mks ** 2 * mK_mks ** 4) ** 2, 1e-12)\n\n u3 = u1 ** (-1.0 / 3)\n\n assert u3.dimensions == nsimplify(u1_dims ** (-1.0 / 3))\n assert_allclose_units(\n u3.base_value, (pc_mks ** 2 * mK_mks ** 4) ** (-1.0 / 3), 1e-12\n )\n\n\ndef test_equality():\n \"\"\"\n Check unit equality with different symbols, but same dimensions and\n base_value.\n\n \"\"\"\n u1 = Unit(\"km * s**-1\")\n u2 = Unit(\"m * ms**-1\")\n\n assert u1 == u2\n assert u1.copy() == u2\n\n\ndef test_invalid_operations():\n u1 = Unit(\"cm\")\n u2 = Unit(\"m\")\n\n with pytest.raises(InvalidUnitOperation):\n u1 + u2\n with pytest.raises(InvalidUnitOperation):\n u1 += u2\n with pytest.raises(InvalidUnitOperation):\n 1 + u1\n with pytest.raises(InvalidUnitOperation):\n u1 + 1\n with pytest.raises(InvalidUnitOperation):\n u1 - u2\n with pytest.raises(InvalidUnitOperation):\n u1 -= u2\n with pytest.raises(InvalidUnitOperation):\n 1 - u1\n with pytest.raises(InvalidUnitOperation):\n u1 - 1\n with pytest.raises(InvalidUnitOperation):\n u1 *= u2\n with pytest.raises(InvalidUnitOperation):\n u1 * \"hello!\"\n with pytest.raises(InvalidUnitOperation):\n u1 /= u2\n with pytest.raises(InvalidUnitOperation):\n u1 / \"hello!\"\n\n\ndef test_base_equivalent():\n \"\"\"\n Check base equivalent of a unit.\n\n \"\"\"\n Msun_mks = mass_sun_kg\n 
Mpc_mks = m_per_mpc\n\n u1 = Unit(\"Msun * Mpc**-3\")\n u2 = Unit(\"kg * m**-3\")\n u3 = u1.get_base_equivalent()\n\n assert u2.expr == u3.expr\n assert u2 == u3\n\n assert_allclose_units(u1.base_value, Msun_mks / Mpc_mks ** 3, 1e-12)\n assert u2.base_value == 1\n assert u3.base_value == 1\n\n mass_density = mass / length ** 3\n\n assert u1.dimensions == mass_density\n assert u2.dimensions == mass_density\n assert u3.dimensions == mass_density\n\n assert_allclose_units(\n u1.get_conversion_factor(u3)[0], Msun_mks / Mpc_mks ** 3, 1e-12\n )\n\n with pytest.raises(UnitConversionError):\n u1.get_conversion_factor(Unit(\"m\"))\n\n with pytest.raises(UnitConversionError):\n u1.get_conversion_factor(Unit(\"degF\"))\n\n reg = UnitRegistry(unit_system=cgs_unit_system)\n\n u = Unit(\"kg\", registry=reg)\n\n assert u.get_base_equivalent() == Unit(\"g\")\n\n u = Unit(\"kg\")\n\n assert u.get_base_equivalent() == Unit(\"kg\")\n\n u = Unit(\"A\")\n assert u.get_base_equivalent(unit_system=\"mks\") == Unit(\"A\")\n\n\ndef test_temperature_offsets():\n u1 = Unit(\"degC\")\n u2 = Unit(\"degF\")\n\n with pytest.raises(InvalidUnitOperation):\n operator.mul(u1, u2)\n with pytest.raises(InvalidUnitOperation):\n operator.truediv(u1, u2)\n\n\ndef test_latex_repr():\n registry = UnitRegistry()\n\n # create a fake comoving unit\n registry.add(\n \"pccm\",\n registry.lut[\"pc\"][0] / (1 + 2),\n length,\n \"\\\\rm{pc}/(1+z)\",\n prefixable=True,\n )\n\n test_unit = Unit(\"Mpccm\", registry=registry)\n assert_almost_equal(test_unit.base_value, m_per_mpc / 3)\n assert_equal(test_unit.latex_repr, r\"\\rm{Mpc}/(1+z)\")\n\n test_unit = Unit(\"cm**-3\", base_value=1.0, registry=registry)\n assert_equal(test_unit.latex_repr, \"\\\\frac{1}{\\\\rm{cm}^{3}}\")\n\n test_unit = Unit(\"m_geom/l_geom**3\")\n assert_equal(test_unit.latex_repr, \"\\\\frac{1}{M_\\\\odot^{2}}\")\n\n test_unit = Unit(\"1e9*cm\")\n assert_equal(test_unit.latex_repr, \"1.0 \\\\times 10^{9}\\\\ \\\\rm{cm}\")\n\n test_unit = Unit(\"1.0*cm\")\n assert_equal(test_unit.latex_repr, \"\\\\rm{cm}\")\n\n\ndef test_latitude_longitude():\n lat = unit_symbols.lat\n lon = unit_symbols.lon\n deg = unit_symbols.deg\n assert_equal(lat.units.base_offset, 90.0)\n assert_equal((deg * 90.0).in_units(\"lat\").value, 0.0)\n assert_equal((deg * 180).in_units(\"lat\").value, -90.0)\n assert_equal((lat * 0.0).in_units(\"deg\"), deg * 90.0)\n assert_equal((lat * -90).in_units(\"deg\"), deg * 180)\n\n assert_equal(lon.units.base_offset, -180.0)\n assert_equal((deg * 0.0).in_units(\"lon\").value, -180.0)\n assert_equal((deg * 90.0).in_units(\"lon\").value, -90.0)\n assert_equal((deg * 180).in_units(\"lon\").value, 0.0)\n assert_equal((deg * 360).in_units(\"lon\").value, 180.0)\n\n assert_equal((lon * -180.0).in_units(\"deg\"), deg * 0.0)\n assert_equal((lon * -90.0).in_units(\"deg\"), deg * 90.0)\n assert_equal((lon * 0.0).in_units(\"deg\"), deg * 180.0)\n assert_equal((lon * 180.0).in_units(\"deg\"), deg * 360)\n\n\ndef test_creation_from_ytarray():\n from unyt import electrostatic_unit, elementary_charge_cgs\n\n u1 = Unit(electrostatic_unit)\n assert_equal(str(u1), \"statC\")\n assert_equal(u1, Unit(\"esu\"))\n assert_equal(u1, electrostatic_unit.units)\n\n u2 = Unit(elementary_charge_cgs)\n assert_equal(str(u2), \"4.80320467299766e-10*statC\")\n assert_equal(u2, Unit(\"4.80320467299766e-10*statC\"))\n assert_equal(u1, elementary_charge_cgs.units)\n\n assert_allclose((u1 / u2).base_value, electrostatic_unit / elementary_charge_cgs)\n\n with pytest.raises(UnitParseError):\n 
Unit([1, 2, 3] * elementary_charge_cgs)\n\n\ndef test_list_same_dimensions():\n from unyt import m\n\n reg = default_unit_registry\n for equiv in reg.list_same_dimensions(m):\n assert Unit(equiv).dimensions is length\n\n\ndef test_decagram():\n dag = Unit(\"dag\")\n g = Unit(\"g\")\n assert dag.get_conversion_factor(g) == (10.0, None)\n\n\ndef test_pickle():\n cm = Unit(\"cm\")\n assert cm == pickle.loads(pickle.dumps(cm))\n\n\ndef test_preserve_offset():\n from unyt import degF, dimensionless\n\n new_unit = degF * dimensionless\n\n assert new_unit is not degF\n assert new_unit == degF\n assert new_unit.base_offset == degF.base_offset\n\n new_unit = degF / dimensionless\n\n assert new_unit is not degF\n assert new_unit == degF\n assert new_unit.base_offset == degF.base_offset\n\n with pytest.raises(InvalidUnitOperation):\n dimensionless / degF\n\n\ndef test_code_unit():\n from unyt import UnitRegistry\n\n ureg = UnitRegistry()\n ureg.add(\"code_length\", 10.0, length)\n ureg.add(\"code_magnetic_field\", 2.0, magnetic_field_mks)\n u = Unit(\"code_length\", registry=ureg)\n assert u.is_code_unit is True\n assert u.get_base_equivalent() == Unit(\"m\")\n u = Unit(\"cm\")\n assert u.is_code_unit is False\n\n u = Unit(\"code_magnetic_field\", registry=ureg)\n assert u.get_base_equivalent(\"mks\") == Unit(\"T\")\n with pytest.raises(UnitsNotReducible):\n assert u.get_base_equivalent(\"cgs\")\n\n # see issue #60\n u = Unit(\"s/m\")\n assert u.get_mks_equivalent() == Unit(\"s/m\")\n assert u.get_mks_equivalent() != Unit(\"ohm\")\n assert u.get_cgs_equivalent() == Unit(\"s/cm\")\n\n u = Unit(\"kC\")\n assert u.get_cgs_equivalent() == Unit(\"kesu\")\n assert u.get_cgs_equivalent().get_mks_equivalent() == u\n\n UnitSystem(ureg.unit_system_id, \"code_length\", \"kg\", \"s\", registry=ureg)\n\n u = Unit(\"cm\", registry=ureg)\n ue = u.get_base_equivalent(\"code\")\n\n assert str(ue) == \"code_length\"\n assert ue.base_value == 10\n assert ue.dimensions is length\n\n class FakeDataset(object):\n unit_registry = ureg\n\n ds = FakeDataset()\n\n UnitSystem(ds, \"code_length\", \"kg\", \"s\", registry=ureg)\n\n u = Unit(\"cm\", registry=ureg)\n ue = u.get_base_equivalent(ds)\n\n assert str(ue) == \"code_length\"\n assert ue.base_value == 10\n assert ue.dimensions is length\n\n with pytest.raises(UnitParseError):\n Unit(\"code_length\")\n\n\ndef test_bad_equivalence():\n from unyt import cm\n\n with pytest.raises(KeyError):\n cm.has_equivalent(\"dne\")\n\n\ndef test_em_unit_base_equivalent():\n from unyt import A, cm\n\n with pytest.raises(UnitsNotReducible):\n (A / cm).get_base_equivalent(\"cgs\")\n\n\ndef test_symbol_lut_length():\n for v in default_unit_symbol_lut.values():\n assert len(v) == 5\n\n\ndef test_simplify():\n import unyt as u\n\n answers = {\n u.Hz * u.s: \"dimensionless\",\n u.kg / u.g: \"1000\",\n u.Hz * u.s * u.km: \"km\",\n u.kHz * u.s: \"1000\",\n u.kHz * u.s * u.km: \"1000*km\",\n u.kHz * u.s ** 2: \"1000*s\",\n u.kHz * u.s ** 2 * u.km: \"1000*km*s\",\n u.Hz ** -1 * u.s: \"s/Hz\",\n u.Hz ** -1 * u.s * u.km: \"km*s/Hz\",\n u.Hz ** 1.5 * u.s ** 1.7: \"sqrt(Hz)*s**(7/10)\",\n u.Hz ** 1.5 * u.s ** 1.7 * u.km: \"sqrt(Hz)*km*s**(7/10)\",\n u.m ** 2 / u.cm ** 2: \"10000\",\n }\n\n for unit, answer in answers.items():\n assert str(unit.simplify()) == answer\n\n\ndef test_micro_prefix():\n import unyt as u\n\n # both versions of unicode mu work correctly\n assert u.um == u.µm\n assert u.um == u.μm\n\n # parsing both versions works as well\n assert u.ug == u.Unit(\"µg\")\n assert u.ug == 
u.Unit(\"μg\")\n\n\ndef test_name_alternatives():\n import unyt\n from unyt._unit_lookup_table import (\n default_unit_name_alternatives,\n name_alternatives,\n inv_name_alternatives,\n )\n\n # concatenated list of all alternative unit names\n allowed_names = sum(name_alternatives.values(), [])\n\n # ensure the values are all tuples and not e.g. strings\n for val in default_unit_name_alternatives.values():\n assert isinstance(val, tuple)\n\n # all names are unique\n assert len(set(allowed_names)) == len(allowed_names)\n # each allowed name has a key in the inverse dict\n assert len(inv_name_alternatives.keys()) == len(allowed_names)\n assert set(inv_name_alternatives.keys()) == set(allowed_names)\n\n for name in allowed_names:\n assert hasattr(unyt, name)\n assert hasattr(unyt.unit_symbols, name)\n\n\ndef test_attosecond():\n from unyt import Unit, attosecond, second\n\n assert Unit(\"as\") == attosecond\n assert str(Unit(\"as\")) == \"as\"\n assert Unit(\"as/s\") == attosecond / second\n\n\ndef test_micro():\n from unyt import Unit\n\n assert str(Unit(\"um\")) == \"µm\"\n assert str(Unit(\"us\")) == \"µs\"\n\n\ndef test_show_all_units_doc_table_ops():\n for name in set(name_alternatives.keys()):\n u = Unit(name)\n (1 * u).in_mks()\n try:\n (1 * u).in_cgs()\n except UnitsNotReducible:\n pass\n" ]
[ [ "numpy.testing.assert_equal", "numpy.array", "numpy.testing.assert_almost_equal", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pierrick-giffard/parcels
[ "2447d4785b915e17f59f6c1f90703a35d2235c91" ]
[ "tests/test_kernel_language.py" ]
[ "from parcels import FieldSet, ParticleSet, ScipyParticle, JITParticle, Kernel, Variable, ErrorCode\nfrom parcels.kernels.seawaterdensity import polyTEOS10_bsq, UNESCO_Density\nfrom parcels import random as parcels_random\nimport numpy as np\nimport pytest\nimport random as py_random\nfrom os import path\nimport sys\n\n\nptype = {'scipy': ScipyParticle, 'jit': JITParticle}\n\n\ndef expr_kernel(name, pset, expr):\n pycode = \"\"\"def %s(particle, fieldset, time):\n particle.p = %s\"\"\" % (name, expr)\n return Kernel(pset.fieldset, pset.ptype, pyfunc=None,\n funccode=pycode, funcname=name,\n funcvars=['particle'])\n\n\ndef fieldset(xdim=20, ydim=20):\n \"\"\" Standard unit mesh fieldset \"\"\"\n lon = np.linspace(0., 1., xdim, dtype=np.float32)\n lat = np.linspace(0., 1., ydim, dtype=np.float32)\n U, V = np.meshgrid(lat, lon)\n data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}\n dimensions = {'lat': lat, 'lon': lon}\n return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)\n\n\[email protected](name=\"fieldset\")\ndef fieldset_fixture(xdim=20, ydim=20):\n return fieldset(xdim=xdim, ydim=ydim)\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('name, expr, result', [\n ('Add', '2 + 5', 7),\n ('Sub', '6 - 2', 4),\n ('Mul', '3 * 5', 15),\n ('Div', '24 / 4', 6),\n])\ndef test_expression_int(fieldset, mode, name, expr, result, npart=10):\n \"\"\" Test basic arithmetic expressions \"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle,\n lon=np.linspace(0., 1., npart),\n lat=np.zeros(npart) + 0.5)\n pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)\n assert(np.all(result == pset.p))\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('name, expr, result', [\n ('Add', '2. + 5.', 7),\n ('Sub', '6. - 2.', 4),\n ('Mul', '3. * 5.', 15),\n ('Div', '24. 
/ 4.', 6),\n ('Pow', '2 ** 3', 8),\n])\ndef test_expression_float(fieldset, mode, name, expr, result, npart=10):\n \"\"\" Test basic arithmetic expressions \"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle,\n lon=np.linspace(0., 1., npart),\n lat=np.zeros(npart) + 0.5)\n pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)\n assert(np.all(result == pset.p))\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('name, expr, result', [\n ('True', 'True', True),\n ('False', 'False', False),\n ('And', 'True and False', False),\n ('Or', 'True or False', True),\n ('Equal', '5 == 5', True),\n ('Lesser', '5 < 3', False),\n ('LesserEq', '3 <= 5', True),\n ('Greater', '4 > 2', True),\n ('GreaterEq', '2 >= 4', False),\n])\ndef test_expression_bool(fieldset, mode, name, expr, result, npart=10):\n \"\"\" Test basic arithmetic expressions \"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle,\n lon=np.linspace(0., 1., npart),\n lat=np.zeros(npart) + 0.5)\n pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)\n if mode == 'jit':\n assert(np.all(result == (pset.p == 1)))\n else:\n assert(np.all(result == pset.p))\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_while_if_break(fieldset, mode):\n \"\"\"Test while, if and break commands\"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32, initial=0.)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])\n\n def kernel(particle, fieldset, time):\n while particle.p < 30:\n if particle.p > 9:\n break\n particle.p += 1\n if particle.p > 5:\n particle.p *= 2.\n pset.execute(kernel, endtime=1., dt=1.)\n assert np.allclose(pset.p, 20., rtol=1e-12)\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_nested_if(fieldset, mode):\n \"\"\"Test nested if commands\"\"\"\n class TestParticle(ptype[mode]):\n p0 = Variable('p0', dtype=np.int32, initial=0)\n p1 = Variable('p1', dtype=np.int32, initial=1)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)\n\n def kernel(particle, fieldset, time):\n if particle.p1 >= particle.p0:\n var = particle.p0\n if var + 1 < particle.p1:\n particle.p1 = -1\n\n pset.execute(kernel, endtime=10, dt=1.)\n assert np.allclose([pset.p0[0], pset.p1[0]], [0, 1])\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_pass(fieldset, mode):\n \"\"\"Test pass commands\"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.int32, initial=0)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)\n\n def kernel(particle, fieldset, time):\n particle.p = -1\n pass\n\n pset.execute(kernel, endtime=10, dt=1.)\n assert np.allclose(pset[0].p, -1)\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_dt_as_variable_in_kernel(fieldset, mode):\n pset = ParticleSet(fieldset, pclass=ptype[mode], lon=0, lat=0)\n\n def kernel(particle, fieldset, time):\n dt = 1. 
# noqa\n\n pset.execute(kernel, endtime=10, dt=1.)\n\n\ndef test_parcels_tmpvar_in_kernel(fieldset):\n \"\"\"Tests for error thrown if vartiable with 'tmp' defined in custom kernel\"\"\"\n error_thrown = False\n pset = ParticleSet(fieldset, pclass=JITParticle, lon=0, lat=0)\n\n def kernel_tmpvar(particle, fieldset, time):\n parcels_tmpvar0 = 0 # noqa\n\n try:\n pset.execute(kernel_tmpvar, endtime=1, dt=1.)\n except NotImplementedError:\n error_thrown = True\n assert error_thrown\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_if_withfield(fieldset, mode):\n \"\"\"Test combination of if and Field sampling commands\"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32, initial=0.)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])\n\n def kernel(particle, fieldset, time):\n u = fieldset.U[time, 0, 0, 1.]\n particle.p = 0\n if fieldset.U[time, 0, 0, 1.] == u:\n particle.p += 1\n if fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.]:\n particle.p += 1\n if True:\n particle.p += 1\n if fieldset.U[time, 0, 0, 1.] == u and 1 == 1:\n particle.p += 1\n if fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.] and fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.]:\n particle.p += 1\n if fieldset.U[time, 0, 0, 1.] == u:\n particle.p += 1\n else:\n particle.p += 1000\n if fieldset.U[time, 0, 0, 1.] == 3:\n particle.p += 1000\n else:\n particle.p += 1\n\n pset.execute(kernel, endtime=1., dt=1.)\n assert np.allclose(pset.p, 7., rtol=1e-12)\n\n\[email protected](\n 'mode',\n ['scipy',\n pytest.param('jit',\n marks=pytest.mark.xfail(\n (sys.version_info >= (3, 0)) or (sys.platform == 'win32'),\n reason=\"py.test FD capturing does not work for jit on python3 or Win\"))\n ])\ndef test_print(fieldset, mode, capfd):\n \"\"\"Test print statements\"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32, initial=0.)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0.5])\n\n def kernel(particle, fieldset, time):\n particle.p = fieldset.U[time, particle.depth, particle.lat, particle.lon]\n tmp = 5\n print(\"%d %f %f\" % (particle.id, particle.p, tmp))\n pset.execute(kernel, endtime=1., dt=1.)\n out, err = capfd.readouterr()\n lst = out.split(' ')\n tol = 1e-8\n assert abs(float(lst[0]) - pset.id[0]) < tol and abs(float(lst[1]) - pset.p[0]) < tol and abs(float(lst[2]) - 5) < tol\n\n def kernel2(particle, fieldset, time):\n tmp = 3\n print(\"%f\" % (tmp))\n pset.execute(kernel2, endtime=1., dt=1.)\n out, err = capfd.readouterr()\n lst = out.split(' ')\n assert abs(float(lst[0]) - 3) < tol\n\n\ndef random_series(npart, rngfunc, rngargs, mode):\n random = parcels_random if mode == 'jit' else py_random\n random.seed(1234)\n func = getattr(random, rngfunc)\n series = [func(*rngargs) for _ in range(npart)]\n random.seed(1234) # Reset the RNG seed\n return series\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('rngfunc, rngargs', [\n ('random', []),\n ('uniform', [0., 20.]),\n ('randint', [0, 20]),\n])\ndef test_random_float(fieldset, mode, rngfunc, rngargs, npart=10):\n \"\"\" Test basic random number generation \"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32 if rngfunc == 'randint' else np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle,\n lon=np.linspace(0., 1., npart),\n lat=np.zeros(npart) + 0.5)\n series = random_series(npart, rngfunc, rngargs, mode)\n kernel = expr_kernel('TestRandom_%s' % rngfunc, pset,\n 'random.%s(%s)' % (rngfunc, ', 
'.join([str(a) for a in rngargs])))\n pset.execute(kernel, endtime=1., dt=1.)\n assert np.allclose(pset.p, series, atol=1e-9)\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('c_inc', ['str', 'file'])\ndef test_c_kernel(fieldset, mode, c_inc):\n coord_type = np.float32 if c_inc == 'str' else np.float64\n pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0],\n lonlatdepth_dtype=coord_type)\n\n def func(U, lon, dt):\n u = U.data[0, 2, 1]\n return lon + u * dt\n\n if c_inc == 'str':\n c_include = \"\"\"\n static inline ErrorCode func(CField *f, float *lon, double *dt)\n {\n float data2D[2][2][2];\n ErrorCode err = getCell2D(f, 1, 2, 0, data2D, 1); CHECKERROR(err);\n float u = data2D[0][0][0];\n *lon += u * *dt;\n return SUCCESS;\n }\n \"\"\"\n else:\n c_include = path.join(path.dirname(__file__), 'customed_header.h')\n\n def ckernel(particle, fieldset, time):\n func('parcels_customed_Cfunc_pointer_args', fieldset.U, particle.lon, particle.dt)\n\n def pykernel(particle, fieldset, time):\n particle.lon = func(fieldset.U, particle.lon, particle.dt)\n\n if mode == 'scipy':\n kernel = pset.Kernel(pykernel)\n else:\n kernel = pset.Kernel(ckernel, c_include=c_include)\n pset.execute(kernel, endtime=3., dt=3.)\n assert np.allclose(pset.lon[0], 0.81578948)\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_dt_modif_by_kernel(fieldset, mode):\n class TestParticle(ptype[mode]):\n age = Variable('age', dtype=np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0])\n\n def modif_dt(particle, fieldset, time):\n particle.age += particle.dt\n particle.dt = 2\n\n endtime = 4\n pset.execute(modif_dt, endtime=endtime, dt=1.)\n assert np.isclose(pset.age[0], endtime)\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('dt', [1e-2, 1e-6])\ndef test_small_dt(fieldset, mode, dt, npart=10):\n pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.zeros(npart),\n lat=np.zeros(npart), time=np.arange(0, npart)*dt*10)\n\n def DoNothing(particle, fieldset, time):\n return ErrorCode.Success\n\n pset.execute(DoNothing, dt=dt, runtime=dt*100)\n assert np.allclose([p.time for p in pset], dt*100)\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_seawaterdensity_kernels(mode):\n\n def generate_fieldset(xdim=2, ydim=2, zdim=2, tdim=1):\n lon = np.linspace(0., 10., xdim, dtype=np.float32)\n lat = np.linspace(0., 10., ydim, dtype=np.float32)\n depth = np.linspace(0, 2000, zdim, dtype=np.float32)\n time = np.zeros(tdim, dtype=np.float64)\n U = np.ones((tdim, zdim, ydim, xdim))\n V = np.ones((tdim, zdim, ydim, xdim))\n abs_salinity = 30 * np.ones((tdim, zdim, ydim, xdim))\n cons_temperature = 10 * np.ones((tdim, zdim, ydim, xdim))\n dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time}\n data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32),\n 'abs_salinity': np.array(abs_salinity, dtype=np.float32),\n 'cons_temperature': np.array(cons_temperature, dtype=np.float32)}\n return (data, dimensions)\n\n data, dimensions = generate_fieldset()\n fieldset = FieldSet.from_data(data, dimensions)\n\n class DensParticle(ptype[mode]):\n density = Variable('density', dtype=np.float32)\n\n pset = ParticleSet(fieldset, pclass=DensParticle, lon=5, lat=5, depth=1000)\n\n pset.execute(polyTEOS10_bsq, runtime=0, dt=0)\n assert np.allclose(pset[0].density, 1022.85377)\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('pressure', [0, 10])\ndef test_UNESCOdensity_kernel(mode, pressure):\n\n def 
generate_fieldset(p, xdim=2, ydim=2, zdim=2, tdim=1):\n lon = np.linspace(0., 10., xdim, dtype=np.float32)\n lat = np.linspace(0., 10., ydim, dtype=np.float32)\n depth = np.linspace(0, 2000, zdim, dtype=np.float32)\n time = np.zeros(tdim, dtype=np.float64)\n U = np.ones((tdim, zdim, ydim, xdim))\n V = np.ones((tdim, zdim, ydim, xdim))\n psu_salinity = 8 * np.ones((tdim, zdim, ydim, xdim))\n cons_temperature = 10 * np.ones((tdim, zdim, ydim, xdim))\n cons_pressure = p * np.ones((tdim, zdim, ydim, xdim))\n dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time}\n data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32),\n 'psu_salinity': np.array(psu_salinity, dtype=np.float32),\n 'cons_pressure': np.array(cons_pressure, dtype=np.float32),\n 'cons_temperature': np.array(cons_temperature, dtype=np.float32)}\n return (data, dimensions)\n\n data, dimensions = generate_fieldset(pressure)\n fieldset = FieldSet.from_data(data, dimensions)\n\n class DensParticle(ptype[mode]):\n density = Variable('density', dtype=np.float32)\n\n pset = ParticleSet(fieldset, pclass=DensParticle, lon=5, lat=5, depth=1000)\n\n pset.execute(UNESCO_Density, runtime=0, dt=0)\n\n if(pressure == 0):\n assert np.allclose(pset[0].density, 1005.9465)\n elif(pressure == 10):\n assert np.allclose(pset[0].density, 1006.4179)\n" ]
[ [ "numpy.allclose", "numpy.linspace", "numpy.arange", "numpy.ones", "numpy.all", "numpy.array", "numpy.meshgrid", "numpy.zeros", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PudPawat/protest-detection-violence-estimation
[ "6469c3ae47a7d99308458174fe16bd2c5c7821aa" ]
[ "resnext_wsl.py" ]
[ "\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Optional list of dependencies required by the package\n\n'''\n Code From : https://github.com/facebookresearch/WSL-Images/blob/master/hubconf.py\n'''\ndependencies = ['torch', 'torchvision']\n\ntry:\n from torch.hub import load_state_dict_from_url\nexcept ImportError:\n from torch.utils.model_zoo import load_url as load_state_dict_from_url\n \nfrom Res import ResNet, Bottleneck\n\n\nmodel_urls = {\n 'resnext101_32x8d': 'https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth',\n 'resnext101_32x16d': 'https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth',\n 'resnext101_32x32d': 'https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth',\n 'resnext101_32x48d': 'https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth',\n}\n\n\ndef _resnext(arch, block, layers, pretrained, progress, **kwargs):\n model = ResNet(block, layers, **kwargs)\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef resnext101_32x8d_wsl(progress=True, **kwargs):\n \"\"\"Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data\n and finetuned on ImageNet from Figure 5 in\n `\"Exploring the Limits of Weakly Supervised Pretraining\" <https://arxiv.org/abs/1805.00932>`_\n Args:\n progress (bool): If True, displays a progress bar of the download to stderr.\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _resnext('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)\n\n\ndef resnext101_32x16d_wsl(progress=True, **kwargs):\n \"\"\"Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data\n and finetuned on ImageNet from Figure 5 in\n `\"Exploring the Limits of Weakly Supervised Pretraining\" <https://arxiv.org/abs/1805.00932>`_\n Args:\n progress (bool): If True, displays a progress bar of the download to stderr.\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 16\n return _resnext('resnext101_32x16d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)\n\n\ndef resnext101_32x32d_wsl(progress=True, **kwargs):\n \"\"\"Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data\n and finetuned on ImageNet from Figure 5 in\n `\"Exploring the Limits of Weakly Supervised Pretraining\" <https://arxiv.org/abs/1805.00932>`_\n Args:\n progress (bool): If True, displays a progress bar of the download to stderr.\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 32\n return _resnext('resnext101_32x32d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)\n\n\ndef resnext101_32x48d_wsl(progress=True, **kwargs):\n \"\"\"Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data\n and finetuned on ImageNet from Figure 5 in\n `\"Exploring the Limits of Weakly Supervised Pretraining\" <https://arxiv.org/abs/1805.00932>`_\n Args:\n progress (bool): If True, displays a progress bar of the download to stderr.\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 48\n return _resnext('resnext101_32x48d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)\n" ]
[ [ "torch.utils.model_zoo.load_url" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spetrescu/sesn
[ "43ecc5da7083364eea2c66742c17231c18465973", "43ecc5da7083364eea2c66742c17231c18465973" ]
[ "models/mnist_ses.py", "models/stl_ses.py" ]
[ "'''MIT License. Copyright (c) 2020 Ivan Sosnovik, Michał Szmaja'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .impl.ses_conv import SESMaxProjection\nfrom .impl.ses_conv import SESConv_Z2_H, SESConv_H_H\n\n\nclass MNIST_SES_Scalar(nn.Module):\n\n def __init__(self, pool_size=4, kernel_size=11, scales=[1.0], basis_type='A', **kwargs):\n super().__init__()\n C1, C2, C3 = 32, 63, 95\n self.main = nn.Sequential(\n SESConv_Z2_H(1, C1, kernel_size, 7, scales=scales,\n padding=kernel_size // 2, bias=True,\n basis_type=basis_type, **kwargs),\n SESMaxProjection(),\n nn.ReLU(True),\n nn.MaxPool2d(2),\n nn.BatchNorm2d(C1),\n\n SESConv_Z2_H(C1, C2, kernel_size, 7, scales=scales,\n padding=kernel_size // 2, bias=True,\n basis_type=basis_type, **kwargs),\n SESMaxProjection(),\n nn.ReLU(True),\n nn.MaxPool2d(2),\n nn.BatchNorm2d(C2),\n\n SESConv_Z2_H(C2, C3, kernel_size, 7, scales=scales,\n padding=kernel_size // 2, bias=True,\n basis_type=basis_type, **kwargs),\n SESMaxProjection(),\n nn.ReLU(True),\n nn.MaxPool2d(pool_size, padding=2),\n nn.BatchNorm2d(C3),\n )\n\n self.linear = nn.Sequential(\n nn.Linear(4 * C3, 256, bias=False),\n nn.BatchNorm1d(256),\n nn.ReLU(True),\n nn.Dropout(kwargs.get('dropout', 0.7)),\n nn.Linear(256, 10)\n )\n\n def forward(self, x):\n x = self.main(x)\n x = x.view(x.size(0), -1)\n x = self.linear(x)\n return x\n\n\nclass MNIST_SES_V(nn.Module):\n\n def __init__(self, pool_size=4, kernel_size=11, scales=[1.0], basis_type='A', dropout=0.7, **kwargs):\n super().__init__()\n C1, C2, C3 = 32, 63, 95\n self.main = nn.Sequential(\n SESConv_Z2_H(1, C1, kernel_size, 7, scales=scales,\n padding=kernel_size // 2, bias=True,\n basis_type=basis_type, **kwargs),\n nn.ReLU(True),\n nn.MaxPool3d([1, 2, 2], stride=[1, 2, 2]),\n nn.BatchNorm3d(C1),\n\n SESConv_H_H(C1, C2, 1, kernel_size, 7, scales=scales,\n padding=kernel_size // 2, bias=True,\n basis_type=basis_type, **kwargs),\n nn.ReLU(True),\n nn.MaxPool3d([1, 2, 2], stride=[1, 2, 2]),\n nn.BatchNorm3d(C2),\n\n SESConv_H_H(C2, C3, 1, kernel_size, 7, scales=scales,\n padding=kernel_size // 2, bias=True,\n basis_type=basis_type, **kwargs),\n SESMaxProjection(),\n nn.ReLU(True),\n nn.MaxPool2d(pool_size, padding=2),\n nn.BatchNorm2d(C3),\n )\n\n self.linear = nn.Sequential(\n nn.Linear(4 * C3, 256, bias=False),\n nn.BatchNorm1d(256),\n nn.ReLU(True),\n nn.Dropout(dropout),\n nn.Linear(256, 10)\n )\n\n def forward(self, x):\n x = self.main(x)\n x = x.view(x.size(0), -1)\n x = self.linear(x)\n return x\n\n\ndef mnist_ses_vector_56p(**kwargs):\n num_scales = 4\n factor = 2.0\n min_scale = 1.5\n mult = 1.4\n size = 13\n dropout = 0.7\n q = factor ** (1 / (num_scales - 1))\n scales = [min_scale * q**i for i in range(num_scales)]\n scales = [round(s, 2) for s in scales]\n model = MNIST_SES_V(pool_size=8, kernel_size=size, scales=scales,\n basis_type='B', mult=mult, max_order=4, dropout=dropout)\n return nn.Sequential(nn.Upsample(scale_factor=2), model)\n\n\ndef mnist_ses_vector_56(**kwargs):\n num_scales = 4\n factor = 2.0\n min_scale = 2.0\n mult = 1.5\n size = 15\n dropout = 0.7\n q = factor ** (1 / (num_scales - 1))\n scales = [min_scale * q**i for i in range(num_scales)]\n scales = [round(s, 2) for s in scales]\n model = MNIST_SES_V(pool_size=8, kernel_size=size, scales=scales,\n basis_type='B', mult=mult, max_order=4, dropout=dropout)\n return nn.Sequential(nn.Upsample(scale_factor=2), model)\n\n\ndef mnist_ses_scalar_56p(**kwargs):\n num_scales = 3\n factor = 3.0\n min_scale = 1.0\n mult = 1.4\n size = 
17\n dropout = 0.7\n q = factor ** (1 / (num_scales - 1))\n scales = [min_scale * q**i for i in range(num_scales)]\n scales = [round(s, 2) for s in scales]\n model = MNIST_SES_Scalar(pool_size=8, kernel_size=size, scales=scales,\n basis_type='B', mult=mult, max_order=4, dropout=dropout)\n return nn.Sequential(nn.Upsample(scale_factor=2), model)\n\n\ndef mnist_ses_scalar_56(**kwargs):\n num_scales = 3\n factor = 2.0\n min_scale = 2.0\n mult = 1.4\n size = 15\n dropout = 0.7\n q = factor ** (1 / (num_scales - 1))\n scales = [min_scale * q**i for i in range(num_scales)]\n scales = [round(s, 2) for s in scales]\n model = MNIST_SES_Scalar(pool_size=8, kernel_size=size, scales=scales,\n basis_type='B', mult=mult, max_order=4, dropout=dropout)\n return nn.Sequential(nn.Upsample(scale_factor=2), model)\n\n\ndef mnist_ses_vector_28p(**kwargs):\n num_scales = 3\n factor = 3.0\n min_scale = 1.5\n mult = 1.4\n size = 15\n dropout = 0.7\n q = factor ** (1 / (num_scales - 1))\n scales = [min_scale * q**i for i in range(num_scales)]\n scales = [round(s, 2) for s in scales]\n model = MNIST_SES_V(pool_size=4, kernel_size=size, scales=scales,\n basis_type='B', mult=mult, max_order=4, dropout=dropout)\n return model\n\n\ndef mnist_ses_vector_28(**kwargs):\n num_scales = 3\n factor = 3.0\n min_scale = 1.5\n mult = 1.5\n size = 13\n dropout = 0.7\n q = factor ** (1 / (num_scales - 1))\n scales = [min_scale * q**i for i in range(num_scales)]\n scales = [round(s, 2) for s in scales]\n model = MNIST_SES_V(pool_size=4, kernel_size=size, scales=scales,\n basis_type='B', mult=mult, max_order=4, dropout=dropout)\n return model\n\n\ndef mnist_ses_scalar_28p(**kwargs):\n num_scales = 4\n factor = 3.0\n min_scale = 1.5\n mult = 1.4\n size = 13\n dropout = 0.7\n q = factor ** (1 / (num_scales - 1))\n scales = [min_scale * q**i for i in range(num_scales)]\n scales = [round(s, 2) for s in scales]\n model = MNIST_SES_Scalar(pool_size=4, kernel_size=size, scales=scales,\n basis_type='B', mult=mult, max_order=4, dropout=dropout)\n return model\n\n\ndef mnist_ses_scalar_28(**kwargs):\n num_scales = 4\n factor = 3.0\n min_scale = 1.7\n mult = 1.5\n size = 15\n dropout = 0.7\n q = factor ** (1 / (num_scales - 1))\n scales = [min_scale * q**i for i in range(num_scales)]\n scales = [round(s, 2) for s in scales]\n model = MNIST_SES_Scalar(pool_size=4, kernel_size=size, scales=scales,\n basis_type='B', mult=mult, max_order=4, dropout=dropout)\n return model\n", "'''It is a modified version of the unofficial implementaion of \n'Wide Residual Networks'\nPaper: https://arxiv.org/abs/1605.07146\nCode: https://github.com/xternalz/WideResNet-pytorch\n\nMIT License\nCopyright (c) 2020 Ivan Sosnovik, Michał Szmaja\nCopyright (c) 2019 xternalz\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport math\n\nfrom .impl.ses_conv import SESConv_H_H, SESConv_Z2_H, SESConv_H_H_1x1, SESMaxProjection\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, out_planes, stride, dropRate=0.0, scales=[1.0], pool=False, interscale=False):\n super(BasicBlock, self).__init__()\n self.bn1 = nn.BatchNorm3d(in_planes)\n self.relu1 = nn.ReLU(inplace=True)\n if pool:\n self.conv1 = nn.Sequential(\n SESMaxProjection(),\n SESConv_Z2_H(in_planes, out_planes, kernel_size=7, effective_size=3,\n stride=stride, padding=3, bias=False, scales=scales, basis_type='A')\n )\n else:\n if interscale:\n self.conv1 = SESConv_H_H(in_planes, out_planes, 2, kernel_size=7, effective_size=3, stride=stride,\n 
padding=3, bias=False, scales=scales, basis_type='A')\n else:\n self.conv1 = SESConv_H_H(in_planes, out_planes, 1, kernel_size=7, effective_size=3, stride=stride,\n padding=3, bias=False, scales=scales, basis_type='A')\n self.bn2 = nn.BatchNorm3d(out_planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = SESConv_H_H(out_planes, out_planes, 1, kernel_size=7, effective_size=3, stride=1,\n padding=3, bias=False, scales=scales, basis_type='A')\n self.droprate = dropRate\n self.equalInOut = (in_planes == out_planes)\n self.convShortcut = (not self.equalInOut) and SESConv_H_H_1x1(in_planes, out_planes,\n stride=stride, bias=False, num_scales=len(scales)) or None\n\n def forward(self, x):\n if not self.equalInOut:\n x = self.relu1(self.bn1(x))\n else:\n out = self.relu1(self.bn1(x))\n out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n if self.droprate > 0:\n out = F.dropout(out, p=self.droprate, training=self.training)\n out = self.conv2(out)\n return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\n\nclass NetworkBlock(nn.Module):\n def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, scales=[0.0], pool=False, interscale=False):\n super(NetworkBlock, self).__init__()\n self.layer = self._make_layer(block, in_planes, out_planes,\n nb_layers, stride, dropRate, scales, pool, interscale)\n\n def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate, scales, pool, interscale):\n layers = []\n for i in range(nb_layers):\n pool_layer = pool and (i == 0)\n interscale_layer = interscale and (i == 0)\n layers.append(block(i == 0 and in_planes or out_planes,\n out_planes, i == 0 and stride or 1, dropRate, scales,\n pool=pool_layer, interscale=interscale_layer))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layer(x)\n\n\nclass WideResNet(nn.Module):\n def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, scales=[1.0],\n pools=[False, False, False], interscale=[False, False, False]):\n super(WideResNet, self).__init__()\n nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]\n assert((depth - 4) % 6 == 0)\n n = (depth - 4) // 6\n block = BasicBlock\n # 1st conv before any network block\n self.conv1 = SESConv_Z2_H(3, nChannels[0], kernel_size=7, effective_size=3, stride=1,\n padding=3, bias=False, scales=scales, basis_type='A')\n # 1st block\n self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,\n dropRate, scales=scales, pool=pools[0], interscale=interscale[0])\n # 2nd block\n self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,\n dropRate, scales=scales, pool=pools[1], interscale=interscale[1])\n # 3rd block\n self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,\n dropRate, scales=scales, pool=pools[2], interscale=interscale[2])\n # global average pooling and classifier\n self.proj = SESMaxProjection()\n self.bn1 = nn.BatchNorm2d(nChannels[3])\n self.relu = nn.ReLU(inplace=True)\n self.fc = nn.Linear(nChannels[3], num_classes)\n self.nChannels = nChannels[3]\n\n for m in self.modules():\n if isinstance(m, (SESConv_H_H, SESConv_Z2_H, SESConv_H_H_1x1)):\n nelement = m.weight.nelement()\n n = nelement / m.in_channels\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm3d)):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.proj(out)\n out = self.relu(self.bn1(out))\n\n out = F.adaptive_avg_pool2d(out, 1)\n out = out.view(-1, self.nChannels)\n out = self.fc(out)\n return out\n\n\ndef wrn_16_8_ses_a(num_classes, **kwargs):\n scales = [0.9 * 1.41**i for i in range(3)]\n return WideResNet(depth=16, num_classes=10, widen_factor=8, dropRate=0.3, scales=scales, pools=[False, False, False])\n\n\ndef wrn_16_8_ses_b(num_classes, **kwargs):\n scales = [0.9 * 1.41**i for i in range(3)]\n return WideResNet(depth=16, num_classes=10, widen_factor=8, dropRate=0.3, scales=scales, pools=[False, True, True])\n\n\ndef wrn_16_8_ses_c(num_classes, **kwargs):\n scales = [1.1 * 1.41**i for i in range(3)]\n return WideResNet(depth=16, num_classes=10, widen_factor=8, dropRate=0.3, scales=scales,\n pools=[False, False, False], interscale=[True, False, False])\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.MaxPool3d", "torch.nn.Upsample", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.BatchNorm3d" ], [ "torch.nn.Sequential", "torch.nn.functional.dropout", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.BatchNorm3d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tomaslovato/mocsy
[ "ede19cde4be5cd37ed192a3f3394e81302d11616" ]
[ "examples/test_mocsy.py" ]
[ "# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\nsys.path.append(\"../\")\nimport mocsy\n\n# Define input data (typical values at depth from 0 to 5000 meters)\ntemp = np.repeat(2.0, 6).astype('float32')\ndepth = np.arange (0, 6000, 1000).astype('float32')\nsal = np.repeat(35.0, 6).astype('float32')\nalk = np.repeat(2295.*1.e-6, 6).astype('float32')\ndic = np.repeat(2154.*1.e-6, 6).astype('float32')\nsil = phos = np.repeat(0.0, 6).astype('float32')\nPatm = np.repeat(1.0, 6).astype('float32')\noptK1K2 = 'l'\n\n# Create output arrays\n# --------------------\n\n# computed variables at 6 input points\nlat = np.zeros((6,)).astype('float32')\nph = np.zeros((6,)).astype('float32')\npco2 = np.zeros((6,)).astype('float32')\nfco2 = np.zeros((6,)).astype('float32')\nco2 = np.zeros((6,)).astype('float32')\nhco3 = np.zeros((6,)).astype('float32')\nco3 = np.zeros((6,)).astype('float32')\nOmegaA = np.zeros((6,)).astype('float32')\nOmegaC = np.zeros((6,)).astype('float32')\nBetaD = np.zeros((6,)).astype('float32')\nrhoSW = np.zeros((6,)).astype('float32')\np = np.zeros((6,)).astype('float32')\ntempis = np.zeros((6,)).astype('float32')\n# values of derivatives w/ respect to 6 input variables and at 6 input points\nph_deriv = np.zeros((6*6,)).astype('float32')\npco2_deriv = np.zeros((6*6,)).astype('float32')\nOmegaA_deriv = np.zeros((6*6,)).astype('float32')\nph_deriv = ph_deriv.reshape ((6,6), order='F')\npco2_deriv = pco2_deriv.reshape ((6,6), order='F')\nOmegaA_deriv = OmegaA_deriv.reshape ((6,6), order='F')\n\n# Run mocsy.vars()\n# Notice that option names are all lowercase\nph, pco2, fco2, co2, hco3, co3, OmegaA, OmegaC, BetaD, rhoSW, p, tempis = \\\n mocsy.mvars (temp, sal, alk, dic, sil, phos, Patm, depth, lat,\n optcon='mol/kg', optt='Tinsitu', optp='db', optb='l10', optk1k2=optK1K2, optkf='dg')\n\n# Compute automatic derivatives (using automatic differentiation)\nph_deriv, pco2_deriv, fco2_deriv, co2_deriv, hco3_deriv, co3_deriv, OmegaA_deriv, OmegaC_deriv = \\\n mocsy.mderivauto(temp, sal, alk, dic, sil, phos, Patm, depth, lat, # INPUT\n optcon='mol/kg', optt='Tinsitu', optp='db', optb='l10', optk1k2=optK1K2, optkf='dg') # INPUT OPTIONS\n\n# Compute buffer factors from Egleston\n# pco2_deriv[2,:] are derivatives of pCO2 w/ respect to DIC\n# pco2_deriv[1,:] are ... w/ respect to Alk\ngamma_DIC = pco2 / pco2_deriv[1,:]\ngamma_Alk = pco2 / pco2_deriv[0,:]\n\nbeta_DIC = -1. / (np.log(10.) * ph_deriv[1,:])\nbeta_Alk = -1. / (np.log(10.) 
* ph_deriv[0,:])\n\n# Here, we use Omega of Aragonite (use of Calcite would have been equaly valid)\nomega_DIC = OmegaA / OmegaA_deriv[1,:]\nomega_Alk = OmegaA / OmegaA_deriv[0,:]\n\n# print mocsy results\n# -------------------\n\nprint ('{:s}'.format('-' * 181))\nprint (\" pH pCO2 fCO2 CO2* HCO3- CO32- OmegaA OmegaC R Density Press Temperature gamma_DIC gamma_Alk beta_DIC beta_Alk omega_DIC omega_Alk\")\nprint (\"(total) (uatm) (uatm) (mol/kg) (mol/kg) (mol/kg) (kg/m3) (db) (C)\")\nprint ('{:s}'.format('-' * 181))\n\nprntstr=' {:6.4f} {:6.1f} {:6.1f} {:6.4E} {:6.4E} {:6.4E} {:6.2f} {:6.2f} {:6.2f} {:7.2f} {:6.1f} {:6.3f} {:12.9f} {:12.9f} {:12.9f} {:12.9f} {:12.9f} {:12.9f}'\nfor i in range (0, 6):\n print (prntstr.format(ph[i], pco2[i], fco2[i], co2[i], hco3[i], co3[i], OmegaA[i], OmegaC[i], BetaD[i], rhoSW[i], p[i], tempis[i], gamma_DIC[i], gamma_Alk[i], beta_DIC[i], beta_Alk[i], omega_DIC[i], omega_Alk[i]))\nprint ('{:s}'.format('-' * 181))\n\n# Print derivatives of pH with respect to phosphate, silicate, temperature and salinity\nprint (\"\")\nprint ('{:s}'.format('-' * 45))\nprint (\" dpH/dPhos dpH/dSil dpH/dT dpH/dS\")\nprint (\" pH/µMol pH/µMol pH/°C pH/psu\")\nprint ('{:s}'.format('-' * 45))\nprntstr=' {:10.4f} {:10.5f} {:10.6f} {:10.6f}'\nfor i in range (0, 6):\n print (prntstr.format(ph_deriv[2,i], ph_deriv[3,i], ph_deriv[4,i], ph_deriv[5,i]))\nprint ('{:s}'.format('-' * 45))\n" ]
[ [ "numpy.arange", "numpy.repeat", "numpy.zeros", "numpy.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JayChanHoi/adaptive_gradient_clipping
[ "ab9a7c88bf67843d87919dd51d7b722e063ad2b8" ]
[ "models/nfnet.py" ]
[ "\"\"\" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models\n\nPaper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n\nPaper: `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n\nOfficial Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets\n\nStatus:\n* These models are a work in progress, experiments ongoing.\n* Pretrained weights for two models so far, more to come.\n* Model details updated to closer match official JAX code now that it's released\n* NF-ResNet, NF-RegNet-B, and NFNet-F models supported\n\nHacked together by / copyright Ross Wightman, 2021.\n\"\"\"\nimport math\nfrom dataclasses import dataclass, field\nfrom collections import OrderedDict\nfrom typing import Tuple, Optional\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\n\nfrom .helpers import build_model_with_cfg\n# from .registry import register_model\nfrom .layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame,\\\n get_act_layer, get_act_fn, get_attn, make_divisible\n\n\ndef _dcfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.9, 'interpolation': 'bicubic',\n 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),\n 'first_conv': 'stem.conv1', 'classifier': 'head.fc',\n **kwargs\n }\n\n\ndefault_cfgs = dict(\n dm_nfnet_f0=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth',\n pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9),\n dm_nfnet_f1=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth',\n pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91),\n dm_nfnet_f2=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth',\n pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92),\n dm_nfnet_f3=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth',\n pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94),\n dm_nfnet_f4=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth',\n pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951),\n dm_nfnet_f5=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth',\n pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954),\n dm_nfnet_f6=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth',\n pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956),\n\n nfnet_f0=_dcfg(\n url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)),\n nfnet_f1=_dcfg(\n url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)),\n nfnet_f2=_dcfg(\n url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)),\n nfnet_f3=_dcfg(\n url='', pool_size=(10, 10), 
input_size=(3, 320, 320), test_input_size=(3, 416, 416)),\n nfnet_f4=_dcfg(\n url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)),\n nfnet_f5=_dcfg(\n url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)),\n nfnet_f6=_dcfg(\n url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)),\n nfnet_f7=_dcfg(\n url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)),\n\n nfnet_f0s=_dcfg(\n url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)),\n nfnet_f1s=_dcfg(\n url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)),\n nfnet_f2s=_dcfg(\n url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)),\n nfnet_f3s=_dcfg(\n url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)),\n nfnet_f4s=_dcfg(\n url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)),\n nfnet_f5s=_dcfg(\n url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)),\n nfnet_f6s=_dcfg(\n url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)),\n nfnet_f7s=_dcfg(\n url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)),\n\n nfnet_l0=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth',\n pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0),\n eca_nfnet_l0=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth',\n hf_hub='timm/eca_nfnet_l0',\n pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0),\n eca_nfnet_l1=_dcfg(\n url='',\n pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), crop_pct=1.0),\n\n nf_regnet_b0=_dcfg(\n url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'),\n nf_regnet_b1=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth',\n pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec\n nf_regnet_b2=_dcfg(\n url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'),\n nf_regnet_b3=_dcfg(\n url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'),\n nf_regnet_b4=_dcfg(\n url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'),\n nf_regnet_b5=_dcfg(\n url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'),\n\n nf_resnet26=_dcfg(url='', first_conv='stem.conv'),\n nf_resnet50=_dcfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth',\n pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'),\n nf_resnet101=_dcfg(url='', first_conv='stem.conv'),\n\n nf_seresnet26=_dcfg(url='', first_conv='stem.conv'),\n nf_seresnet50=_dcfg(url='', first_conv='stem.conv'),\n nf_seresnet101=_dcfg(url='', first_conv='stem.conv'),\n\n nf_ecaresnet26=_dcfg(url='', first_conv='stem.conv'),\n nf_ecaresnet50=_dcfg(url='', first_conv='stem.conv'),\n 
nf_ecaresnet101=_dcfg(url='', first_conv='stem.conv'),\n)\n\n\n@dataclass\nclass NfCfg:\n depths: Tuple[int, int, int, int]\n channels: Tuple[int, int, int, int]\n alpha: float = 0.2\n stem_type: str = '3x3'\n stem_chs: Optional[int] = None\n group_size: Optional[int] = None\n attn_layer: Optional[str] = None\n attn_kwargs: dict = None\n attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used\n width_factor: float = 1.0\n bottle_ratio: float = 0.5\n num_features: int = 0 # num out_channels for final conv, no final_conv if 0\n ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal\n reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle\n extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models\n gamma_in_act: bool = False\n same_padding: bool = False\n skipinit: bool = False # disabled by default, non-trivial performance impact\n zero_init_fc: bool = False\n act_layer: str = 'silu'\n\n\ndef _nfres_cfg(\n depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None):\n attn_kwargs = attn_kwargs or {}\n cfg = NfCfg(\n depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25,\n group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs)\n return cfg\n\n\ndef _nfreg_cfg(depths, channels=(48, 104, 208, 440)):\n num_features = 1280 * channels[-1] // 440\n attn_kwargs = dict(reduction_ratio=0.5, divisor=8)\n cfg = NfCfg(\n depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25,\n num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs)\n return cfg\n\n\ndef _nfnet_cfg(\n depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2.,\n act_layer='gelu', attn_layer='se', attn_kwargs=None):\n num_features = int(channels[-1] * feat_mult)\n attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(reduction_ratio=0.5, divisor=8)\n cfg = NfCfg(\n depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size,\n bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer,\n attn_layer=attn_layer, attn_kwargs=attn_kwargs)\n return cfg\n\n\ndef _dm_nfnet_cfg(depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True):\n attn_kwargs = dict(reduction_ratio=0.5, divisor=8)\n cfg = NfCfg(\n depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128,\n bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit,\n num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=attn_kwargs)\n return cfg\n\n\nmodel_cfgs = dict(\n # NFNet-F models w/ GELU compatible with DeepMind weights\n dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)),\n dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)),\n dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)),\n dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)),\n dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)),\n dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)),\n dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)),\n\n # NFNet-F models w/ GELU (I will likely deprecate/remove these models and just keep dm_ ver for GELU)\n nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)),\n nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)),\n nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)),\n nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)),\n 
nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)),\n nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)),\n nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)),\n nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)),\n\n # NFNet-F models w/ SiLU (much faster in PyTorch)\n nfnet_f0s=_nfnet_cfg(depths=(1, 2, 6, 3), act_layer='silu'),\n nfnet_f1s=_nfnet_cfg(depths=(2, 4, 12, 6), act_layer='silu'),\n nfnet_f2s=_nfnet_cfg(depths=(3, 6, 18, 9), act_layer='silu'),\n nfnet_f3s=_nfnet_cfg(depths=(4, 8, 24, 12), act_layer='silu'),\n nfnet_f4s=_nfnet_cfg(depths=(5, 10, 30, 15), act_layer='silu'),\n nfnet_f5s=_nfnet_cfg(depths=(6, 12, 36, 18), act_layer='silu'),\n nfnet_f6s=_nfnet_cfg(depths=(7, 14, 42, 21), act_layer='silu'),\n nfnet_f7s=_nfnet_cfg(depths=(8, 16, 48, 24), act_layer='silu'),\n\n # Experimental 'light' versions of NFNet-F that are little leaner\n nfnet_l0=_nfnet_cfg(\n depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,\n attn_kwargs=dict(reduction_ratio=0.25, divisor=8), act_layer='silu'),\n eca_nfnet_l0=_nfnet_cfg(\n depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,\n attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),\n eca_nfnet_l1=_nfnet_cfg(\n depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25,\n attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),\n\n # EffNet influenced RegNet defs.\n # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8.\n nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)),\n nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)),\n nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)),\n nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)),\n nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)),\n nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)),\n # FIXME add B6-B8\n\n # ResNet (preact, D style deep stem/avg down) defs\n nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)),\n nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)),\n nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)),\n\n nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(reduction_ratio=1/16)),\n nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(reduction_ratio=1/16)),\n nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(reduction_ratio=1/16)),\n\n nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()),\n nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()),\n nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()),\n\n)\n\n\nclass GammaAct(nn.Module):\n def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False):\n super().__init__()\n self.act_fn = get_act_fn(act_type)\n self.gamma = gamma\n self.inplace = inplace\n\n def forward(self, x):\n return self.act_fn(x, inplace=self.inplace).mul_(self.gamma)\n\n\ndef act_with_gamma(act_type, gamma: float = 1.):\n def _create(inplace=False):\n return GammaAct(act_type, gamma=gamma, inplace=inplace)\n return _create\n\n\nclass DownsampleAvg(nn.Module):\n def __init__(\n self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, conv_layer=ScaledStdConv2d):\n \"\"\" AvgPool Downsampling as in 'D' ResNet variants. 
Support for dilation.\"\"\"\n super(DownsampleAvg, self).__init__()\n avg_stride = stride if dilation == 1 else 1\n if stride > 1 or dilation > 1:\n avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d\n self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)\n else:\n self.pool = nn.Identity()\n self.conv = conv_layer(in_chs, out_chs, 1, stride=1)\n\n def forward(self, x):\n return self.conv(self.pool(x))\n\n\nclass NormFreeBlock(nn.Module):\n \"\"\"Normalization-Free pre-activation block.\n \"\"\"\n\n def __init__(\n self, in_chs, out_chs=None, stride=1, dilation=1, first_dilation=None,\n alpha=1.0, beta=1.0, bottle_ratio=0.25, group_size=None, ch_div=1, reg=True, extra_conv=False,\n skipinit=False, attn_layer=None, attn_gain=2.0, act_layer=None, conv_layer=None, drop_path_rate=0.):\n super().__init__()\n first_dilation = first_dilation or dilation\n out_chs = out_chs or in_chs\n # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet\n mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div)\n groups = 1 if not group_size else mid_chs // group_size\n if group_size and group_size % ch_div == 0:\n mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error\n self.alpha = alpha\n self.beta = beta\n self.attn_gain = attn_gain\n\n if in_chs != out_chs or stride != 1 or dilation != first_dilation:\n self.downsample = DownsampleAvg(\n in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer)\n else:\n self.downsample = None\n\n self.act1 = act_layer()\n self.conv1 = conv_layer(in_chs, mid_chs, 1)\n self.act2 = act_layer(inplace=True)\n self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)\n if extra_conv:\n self.act2b = act_layer(inplace=True)\n self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups)\n else:\n self.act2b = None\n self.conv2b = None\n if reg and attn_layer is not None:\n self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3\n else:\n self.attn = None\n self.act3 = act_layer()\n self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. 
if skipinit else 0.)\n if not reg and attn_layer is not None:\n self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3\n else:\n self.attn_last = None\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()\n self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None\n\n def forward(self, x):\n out = self.act1(x) * self.beta\n\n # shortcut branch\n shortcut = x\n if self.downsample is not None:\n shortcut = self.downsample(out)\n\n # residual branch\n out = self.conv1(out)\n out = self.conv2(self.act2(out))\n if self.conv2b is not None:\n out = self.conv2b(self.act2b(out))\n if self.attn is not None:\n out = self.attn_gain * self.attn(out)\n out = self.conv3(self.act3(out))\n if self.attn_last is not None:\n out = self.attn_gain * self.attn_last(out)\n out = self.drop_path(out)\n\n if self.skipinit_gain is not None:\n out.mul_(self.skipinit_gain) # this slows things down more than expected, TBD\n out = out * self.alpha + shortcut\n return out\n\n\ndef create_stem(in_chs, out_chs, stem_type='', conv_layer=None, act_layer=None, preact_feature=True):\n stem_stride = 2\n stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv')\n stem = OrderedDict()\n assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool')\n if 'deep' in stem_type:\n if 'quad' in stem_type:\n # 4 deep conv stack as in NFNet-F models\n assert not 'pool' in stem_type\n stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs)\n strides = (2, 1, 1, 2)\n stem_stride = 4\n stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3')\n else:\n if 'tiered' in stem_type:\n stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py\n else:\n stem_chs = (out_chs // 2, out_chs // 2, out_chs) # 'D' ResNets\n strides = (2, 1, 1)\n stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2')\n last_idx = len(stem_chs) - 1\n for i, (c, s) in enumerate(zip(stem_chs, strides)):\n stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s)\n if i != last_idx:\n stem[f'act{i + 2}'] = act_layer(inplace=True)\n in_chs = c\n elif '3x3' in stem_type:\n # 3x3 stem conv as in RegNet\n stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2)\n else:\n # 7x7 stem conv as in ResNet\n stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2)\n\n if 'pool' in stem_type:\n stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1)\n stem_stride = 4\n\n return nn.Sequential(stem), stem_stride, stem_feature\n\n\n# from https://github.com/deepmind/deepmind-research/tree/master/nfnets\n_nonlin_gamma = dict(\n identity=1.0,\n celu=1.270926833152771,\n elu=1.2716004848480225,\n gelu=1.7015043497085571,\n leaky_relu=1.70590341091156,\n log_sigmoid=1.9193484783172607,\n log_softmax=1.0002083778381348,\n relu=1.7139588594436646,\n relu6=1.7131484746932983,\n selu=1.0008515119552612,\n sigmoid=4.803835391998291,\n silu=1.7881293296813965,\n softsign=2.338853120803833,\n softplus=1.9203323125839233,\n tanh=1.5939117670059204,\n)\n\n\nclass NormFreeNet(nn.Module):\n \"\"\" Normalization-Free Network\n\n As described in :\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n and\n `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171\n\n This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets 
and\n the (preact) ResNet models described earlier in the paper.\n\n There are a few differences:\n * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy),\n this changes channel dim and param counts slightly from the paper models\n * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance\n impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl.\n * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but\n apply it in each activation. This is slightly slower, numerically different, but matches official impl.\n * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput\n for what it is/does. Approx 8-10% throughput loss.\n \"\"\"\n def __init__(self, cfg: NfCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32,\n drop_rate=0., drop_path_rate=0.):\n super().__init__()\n self.num_classes = num_classes\n self.drop_rate = drop_rate\n assert cfg.act_layer in _nonlin_gamma, f\"Please add non-linearity constants for activation ({cfg.act_layer}).\"\n conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d\n if cfg.gamma_in_act:\n act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer])\n conv_layer = partial(conv_layer, eps=1e-4) # DM weights better with higher eps\n else:\n act_layer = get_act_layer(cfg.act_layer)\n conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer])\n attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None\n\n stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div)\n self.stem, stem_stride, stem_feat = create_stem(\n in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer)\n\n self.feature_info = [stem_feat]\n drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]\n prev_chs = stem_chs\n net_stride = stem_stride\n dilation = 1\n expected_var = 1.0\n stages = []\n for stage_idx, stage_depth in enumerate(cfg.depths):\n stride = 1 if stage_idx == 0 and stem_stride > 2 else 2\n if net_stride >= output_stride and stride > 1:\n dilation *= stride\n stride = 1\n net_stride *= stride\n first_dilation = 1 if dilation in (1, 2) else 2\n\n blocks = []\n for block_idx in range(cfg.depths[stage_idx]):\n first_block = block_idx == 0 and stage_idx == 0\n out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div)\n blocks += [NormFreeBlock(\n in_chs=prev_chs, out_chs=out_chs,\n alpha=cfg.alpha,\n beta=1. / expected_var ** 0.5,\n stride=stride if block_idx == 0 else 1,\n dilation=dilation,\n first_dilation=first_dilation,\n group_size=cfg.group_size,\n bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio,\n ch_div=cfg.ch_div,\n reg=cfg.reg,\n extra_conv=cfg.extra_conv,\n skipinit=cfg.skipinit,\n attn_layer=attn_layer,\n attn_gain=cfg.attn_gain,\n act_layer=act_layer,\n conv_layer=conv_layer,\n drop_path_rate=drop_path_rates[stage_idx][block_idx],\n )]\n if block_idx == 0:\n expected_var = 1. 
# expected var is reset after first block of each stage\n expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance\n first_dilation = dilation\n prev_chs = out_chs\n self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')]\n stages += [nn.Sequential(*blocks)]\n self.stages = nn.Sequential(*stages)\n\n if cfg.num_features:\n # The paper NFRegNet models have an EfficientNet-like final head convolution.\n self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div)\n self.final_conv = conv_layer(prev_chs, self.num_features, 1)\n self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv')\n else:\n self.num_features = prev_chs\n self.final_conv = nn.Identity()\n self.final_act = act_layer(inplace=cfg.num_features > 0)\n\n self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)\n\n for n, m in self.named_modules():\n if 'fc' in n and isinstance(m, nn.Linear):\n if cfg.zero_init_fc:\n nn.init.zeros_(m.weight)\n else:\n nn.init.normal_(m.weight, 0., .01)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n\n def get_classifier(self):\n return self.head.fc\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)\n\n def forward_features(self, x):\n x = self.stem(x)\n x = self.stages(x)\n x = self.final_conv(x)\n x = self.final_act(x)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n\ndef _create_normfreenet(variant, pretrained=False, **kwargs):\n model_cfg = model_cfgs[variant]\n feature_cfg = dict(flatten_sequential=True)\n return build_model_with_cfg(\n NormFreeNet, variant, pretrained,\n default_cfg=default_cfgs[variant],\n model_cfg=model_cfg,\n feature_cfg=feature_cfg,\n **kwargs)\n\n\n# @register_model\ndef dm_nfnet_f0(pretrained=False, **kwargs):\n \"\"\" NFNet-F0 (DeepMind weight compatible)\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef dm_nfnet_f1(pretrained=False, **kwargs):\n \"\"\" NFNet-F1 (DeepMind weight compatible)\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef dm_nfnet_f2(pretrained=False, **kwargs):\n \"\"\" NFNet-F2 (DeepMind weight compatible)\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef dm_nfnet_f3(pretrained=False, **kwargs):\n \"\"\" NFNet-F3 (DeepMind weight compatible)\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef dm_nfnet_f4(pretrained=False, **kwargs):\n \"\"\" NFNet-F4 (DeepMind weight compatible)\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - 
https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef dm_nfnet_f5(pretrained=False, **kwargs):\n \"\"\" NFNet-F5 (DeepMind weight compatible)\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef dm_nfnet_f6(pretrained=False, **kwargs):\n \"\"\" NFNet-F6 (DeepMind weight compatible)\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f0(pretrained=False, **kwargs):\n \"\"\" NFNet-F0\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f1(pretrained=False, **kwargs):\n \"\"\" NFNet-F1\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f2(pretrained=False, **kwargs):\n \"\"\" NFNet-F2\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f3(pretrained=False, **kwargs):\n \"\"\" NFNet-F3\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f4(pretrained=False, **kwargs):\n \"\"\" NFNet-F4\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f5(pretrained=False, **kwargs):\n \"\"\" NFNet-F5\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f6(pretrained=False, **kwargs):\n \"\"\" NFNet-F6\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f7(pretrained=False, **kwargs):\n \"\"\" NFNet-F7\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f0s(pretrained=False, **kwargs):\n \"\"\" NFNet-F0 w/ SiLU\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f0s', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f1s(pretrained=False, **kwargs):\n \"\"\" NFNet-F1 w/ SiLU\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f1s', pretrained=pretrained, **kwargs)\n\n\n# 
@register_model\ndef nfnet_f2s(pretrained=False, **kwargs):\n \"\"\" NFNet-F2 w/ SiLU\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f2s', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f3s(pretrained=False, **kwargs):\n \"\"\" NFNet-F3 w/ SiLU\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f3s', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f4s(pretrained=False, **kwargs):\n \"\"\" NFNet-F4 w/ SiLU\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f4s', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f5s(pretrained=False, **kwargs):\n \"\"\" NFNet-F5 w/ SiLU\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f5s', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f6s(pretrained=False, **kwargs):\n \"\"\" NFNet-F6 w/ SiLU\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f6s', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_f7s(pretrained=False, **kwargs):\n \"\"\" NFNet-F7 w/ SiLU\n `High-Performance Large-Scale Image Recognition Without Normalization`\n - https://arxiv.org/abs/2102.06171\n \"\"\"\n return _create_normfreenet('nfnet_f7s', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nfnet_l0(pretrained=False, **kwargs):\n \"\"\" NFNet-L0b w/ SiLU\n My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio\n \"\"\"\n return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef eca_nfnet_l0(pretrained=False, **kwargs):\n \"\"\" ECA-NFNet-L0 w/ SiLU\n My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn\n \"\"\"\n return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef eca_nfnet_l1(pretrained=False, **kwargs):\n \"\"\" ECA-NFNet-L1 w/ SiLU\n My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn\n \"\"\"\n return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_regnet_b0(pretrained=False, **kwargs):\n \"\"\" Normalization-Free RegNet-B0\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_regnet_b1(pretrained=False, **kwargs):\n \"\"\" Normalization-Free RegNet-B1\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_regnet_b2(pretrained=False, **kwargs):\n \"\"\" Normalization-Free RegNet-B2\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, 
**kwargs)\n\n\n# @register_model\ndef nf_regnet_b3(pretrained=False, **kwargs):\n \"\"\" Normalization-Free RegNet-B3\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_regnet_b4(pretrained=False, **kwargs):\n \"\"\" Normalization-Free RegNet-B4\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_regnet_b5(pretrained=False, **kwargs):\n \"\"\" Normalization-Free RegNet-B5\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_resnet26(pretrained=False, **kwargs):\n \"\"\" Normalization-Free ResNet-26\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_resnet50(pretrained=False, **kwargs):\n \"\"\" Normalization-Free ResNet-50\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_resnet101(pretrained=False, **kwargs):\n \"\"\" Normalization-Free ResNet-101\n `Characterizing signal propagation to close the performance gap in unnormalized ResNets`\n - https://arxiv.org/abs/2101.08692\n \"\"\"\n return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_seresnet26(pretrained=False, **kwargs):\n \"\"\" Normalization-Free SE-ResNet26\n \"\"\"\n return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_seresnet50(pretrained=False, **kwargs):\n \"\"\" Normalization-Free SE-ResNet50\n \"\"\"\n return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_seresnet101(pretrained=False, **kwargs):\n \"\"\" Normalization-Free SE-ResNet101\n \"\"\"\n return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_ecaresnet26(pretrained=False, **kwargs):\n \"\"\" Normalization-Free ECA-ResNet26\n \"\"\"\n return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_ecaresnet50(pretrained=False, **kwargs):\n \"\"\" Normalization-Free ECA-ResNet50\n \"\"\"\n return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs)\n\n\n# @register_model\ndef nf_ecaresnet101(pretrained=False, **kwargs):\n \"\"\" Normalization-Free ECA-ResNet101\n \"\"\"\n return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs)\n" ]
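The `_nonlin_gamma` constants in the code field above rescale each non-linearity so that, for unit-Gaussian input, the gamma-corrected activation (as `GammaAct` applies it) keeps roughly unit variance — the signal-propagation idea the `NormFreeNet` docstring cites. A minimal sketch verifying that property numerically with plain PyTorch; the sample size and the three activations chosen are illustrative, and the constants are copied verbatim from the table above:

import torch

# Gamma constants copied from the _nonlin_gamma table in the file above.
gammas = {'relu': 1.7139588594436646, 'silu': 1.7881293296813965, 'gelu': 1.7015043497085571}
acts = {'relu': torch.relu, 'silu': torch.nn.functional.silu, 'gelu': torch.nn.functional.gelu}

x = torch.randn(1_000_000)            # unit-variance Gaussian input
for name, gamma in gammas.items():
    y = gamma * acts[name](x)         # gamma-corrected activation, as GammaAct computes it
    print(f"{name}: output std = {y.std().item():.3f}")   # each value should land close to 1.0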
[ [ "torch.nn.Sequential", "torch.tensor", "torch.nn.MaxPool2d", "torch.nn.Identity", "torch.nn.init.normal_", "torch.nn.init.zeros_", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lesserwhirls/scipy-cwt
[ "ee673656d879d9356892621e23ed0ced3d358621", "ee673656d879d9356892621e23ed0ced3d358621", "ee673656d879d9356892621e23ed0ced3d358621", "ee673656d879d9356892621e23ed0ced3d358621", "ee673656d879d9356892621e23ed0ced3d358621" ]
[ "scipy/lib/lapack/setupscons.py", "scipy/weave/tests/test_scxx_dict.py", "scipy/integrate/setupscons.py", "scipy/interpolate/tests/test_regression.py", "scipy/maxentropy/tests/test_maxentropy.py" ]
[ "#!/usr/bin/env python\n\nimport os\nfrom glob import glob\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n\n config = Configuration('lapack',parent_package,top_path)\n\n config.add_sconscript('SConstruct')\n config.add_data_dir('tests')\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n\n setup(**configuration(top_path='').todict())\n", "\"\"\" Test refcounting and behavior of SCXX.\n\"\"\"\n\nimport sys\n\nfrom numpy.testing import TestCase, dec, assert_, assert_raises\n\nfrom scipy.weave import inline_tools\n\n\nclass TestDictConstruct(TestCase):\n #------------------------------------------------------------------------\n # Check that construction from basic types is allowed and have correct\n # reference counts\n #------------------------------------------------------------------------\n @dec.slow\n def test_empty(self):\n # strange int value used to try and make sure refcount is 2.\n code = \"\"\"\n py::dict val;\n return_val = val;\n \"\"\"\n res = inline_tools.inline(code)\n assert_(sys.getrefcount(res) == 2)\n assert_(res == {})\n\n\nclass TestDictHasKey(TestCase):\n\n @dec.slow\n def test_obj(self):\n class Foo:\n pass\n key = Foo()\n a = {}\n a[key] = 12345\n code = \"\"\"\n return_val = a.has_key(key);\n \"\"\"\n res = inline_tools.inline(code,['a','key'])\n assert_(res)\n\n @dec.slow\n def test_int(self):\n a = {}\n a[1234] = 12345\n code = \"\"\"\n return_val = a.has_key(1234);\n \"\"\"\n res = inline_tools.inline(code,['a'])\n assert_(res)\n\n @dec.slow\n def test_double(self):\n a = {}\n a[1234.] = 12345\n code = \"\"\"\n return_val = a.has_key(1234.);\n \"\"\"\n res = inline_tools.inline(code,['a'])\n assert_(res)\n\n @dec.slow\n def test_complex(self):\n a = {}\n a[1+1j] = 12345\n key = 1+1j\n code = \"\"\"\n return_val = a.has_key(key);\n \"\"\"\n res = inline_tools.inline(code,['a','key'])\n assert_(res)\n\n @dec.slow\n def test_string(self):\n a = {}\n a[\"b\"] = 12345\n code = \"\"\"\n return_val = a.has_key(\"b\");\n \"\"\"\n res = inline_tools.inline(code,['a'])\n assert_(res)\n\n @dec.slow\n def test_std_string(self):\n a = {}\n a[\"b\"] = 12345\n key_name = \"b\"\n code = \"\"\"\n return_val = a.has_key(key_name);\n \"\"\"\n res = inline_tools.inline(code,['a','key_name'])\n assert_(res)\n\n @dec.slow\n def test_string_fail(self):\n a = {}\n a[\"b\"] = 12345\n code = \"\"\"\n return_val = a.has_key(\"c\");\n \"\"\"\n res = inline_tools.inline(code,['a'])\n assert_(not res)\n\n\nclass TestDictGetItemOp(TestCase):\n\n def generic_get(self,code,args=['a']):\n a = {}\n a['b'] = 12345\n\n res = inline_tools.inline(code,args)\n assert_(res == a['b'])\n\n @dec.slow\n def test_char(self):\n self.generic_get('return_val = a[\"b\"];')\n\n @dec.knownfailureif(True)\n @dec.slow\n def test_char_fail(self):\n # We can't through a KeyError for dicts on RHS of\n # = but not on LHS. Not sure how to deal with this.\n assert_raises(KeyError, self.generic_get, 'return_val = a[\"c\"];')\n\n @dec.slow\n def test_string(self):\n self.generic_get('return_val = a[std::string(\"b\")];')\n\n\n @dec.slow\n def test_obj(self):\n code = \"\"\"\n py::object name = \"b\";\n return_val = a[name];\n \"\"\"\n self.generic_get(code,['a'])\n\n @dec.knownfailureif(True)\n @dec.slow\n def test_obj_fail(self):\n # We can't through a KeyError for dicts on RHS of\n # = but not on LHS. 
Not sure how to deal with this.\n code = \"\"\"\n py::object name = \"c\";\n return_val = a[name];\n \"\"\"\n assert_raises(KeyError, self.generic_get, code, ['a'])\n\n\nclass TestDictSetOperator(TestCase):\n\n def generic_new(self,key,val):\n # test that value is set correctly and that reference counts\n # on dict, key, and val are being handled correctly.\n a = {}\n # call once to handle mysterious addition of one ref count\n # on first call to inline.\n inline_tools.inline(\"a[key] = val;\",['a','key','val'])\n assert_(a[key] == val)\n before = sys.getrefcount(a), sys.getrefcount(key), sys.getrefcount(val)\n inline_tools.inline(\"a[key] = val;\",['a','key','val'])\n assert_(a[key] == val)\n after = sys.getrefcount(a), sys.getrefcount(key), sys.getrefcount(val)\n assert_(before == after)\n\n def generic_overwrite(self,key,val):\n a = {}\n overwritten = 1\n a[key] = overwritten # put an item in the dict to be overwritten\n # call once to handle mysterious addition of one ref count\n # on first call to inline.\n before_overwritten = sys.getrefcount(overwritten)\n inline_tools.inline(\"a[key] = val;\",['a','key','val'])\n assert_(a[key] == val)\n before = sys.getrefcount(a), sys.getrefcount(key), sys.getrefcount(val)\n inline_tools.inline(\"a[key] = val;\",['a','key','val'])\n assert_(a[key] == val)\n after = sys.getrefcount(a), sys.getrefcount(key), sys.getrefcount(val)\n after_overwritten = sys.getrefcount(overwritten)\n assert_(before == after)\n assert_(before_overwritten == after_overwritten)\n\n @dec.slow\n def test_new_int_int(self):\n key,val = 1234,12345\n self.generic_new(key,val)\n\n @dec.slow\n def test_new_double_int(self):\n key,val = 1234.,12345\n self.generic_new(key,val)\n\n @dec.slow\n def test_new_std_string_int(self):\n key,val = \"hello\",12345\n self.generic_new(key,val)\n\n @dec.slow\n def test_new_complex_int(self):\n key,val = 1+1j,12345\n self.generic_new(key,val)\n\n @dec.slow\n def test_new_obj_int(self):\n class Foo:\n pass\n key,val = Foo(),12345\n self.generic_new(key,val)\n\n @dec.slow\n def test_overwrite_int_int(self):\n key,val = 1234,12345\n self.generic_overwrite(key,val)\n\n @dec.slow\n def test_overwrite_double_int(self):\n key,val = 1234.,12345\n self.generic_overwrite(key,val)\n\n @dec.slow\n def test_overwrite_std_string_int(self):\n key,val = \"hello\",12345\n self.generic_overwrite(key,val)\n\n @dec.slow\n def test_overwrite_complex_int(self):\n key,val = 1+1j,12345\n self.generic_overwrite(key,val)\n\n @dec.slow\n def test_overwrite_obj_int(self):\n class Foo:\n pass\n key,val = Foo(),12345\n self.generic_overwrite(key,val)\n\n\nclass TestDictDel(TestCase):\n\n def generic(self,key):\n # test that value is set correctly and that reference counts\n # on dict, key, are being handled correctly. 
after deletion,\n # the keys refcount should be one less than before.\n a = {}\n a[key] = 1\n inline_tools.inline(\"a.del(key);\",['a','key'])\n assert_(key not in a)\n a[key] = 1\n before = sys.getrefcount(a), sys.getrefcount(key)\n inline_tools.inline(\"a.del(key);\",['a','key'])\n assert_(key not in a)\n after = sys.getrefcount(a), sys.getrefcount(key)\n assert_(before[0] == after[0])\n assert_(before[1] == after[1] + 1)\n\n @dec.slow\n def test_int(self):\n key = 1234\n self.generic(key)\n\n @dec.slow\n def test_double(self):\n key = 1234.\n self.generic(key)\n\n @dec.slow\n def test_std_string(self):\n key = \"hello\"\n self.generic(key)\n\n @dec.slow\n def test_complex(self):\n key = 1+1j\n self.generic(key)\n\n @dec.slow\n def test_obj(self):\n class Foo:\n pass\n key = Foo()\n self.generic(key)\n\n\nclass TestDictOthers(TestCase):\n\n @dec.slow\n def test_clear(self):\n a = {}\n a[\"hello\"] = 1\n inline_tools.inline(\"a.clear();\",['a'])\n assert_(not a)\n\n @dec.slow\n def test_items(self):\n a = {}\n a[\"hello\"] = 1\n items = inline_tools.inline(\"return_val = a.items();\",['a'])\n assert_(items == a.items())\n\n @dec.slow\n def test_values(self):\n a = {}\n a[\"hello\"] = 1\n values = inline_tools.inline(\"return_val = a.values();\",['a'])\n assert_(values == a.values())\n\n @dec.slow\n def test_keys(self):\n a = {}\n a[\"hello\"] = 1\n keys = inline_tools.inline(\"return_val = a.keys();\",['a'])\n assert_(keys == a.keys())\n\n @dec.slow\n def test_update(self):\n a,b = {},{}\n a[\"hello\"] = 1\n b[\"hello\"] = 2\n inline_tools.inline(\"a.update(b);\",['a','b'])\n assert_(a == b)\n\n\nif __name__ == \"__main__\":\n import nose\n nose.run(argv=['', __file__])\n", "#!/usr/bin/env python\n\nfrom os.path import join\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n\n config = Configuration('integrate', parent_package, top_path)\n\n config.add_sconscript('SConstruct')\n config.add_data_dir('tests')\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "import numpy as np\nimport scipy.interpolate as interp\nfrom numpy.testing import assert_almost_equal, TestCase\n\nclass TestRegression(TestCase):\n def test_spalde_scalar_input(self):\n \"\"\"Ticket #629\"\"\"\n x = np.linspace(0,10)\n y = x**3\n tck = interp.splrep(x, y, k=3, t=[5])\n res = interp.spalde(np.float64(1), tck)\n des = np.array([ 1., 3., 6., 6.])\n assert_almost_equal(res, des)\n", "#!/usr/bin/env python\n\n\"\"\" Test functions for maximum entropy module.\n\nAuthor: Ed Schofield, 2003-2005\nCopyright: Ed Schofield, 2003-2005\n\"\"\"\n\nfrom numpy.testing import assert_almost_equal, TestCase, run_module_suite\nfrom numpy import arange, log, exp, ones\nfrom scipy.maxentropy.maxentropy import logsumexp\n\nclass TestMaxentropy(TestCase):\n \"\"\"Test whether logsumexp() function correctly handles large\n inputs.\n \"\"\"\n def test_logsumexp(self):\n a = arange(200)\n desired = log(sum(exp(a)))\n assert_almost_equal(logsumexp(a), desired)\n\n # Now test with large numbers\n b = [1000,1000]\n desired = 1000.0 + log(2.0)\n assert_almost_equal(logsumexp(b), desired)\n\n n = 1000\n b = ones(n)*10000\n desired = 10000.0 + log(n)\n assert_almost_equal(logsumexp(b), desired)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n" ]
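The `test_maxentropy.py` file above exercises `logsumexp` on inputs like `[1000, 1000]`, where a naive `log(sum(exp(a)))` overflows to `inf`. A minimal NumPy sketch of the max-shift trick that makes the identity `logsumexp([1000, 1000]) == 1000 + log(2)` computable; the function name here is illustrative, not scipy's:

import numpy as np

def logsumexp_stable(a):
    a = np.asarray(a, dtype=float)
    m = a.max()                          # shift by the maximum so exp() never overflows
    return m + np.log(np.exp(a - m).sum())

b = np.array([1000.0, 1000.0])
print(np.log(np.exp(b).sum()))           # naive version overflows: prints inf
print(logsumexp_stable(b))               # 1000 + log(2) ~= 1000.693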
[ [ "numpy.distutils.misc_util.Configuration" ], [ "scipy.weave.inline_tools.inline", "numpy.testing.assert_raises", "numpy.testing.assert_", "numpy.testing.dec.knownfailureif" ], [ "numpy.distutils.misc_util.Configuration" ], [ "scipy.interpolate.splrep", "numpy.linspace", "numpy.testing.assert_almost_equal", "numpy.float64", "numpy.array" ], [ "numpy.log", "numpy.testing.run_module_suite", "numpy.arange", "numpy.ones", "numpy.exp", "scipy.maxentropy.maxentropy.logsumexp" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.12" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
old-school-kid/keras
[ "326cf80085a9d7d980b968ea1ca235490e32833b" ]
[ "keras/layers/dense_attention_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests dense attention layers.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport keras\nfrom keras import combinations\nfrom keras import testing_utils\nfrom keras.layers import core\nfrom keras.layers import dense_attention\nfrom keras.mixed_precision import policy\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass BaseDenseAttentionTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_one_dim_with_mask(self):\n # Scores tensor of shape [1, 1, 1]\n scores = np.array([[[1.1]]], dtype=np.float32)\n # Value tensor of shape [1, 1, 1]\n v = np.array([[[1.6]]], dtype=np.float32)\n # Scores mask tensor of shape [1, 1, 1]\n scores_mask = np.array([[[True]]], dtype=np.bool_)\n actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(\n scores=scores, value=v, scores_mask=scores_mask)\n\n # Expected softmax_scores = [[[1]]]\n expected_scores = np.array([[[1.]]], dtype=np.float32)\n self.assertAllClose(expected_scores, actual_scores)\n # Expected tensor of shape [1, 1, 1].\n # expected000 = softmax_scores[0, 0] * 1.6 = 1.6\n expected = np.array([[[1.6]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_one_dim_no_mask(self):\n # Scores tensor of shape [1, 1, 1]\n scores = np.array([[[1.1]]], dtype=np.float32)\n # Value tensor of shape [1, 1, 1]\n v = np.array([[[1.6]]], dtype=np.float32)\n actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(\n scores=scores, value=v)\n\n # Expected softmax_scores = [[[1]]]\n expected_scores = np.array([[[1.]]], dtype=np.float32)\n self.assertAllClose(expected_scores, actual_scores)\n # Expected tensor of shape [1, 1, 1].\n # expected000 = softmax_scores[0, 0] * 1.6 = 1.6\n expected = np.array([[[1.6]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_multi_dim_with_mask(self):\n # Scores tensor of shape [1, 1, 3]\n scores = np.array([[[1., 0., 1.]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 1]\n v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)\n # Scores mask tensor of shape [1, 1, 3]\n scores_mask = np.array([[[True, True, False]]], dtype=np.bool_)\n actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(\n scores=scores, value=v, scores_mask=scores_mask)\n\n # Expected softmax scores = softmax(scores) with zeros in positions where\n # v_mask == False.\n # => softmax_scores000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863\n # softmax_scores001 = exp(0)/(exp(1) + exp(0)) = 0.26894142137\n # softmax_scores002 = 0\n expected_scores = np.array([[[0.73105857863, 0.26894142137, 0.]]],\n dtype=np.float32)\n self.assertAllClose(expected_scores, actual_scores)\n # Expected tensor of shape [1, 1, 1].\n # expected000 = 0.73105857863 * 1.6 + 0.26894142137 * 0.7 - 0 
* 0.8\n # = 1.35795272077\n expected = np.array([[[1.35795272077]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_multi_dim_no_mask(self):\n # Scores tensor of shape [1, 1, 3]\n scores = np.array([[[1., 0., 1.]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 1]\n v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)\n actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(\n scores=scores, value=v)\n\n # Expected softmax_scores = softmax(scores).\n # => softmax_scores000 = exp(1)/(exp(1) + exp(0) + exp(1))\n # = 0.42231879825\n # softmax_scores001 = exp(0)/(exp(1) + exp(0) + exp(1))\n # = 0.15536240349\n # softmax_scores002 = exp(1)/(exp(1) + exp(0) + exp(1))\n # = 0.42231879825\n expected_scores = np.array(\n [[[0.42231879825, 0.15536240349, 0.42231879825]]], dtype=np.float32)\n self.assertAllClose(expected_scores, actual_scores)\n # Expected tensor of shape [1, 1, 1].\n # expected000 = 0.42231879825 * 1.6 + 0.15536240349 * 0.7\n # - 0.42231879825 * 0.8\n # = 0.44660872104\n expected = np.array([[[0.44660872104]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_one_dim_batch_size_two(self):\n # Scores tensor of shape [2, 1, 1]\n scores = np.array([[[1.1]], [[2.1]]], dtype=np.float32)\n # Value tensor of shape [2, 1, 1]\n v = np.array([[[1.6]], [[2.6]]], dtype=np.float32)\n # Scpres mask tensor of shape [2, 1, 1]\n scores_mask = np.array([[[True]], [[True]]], dtype=np.bool_)\n actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(\n scores=scores, value=v, scores_mask=scores_mask)\n\n # Expected softmax_scores = [[[1]], [[1]]]\n expected_scores = np.array([[[1.]], [[1.]]], dtype=np.float32)\n self.assertAllClose(expected_scores, actual_scores)\n # Expected tensor of shape [2, 1, 1].\n # expected000 = softmax_scores[0, 0] * 1.6 = 1.6\n # expected100 = softmax_scores[1, 0] * 2.6 = 2.6\n expected = np.array([[[1.6]], [[2.6]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_shape_with_dropout(self):\n # scores: Scores float tensor of shape `[batch_size, tq, tv]`.\n # value: Value tensor of shape `[batch_size, tv, dim]`.\n batch_size = 4\n tq = 5\n tv = 6\n dim = 7\n scores = np.ones((batch_size, tq, tv))\n value = np.ones((batch_size, tv, dim))\n actual, actual_scores = dense_attention.BaseDenseAttention(\n dropout=0.1)._apply_scores(\n scores=scores, value=value, training=False)\n\n # Expected Tensor of shape `[batch_size, tq, tv]`.\n expected_scores_shape = [batch_size, tq, tv]\n self.assertAllEqual(expected_scores_shape, tf.shape(actual_scores))\n # Expected Tensor of shape `[batch_size, tq, dim]`.\n expected_shape = [batch_size, tq, dim]\n self.assertAllEqual(expected_shape, tf.shape(actual))\n\n def test_serialization(self):\n # Test serialization with causal\n layer = dense_attention.BaseDenseAttention(causal=True)\n\n config = keras.layers.serialize(layer)\n new_layer = keras.layers.deserialize(config)\n self.assertEqual(new_layer.causal, True)\n\n config = layer.get_config()\n new_layer = dense_attention.BaseDenseAttention.from_config(config)\n self.assertEqual(new_layer.causal, True)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass AttentionTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_calculate_scores_one_dim(self):\n # Query tensor of shape [1, 1, 1]\n q = np.array([[[1.1]]], dtype=np.float32)\n # Key tensor of shape [1, 1, 1]\n k = np.array([[[1.6]]], dtype=np.float32)\n attention_layer = dense_attention.Attention()\n 
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))\n actual = attention_layer._calculate_scores(query=q, key=k)\n\n # Expected tensor of shape [1, 1, 1].\n # expected000 = 1.1*1.6 = 1.76\n expected = np.array([[[1.76]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_calculate_scores_multi_dim(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Key tensor of shape [1, 3, 4]\n k = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n attention_layer = dense_attention.Attention()\n attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))\n actual = attention_layer._calculate_scores(query=q, key=k)\n\n # Expected tensor of shape [1, 2, 3].\n # expected000 = 1.*1.5+1.1*1.6+1.2*1.7+1.3*1.8 = 7.64\n # expected001 = 1.*2.5+1.1*2.6+1.2*2.7+1.3*2.8 = 12.24\n # expected002 = 1.*3.5+1.1*3.6+1.2*3.7+1.3*3.8 = 16.84\n # expected010 = 2.*1.5+2.1*1.6+2.2*1.7+2.3*1.8 = 14.24\n # expected011 = 2.*2.5+2.1*2.6+2.2*2.7+2.3*2.8 = 22.84\n # expected012 = 2.*3.5+2.1*3.6+2.2*3.7+2.3*3.8 = 31.44\n expected = np.array([[[7.64, 12.24, 16.84], [14.24, 22.84, 31.44]]],\n dtype=np.float32)\n self.assertAllClose(expected, actual)\n \n def test_calculate_scores_multi_dim_concat(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Key tensor of shape [1, 3, 4]\n k = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n attention_layer = dense_attention.Attention(score_mode='concat')\n attention_layer.concat_score_weight = 1\n attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))\n actual = keras.backend.get_value(\n attention_layer._calculate_scores(query=q, key=k))\n\n # pylint:disable=line-too-long\n # expected000 = tanh(1.+1.5) + tanh(1.1+1.6) + tanh(1.2+1.7) + tanh(1.3+1.8) = 3.96753427840\n # expected001 = tanh(1.+2.5) + tanh(1.1+2.6) + tanh(1.2+2.7) + tanh(1.3+2.8) = 3.99558784825\n # expected002 = tanh(1.+3.5) + tanh(1.1+3.6) + tanh(1.2+3.7) + tanh(1.3+3.8) = 3.99940254147\n # expected010 = tanh(2.+1.5) + tanh(2.1+1.6) + tanh(2.2+1.7) + tanh(2.3+1.8) = 3.99558784825\n # expected011 = tanh(2.+2.5) + tanh(2.1+2.6) + tanh(2.2+2.7) + tanh(2.3+2.8) = 3.99940254147\n # expected012 = tanh(2.+3.5) + tanh(2.1+3.6) + tanh(2.2+3.7) + tanh(2.3+3.8) = 3.99991913657\n expected = np.array([[[3.96753427840, 3.99558784825, 3.99940254147],\n [3.99558784825, 3.99940254147, 3.99991913657]]],\n dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_calculate_scores_one_dim_batch_size_two(self):\n # Query tensor of shape [2, 1, 1]\n q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)\n # Key tensor of shape [2, 1, 1]\n k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)\n attention_layer = dense_attention.Attention()\n attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))\n actual = attention_layer._calculate_scores(query=q, key=k)\n\n # Expected tensor of shape [2, 1, 1].\n # expected000 = 1.1*1.6 = 1.76\n # expected100 = 2.1*2.6 = 5.46\n expected = np.array([[[1.76]], [[5.46]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_calculate_scores_one_dim_with_scale(self):\n \"\"\"Tests that scores are multiplied by scale.\"\"\"\n # Query tensor of shape [1, 1, 1]\n q = np.array([[[1.1]]], dtype=np.float32)\n # Key tensor of shape [1, 1, 1]\n k = np.array([[[1.6]]], dtype=np.float32)\n attention_layer = 
dense_attention.Attention(use_scale=True)\n attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))\n attention_layer.scale = -2.\n actual = attention_layer._calculate_scores(query=q, key=k)\n\n # Expected tensor of shape [1, 1, 1].\n # expected000 = -2*1.1*1.6 = -3.52\n expected = np.array([[[-3.52]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n \n def test_calculate_scores_one_dim_with_scale_concat(self):\n \"\"\"Tests that scores are multiplied by scale.\"\"\"\n # Query tensor of shape [1, 1, 1]\n q = np.array([[[1.1]]], dtype=np.float32)\n # Key tensor of shape [1, 1, 1]\n k = np.array([[[1.6]]], dtype=np.float32)\n attention_layer = dense_attention.Attention(use_scale=True, score_mode='concat')\n attention_layer.concat_score_weight = 1\n attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))\n attention_layer.scale = 2.\n actual = keras.backend.get_value(\n attention_layer._calculate_scores(query=q, key=k))\n\n # Expected tensor of shape [1, 1, 1].\n # expected000 = tanh(2*(1.1+1.6)) = 0.9999592018254402\n expected = np.array([[[0.999959202]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_shape(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 4]\n v = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.Attention()\n actual = attention_layer([q, v], mask=[None, v_mask])\n\n expected_shape = [1, 2, 4]\n self.assertAllEqual(expected_shape, tf.shape(actual))\n \n def test_shape_concat(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 4]\n v = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.Attention(score_mode='concat')\n attention_layer.concat_score_weight = 1\n actual = attention_layer([q, v], mask=[None, v_mask])\n\n expected_shape = [1, 2, 4]\n self.assertAllEqual(expected_shape, tf.shape(actual))\n\n def test_shape_with_key(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 4]\n v = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Key tensor of shape [1, 3, 4]\n k = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.Attention()\n actual = attention_layer([q, v, k], mask=[None, v_mask])\n\n expected_shape = [1, 2, 4]\n self.assertAllEqual(expected_shape, tf.shape(actual))\n \n def test_shape_with_key_concat(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 4]\n v = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Key tensor of shape [1, 3, 4]\n k = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Value mask tensor 
of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.Attention(score_mode='concat')\n attention_layer.concat_score_weight = 1\n actual = attention_layer([q, v, k], mask=[None, v_mask])\n\n expected_shape = [1, 2, 4]\n self.assertAllEqual(expected_shape, tf.shape(actual))\n\n def test_multi_dim(self):\n # Query tensor of shape [1, 1, 1]\n q = np.array([[[1.1]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 1]\n v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.Attention()\n actual = attention_layer([q, v], mask=[None, v_mask])\n\n # Expected scores of shape [1, 1, 3]\n # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]\n # Expected attention distribution = softmax(scores) with zeros in\n # positions where v_mask == False.\n # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))\n # = 0.72908792234\n # attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))\n # = 0.27091207765\n # attention_distribution002 = 0\n #\n # Expected tensor of shape [1, 1, 1].\n # expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8\n # = 1.3561791301\n expected = np.array([[[1.3561791301]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_multi_dim_with_key(self):\n # Query tensor of shape [1, 1, 1]\n q = np.array([[[1.1]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 1]\n v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)\n # Key tensor of shape [1, 3, 1]\n k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.Attention()\n actual = attention_layer([q, v, k], mask=[None, v_mask])\n\n # Expected scores of shape [1, 1, 3]\n # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]\n # Expected attention distribution = softmax(scores) with zeros in\n # positions where v_mask == False.\n # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))\n # = 0.72908792234\n # attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))\n # = 0.27091207765\n # attention_distribution002 = 0\n #\n # Expected tensor of shape [1, 1, 1].\n # expected000 = 0.72908792234 * 0.5 + 0.27091207765 * 0.8 - 0 * 0.3\n # = 0.58127362329\n expected = np.array([[[0.58127362329]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n @parameterized.named_parameters(\n ('', False),\n ('return_attention_scores', True),\n )\n def test_multi_dim_with_query_mask(self, return_attention_scores):\n # Query tensor of shape [1, 2, 1]\n q = np.array([[[1.1], [-0.5]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 1]\n v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)\n # Query mask tensor of shape [1, 2]\n q_mask = np.array([[True, False]], dtype=np.bool_)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.Attention()\n if return_attention_scores:\n actual, actual_scores = attention_layer(\n [q, v],\n mask=[q_mask, v_mask],\n return_attention_scores=return_attention_scores)\n else:\n actual = attention_layer([q, v],\n mask=[q_mask, v_mask],\n return_attention_scores=return_attention_scores)\n\n # Expected scores of shape [1, 2, 3]\n # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8], [-0.5*1.6, -0.5*0.7, 0.5*0.8]]]\n # = 
[[[1.76, 0.77, -0.88], [-0.8, -0.35, 0.4]]]\n # Expected attention distribution = softmax(scores) with zeros in\n # positions where v_mask == False.\n # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))\n # = 0.72908792234\n # attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))\n # = 0.27091207765\n # attention_distribution002 = 0\n # => attention_distribution010 = exp(-0.8)/(exp(-0.8) + exp(-0.35))\n # = 0.38936076605\n # attention_distribution011 = exp(-0.35)/(exp(-0.8) + exp(-0.35))\n # = 0.61063923394\n # attention_distribution012 = 0\n if return_attention_scores:\n expected_scores = np.array([[[0.72908792234, 0.27091207765, 0.],\n [0.38936076605, 0.61063923394, 0.]]],\n dtype=np.float32)\n self.assertAllClose(expected_scores, actual_scores)\n # Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.\n # expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8\n # = 1.3561791301\n # expected000 = 0\n expected = np.array([[[1.3561791301], [0.]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_scale_None(self):\n \"\"\"Tests that scale is None by default.\"\"\"\n attention_layer = dense_attention.Attention()\n attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))\n self.assertIsNone(attention_layer.scale)\n\n def test_scale_init_eager(self):\n \"\"\"Tests that scale initializes to 1 when use_scale=True.\"\"\"\n if not tf.executing_eagerly():\n self.skipTest('Only run in eager mode')\n attention_layer = dense_attention.Attention(use_scale=True)\n attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))\n self.assertAllClose(1., attention_layer.scale.value())\n\n def test_scale_init_graph(self):\n \"\"\"Tests that scale initializes to 1 when use_scale=True.\"\"\"\n with self.cached_session() as sess:\n attention_layer = dense_attention.Attention(use_scale=True)\n attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))\n sess.run(attention_layer.scale.initializer)\n self.assertAllClose(1., attention_layer.scale.value())\n\n @parameterized.named_parameters(\n ('', False),\n ('return_attention_scores', True),\n )\n def test_self_attention_causal(self, return_attention_scores):\n # Query-value tensor of shape [1, 3, 1]\n q = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)\n attention_layer = dense_attention.Attention(causal=True)\n if return_attention_scores:\n actual, actual_scores = attention_layer(\n [q, q], return_attention_scores=return_attention_scores)\n else:\n actual = attention_layer([q, q],\n return_attention_scores=return_attention_scores)\n\n # Expected scores of shape [1, 3, 3]\n # scores = [[0.25, 0.4, -0.15], [0.4, 0.64, -0.24], [-0.15, -0.24, 0.09]]\n # Expected attention distribution = softmax(scores) lower triangular\n # => attention_distribution00 = [1., 0., 0.]\n # attention_distribution01\n # = [exp(0.4), exp(0.64), 0.] 
/ (exp(0.4) + exp(0.64))\n # = [0.44028635073, 0.55971364926, 0.]\n # attention_distribution02\n # = [exp(-0.15), exp(-0.24), exp(0.09)]\n # / (exp(-0.15) + exp(-0.24) + exp(0.09))\n # = [0.31395396638, 0.28693232061, 0.399113713]\n if return_attention_scores:\n expected_scores = np.array(\n [[[1., 0., 0.], [0.44028635073, 0.55971364926, 0.],\n [0.31395396638, 0.28693232061, 0.399113713]]],\n dtype=np.float32)\n self.assertAllClose(expected_scores, actual_scores)\n # Expected tensor of shape [1, 3, 1].\n # expected000 = 0.5\n # expected010 = 0.44028635073 * 0.5 + 0.55971364926 * 0.8\n # = 0.66791409477\n # expected020 = 0.31395396638 * 0.5 +0.28693232061 * 0.8 -0.399113713 * 0.3\n # = 0.26678872577\n expected = np.array([[[0.5], [0.66791409477], [0.26678872577]]],\n dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_inputs_not_list(self):\n attention_layer = dense_attention.Attention()\n q = np.array([[[1.1]]], dtype=np.float32)\n with self.assertRaisesRegex(\n ValueError, 'Attention layer must be called on a list of inputs'):\n attention_layer(q)\n\n def test_inputs_too_short(self):\n attention_layer = dense_attention.Attention()\n q = np.array([[[1.1]]], dtype=np.float32)\n with self.assertRaisesRegex(\n ValueError, 'Attention layer accepts inputs list of length 2 or 3'):\n attention_layer([q])\n\n def test_inputs_too_long(self):\n attention_layer = dense_attention.Attention()\n q = np.array([[[1.1]]], dtype=np.float32)\n with self.assertRaisesRegex(\n ValueError, 'Attention layer accepts inputs list of length 2 or 3'):\n attention_layer([q, q, q, q])\n\n def test_mask_not_list(self):\n attention_layer = dense_attention.Attention()\n q = np.array([[[1.1]]], dtype=np.float32)\n mask = np.array([[True]], dtype=np.bool_)\n with self.assertRaisesRegex(ValueError,\n 'Attention layer mask must be a list'):\n attention_layer([q, q], mask=mask)\n\n def test_mask_too_short(self):\n attention_layer = dense_attention.Attention()\n q = np.array([[[1.1]]], dtype=np.float32)\n mask = np.array([[True]], dtype=np.bool_)\n with self.assertRaisesRegex(\n ValueError, 'Attention layer mask must be a list of length 2'):\n attention_layer([q, q], mask=[mask])\n\n def test_mask_too_long(self):\n attention_layer = dense_attention.Attention()\n q = np.array([[[1.1]]], dtype=np.float32)\n mask = np.array([[True]], dtype=np.bool_)\n with self.assertRaisesRegex(\n ValueError, 'Attention layer mask must be a list of length 2'):\n attention_layer([q, q], mask=[mask, mask, mask])\n\n def test_override_mask(self):\n attention_layer = dense_attention.Attention()\n q = core.Masking()(np.array([[[1.1]]], dtype=np.float32))\n mask = np.array([[False]], dtype=np.bool_)\n actual = attention_layer([q, q], mask=[mask, mask])\n self.assertAllClose([[[0]]], actual)\n\n def test_implicit_mask(self):\n attention_layer = dense_attention.Attention()\n q = core.Masking(1.1)(np.array([[[1.1], [1]]], dtype=np.float32))\n v = core.Masking(1.2)(np.array([[[1.2], [1]]], dtype=np.float32))\n actual = attention_layer([q, v])\n self.assertAllClose([[[0], [1]]], actual)\n\n @parameterized.named_parameters(\n ('', False),\n ('use_scale', True),\n )\n def test_serialization(self, use_scale):\n # Test serialization with use_scale\n layer = dense_attention.Attention(use_scale=use_scale)\n\n config = keras.layers.serialize(layer)\n new_layer = keras.layers.deserialize(config)\n self.assertEqual(new_layer.use_scale, use_scale)\n\n config = layer.get_config()\n new_layer = dense_attention.Attention.from_config(config)\n 
self.assertEqual(new_layer.use_scale, use_scale)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass AdditiveAttentionTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_calculate_scores_one_dim(self):\n # Query tensor of shape [1, 1, 1]\n q = np.array([[[1.1]]], dtype=np.float32)\n # Key tensor of shape [1, 1, 1]\n k = np.array([[[1.6]]], dtype=np.float32)\n attention_layer = dense_attention.AdditiveAttention()\n attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))\n # Scale tensor of shape [1]\n attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)\n actual = attention_layer._calculate_scores(query=q, key=k)\n\n # Expected tensor of shape [1, 1, 1].\n # expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683\n expected = np.array([[[0.49550372683]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_calculate_scores_multi_dim(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Key tensor of shape [1, 3, 4]\n k = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n attention_layer = dense_attention.AdditiveAttention()\n attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))\n # Scale tensor of shape [4]\n attention_layer.scale = np.array([[[0.5, 0.6, 0.7, 0.8]]], dtype=np.float32)\n actual = attention_layer._calculate_scores(query=q, key=k)\n\n # pylint:disable=line-too-long\n # expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581\n # expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449\n # expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652\n # expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449\n # expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652\n # expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916\n # pylint:enable=line-too-long\n expected = np.array([[[2.58044532581, 2.59734317449, 2.59964024652],\n [2.59734317449, 2.59964024652, 2.59995130916]]],\n dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_calculate_scores_one_dim_batch_size_two(self):\n # Query tensor of shape [2, 1, 1]\n q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)\n # Key tensor of shape [2, 1, 1]\n k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)\n attention_layer = dense_attention.AdditiveAttention()\n attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))\n # Scale tensor of shape [1]\n attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)\n actual = attention_layer._calculate_scores(query=q, key=k)\n\n # Expected tensor of shape [2, 1, 1].\n # expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683\n # expected100 = 0.5 * tanh(2.1 + 2.6) = 0.49991728277\n expected = np.array([[[0.49550372683]], [[0.49991728277]]],\n dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_shape(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 4]\n v = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = 
dense_attention.AdditiveAttention()\n actual = attention_layer([q, v], mask=[None, v_mask])\n\n expected_shape = [1, 2, 4]\n self.assertAllEqual(expected_shape, tf.shape(actual))\n\n def test_shape_no_scale(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 4]\n v = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.AdditiveAttention(use_scale=False)\n actual = attention_layer([q, v], mask=[None, v_mask])\n\n expected_shape = [1, 2, 4]\n self.assertAllEqual(expected_shape, tf.shape(actual))\n\n def test_shape_with_key(self):\n # Query tensor of shape [1, 2, 4]\n q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 4]\n v = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Key tensor of shape [1, 3, 4]\n k = np.array(\n [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],\n dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.AdditiveAttention()\n actual = attention_layer([q, v, k], mask=[None, v_mask])\n\n expected_shape = [1, 2, 4]\n self.assertAllEqual(expected_shape, tf.shape(actual))\n\n def test_multi_dim(self):\n # Query tensor of shape [1, 1, 1]\n q = np.array([[[1.1]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 1]\n v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.AdditiveAttention()\n attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))\n # Scale tensor of shape [1]\n attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)\n actual = attention_layer([q, v], mask=[None, v_mask])\n\n # pylint:disable=line-too-long\n # Expected scores of shape [1, 1, 3]\n # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]\n # = [[[0.49550372683, 0.47340300642, 0.14565630622]]]\n # Expected attention distribution = softmax(scores) with zeros in\n # positions where v_mask == False.\n # => attention_distribution000\n # = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))\n # = 0.50552495521\n # attention_distribution001\n # = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))\n # = 0.49447504478\n # attention_distribution002 = 0\n #\n # Expected tensor of shape [1, 1, 1].\n # expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8\n # = 1.15497245968\n # pylint:enable=line-too-long\n expected = np.array([[[1.15497245968]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_multi_dim_with_key(self):\n # Query tensor of shape [1, 1, 1]\n q = np.array([[[1.1]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 1]\n v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)\n # Key tensor of shape [1, 3, 1]\n k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.AdditiveAttention()\n attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))\n # Scale tensor of shape [1]\n attention_layer.scale = np.array([[[0.5]]], 
dtype=np.float32)\n actual = attention_layer([q, v, k], mask=[None, v_mask])\n\n # pylint:disable=line-too-long\n # Expected scores of shape [1, 1, 3]\n # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]\n # = [[[0.49550372683, 0.47340300642, 0.14565630622]]]\n # Expected attention distribution = softmax(scores) with zeros in\n # positions where v_mask == False.\n # => attention_distribution000\n # = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))\n # = 0.50552495521\n # attention_distribution001\n # = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))\n # = 0.49447504478\n # attention_distribution002 = 0\n #\n # Expected tensor of shape [1, 1, 1].\n # expected000 = 0.50552495521 * 0.5 + 0.49447504478 * 0.8 - 0 * 0.3\n # = 0.64834251342\n # pylint:enable=line-too-long\n expected = np.array([[[0.64834251342]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_multi_dim_with_query_mask(self):\n # Query tensor of shape [1, 2, 1]\n q = np.array([[[1.1], [-0.5]]], dtype=np.float32)\n # Value tensor of shape [1, 3, 1]\n v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)\n # Query mask tensor of shape [1, 2]\n q_mask = np.array([[True, False]], dtype=np.bool_)\n # Value mask tensor of shape [1, 3]\n v_mask = np.array([[True, True, False]], dtype=np.bool_)\n attention_layer = dense_attention.AdditiveAttention()\n attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))\n # Scale tensor of shape [1]\n attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)\n actual = attention_layer([q, v], mask=[q_mask, v_mask])\n\n # pylint:disable=line-too-long\n # Expected scores of shape [1, 2, 3]\n # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)],\n # [0.5 * tanh(-0.5 + 1.6), 0.5 * tanh(-0.5 + 0.7), 0.5 * tanh(-0.5 - 0.8)]]]\n # = [[[0.49550372683, 0.47340300642, 0.14565630622],\n # [0.40024951088, 0.09868766011, -0.43086157965]]]\n # Expected attention distribution = softmax(scores) with zeros in\n # positions where v_mask == False.\n # => attention_distribution000\n # = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))\n # = 0.50552495521\n # attention_distribution001\n # = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))\n # = 0.49447504478\n # attention_distribution002 = 0\n # => attention_distribution010\n # = exp(0.40024951088)/(exp(0.40024951088) + exp(0.09868766011))\n # = 0.57482427975\n # attention_distribution011\n # = exp(0.09868766011)/(exp(0.40024951088) + exp(0.09868766011))\n # = 0.42517572025\n # attention_distribution012 = 0\n #\n # Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.\n # expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8\n # = 1.15497245968\n # expected000 = 0\n # pylint:enable=line-too-long\n expected = np.array([[[1.15497245968], [0.]]], dtype=np.float32)\n self.assertAllClose(expected, actual)\n\n def test_serialization(self):\n # Test serialization with use_scale\n layer = dense_attention.AdditiveAttention(use_scale=True)\n\n config = keras.layers.serialize(layer)\n new_layer = keras.layers.deserialize(config)\n self.assertEqual(new_layer.use_scale, True)\n\n config = layer.get_config()\n new_layer = dense_attention.AdditiveAttention.from_config(config)\n self.assertEqual(new_layer.use_scale, True)\n\n @testing_utils.enable_v2_dtype_behavior\n def test_mixed_float16_policy(self):\n # Test case for GitHub issue:\n # https://github.com/tensorflow/tensorflow/issues/46064\n with 
policy.policy_scope('mixed_float16'):\n q = tf.cast(tf.random.uniform((2, 3, 4), seed=1), 'float16')\n v = tf.cast(tf.random.uniform((2, 3, 4), seed=2), 'float16')\n k = tf.cast(tf.random.uniform((2, 3, 4), seed=3), 'float16')\n layer = dense_attention.AdditiveAttention(causal=True)\n _ = layer([q, v, k])\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass LowerTriangularMaskTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_square_shape(self):\n actual = dense_attention._lower_triangular_mask([3, 3])\n expected = np.array(\n [[True, False, False], [True, True, False], [True, True, True]],\n dtype=np.bool_)\n self.assertAllEqual(expected, actual)\n\n def test_orthogonal_shape(self):\n actual = dense_attention._lower_triangular_mask([3, 2])\n expected = np.array([[True, False], [True, True], [True, True]],\n dtype=np.bool_)\n self.assertAllEqual(expected, actual)\n\n def test_three_dim(self):\n actual = dense_attention._lower_triangular_mask([1, 3, 3])\n expected = np.array(\n [[[True, False, False], [True, True, False], [True, True, True]]],\n dtype=np.bool_)\n self.assertAllEqual(expected, actual)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.test.main", "numpy.ones", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.random.uniform", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Fritingo/AlexNet_on_browser
[ "3e674dd84e25ee74f2efde77882b4faa788907c2" ]
[ "AlexNet/Alexnet_to_onnx.py" ]
[ "import torch\n\nfrom inference_Alexnet import AlexNet\n\n\ndef main():\n pytorch_model = AlexNet()\n pytorch_model.load_state_dict(torch.load('cifar100_Alexnet.pt'))\n pytorch_model.eval()\n dummy_input = torch.zeros(128*128*4)\n torch.onnx.export(pytorch_model, dummy_input, 'cifar100_Alexnet.onnx', verbose=True)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.onnx.export", "torch.load", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vincefn/silx
[ "4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444", "4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444", "4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444", "4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444", "13301e61627f98fa837008250ac74a0627a7a560", "13301e61627f98fa837008250ac74a0627a7a560", "13301e61627f98fa837008250ac74a0627a7a560", "4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444", "4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444", "13301e61627f98fa837008250ac74a0627a7a560" ]
[ "examples/plotStats.py", "silx/gui/plot/CompareImages.py", "silx/io/spech5.py", "silx/opencl/backprojection.py", "silx/utils/test/test_proxy.py", "silx/gui/_glutils/Texture.py", "silx/gui/plot3d/scene/cutplane.py", "silx/gui/plot3d/items/volume.py", "examples/colormapDialog.py", "silx/image/marchingsquares/test/test_mergeimpl.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2016-2019 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This script is a simple example of how to add your own statistic to a\n:class:`~silx.gui.plot.statsWidget.StatsWidget` from customs\n:class:`~silx.gui.plot.stats.Stats` and display it.\n\nOn this example we will:\n\n - show sum of values for each type\n - compute curve integrals (only for 'curve').\n - compute center of mass for all possible items\n\n.. note:: for now the possible types manged by the Stats are ('curve', 'image',\n 'scatter' and 'histogram')\n\"\"\"\n\n__authors__ = [\"H. 
Payno\"]\n__license__ = \"MIT\"\n__date__ = \"24/07/2018\"\n\n\nfrom silx.gui import qt\nfrom silx.gui.colors import Colormap\nfrom silx.gui.plot import Plot1D\nfrom silx.gui.plot.stats.stats import StatBase\nimport numpy\n\n\nclass Integral(StatBase):\n \"\"\"\n Simple calculation of the line integral\n \"\"\"\n def __init__(self):\n StatBase.__init__(self, name='integral', compatibleKinds=('curve',))\n\n def calculate(self, context):\n xData, yData = context.data\n return numpy.trapz(x=xData, y=yData)\n\n\nclass COM(StatBase):\n \"\"\"\n Compute data center of mass\n \"\"\"\n def __init__(self):\n StatBase.__init__(self, name='COM', description=\"Center of mass\")\n\n def calculate(self, context):\n if context.kind in ('curve', 'histogram'):\n xData, yData = context.data\n deno = numpy.sum(yData).astype(numpy.float32)\n if deno == 0.0:\n return 0.0\n else:\n return numpy.sum(xData * yData).astype(numpy.float32) / deno\n elif context.kind == 'scatter':\n xData, yData, values = context.data\n values = values.astype(numpy.float64)\n deno = numpy.sum(values)\n if deno == 0.0:\n return float('inf'), float('inf')\n else:\n comX = numpy.sum(xData * values) / deno\n comY = numpy.sum(yData * values) / deno\n return comX, comY\n\n\ndef main():\n app = qt.QApplication([])\n\n plot = Plot1D()\n\n x = numpy.arange(21)\n y = numpy.arange(21)\n plot.addCurve(x=x, y=y, legend='myCurve')\n plot.addCurve(x=x, y=(y + 5), legend='myCurve2')\n\n plot.setActiveCurve('myCurve')\n\n plot.addScatter(x=[0, 2, 5, 5, 12, 20],\n y=[2, 3, 4, 20, 15, 6],\n value=[5, 6, 7, 10, 90, 20],\n colormap=Colormap('viridis'),\n legend='myScatter')\n\n stats = [\n ('sum', numpy.sum),\n Integral(),\n (COM(), '{0:.2f}'),\n ]\n\n plot.getStatsWidget().setStats(stats)\n plot.getStatsWidget().parent().setVisible(True)\n\n plot.show()\n app.exec_()\n\n\nif __name__ == '__main__':\n main()\n", "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"A widget dedicated to compare 2 images.\n\"\"\"\n\n__authors__ = [\"V. 
Valls\"]\n__license__ = \"MIT\"\n__date__ = \"23/07/2018\"\n\n\nimport enum\nimport logging\nimport numpy\nimport weakref\nimport collections\nimport math\n\nimport silx.image.bilinear\nfrom silx.gui import qt\nfrom silx.gui import plot\nfrom silx.gui import icons\nfrom silx.gui.colors import Colormap\nfrom silx.gui.plot import tools\n\n_logger = logging.getLogger(__name__)\n\nfrom silx.opencl import ocl\nif ocl is not None:\n from silx.opencl import sift\nelse: # No OpenCL device or no pyopencl\n sift = None\n\n\[email protected]\nclass VisualizationMode(enum.Enum):\n \"\"\"Enum for each visualization mode available.\"\"\"\n ONLY_A = 'a'\n ONLY_B = 'b'\n VERTICAL_LINE = 'vline'\n HORIZONTAL_LINE = 'hline'\n COMPOSITE_RED_BLUE_GRAY = \"rbgchannel\"\n COMPOSITE_RED_BLUE_GRAY_NEG = \"rbgnegchannel\"\n\n\[email protected]\nclass AlignmentMode(enum.Enum):\n \"\"\"Enum for each alignment mode available.\"\"\"\n ORIGIN = 'origin'\n CENTER = 'center'\n STRETCH = 'stretch'\n AUTO = 'auto'\n\n\nAffineTransformation = collections.namedtuple(\"AffineTransformation\",\n [\"tx\", \"ty\", \"sx\", \"sy\", \"rot\"])\n\"\"\"Contains a 2D affine transformation: translation, scale and rotation\"\"\"\n\n\nclass CompareImagesToolBar(qt.QToolBar):\n \"\"\"ToolBar containing specific tools to custom the configuration of a\n :class:`CompareImages` widget\n\n Use :meth:`setCompareWidget` to connect this toolbar to a specific\n :class:`CompareImages` widget.\n\n :param Union[qt.QWidget,None] parent: Parent of this widget.\n \"\"\"\n def __init__(self, parent=None):\n qt.QToolBar.__init__(self, parent)\n\n self.__compareWidget = None\n\n menu = qt.QMenu(self)\n self.__visualizationAction = qt.QAction(self)\n self.__visualizationAction.setMenu(menu)\n self.__visualizationAction.setCheckable(False)\n self.addAction(self.__visualizationAction)\n self.__visualizationGroup = qt.QActionGroup(self)\n self.__visualizationGroup.setExclusive(True)\n self.__visualizationGroup.triggered.connect(self.__visualizationModeChanged)\n\n icon = icons.getQIcon(\"compare-mode-a\")\n action = qt.QAction(icon, \"Display the first image only\", self)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n action.setShortcut(qt.QKeySequence(qt.Qt.Key_A))\n action.setProperty(\"mode\", VisualizationMode.ONLY_A)\n menu.addAction(action)\n self.__aModeAction = action\n self.__visualizationGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-mode-b\")\n action = qt.QAction(icon, \"Display the second image only\", self)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n action.setShortcut(qt.QKeySequence(qt.Qt.Key_B))\n action.setProperty(\"mode\", VisualizationMode.ONLY_B)\n menu.addAction(action)\n self.__bModeAction = action\n self.__visualizationGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-mode-vline\")\n action = qt.QAction(icon, \"Vertical compare mode\", self)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n action.setShortcut(qt.QKeySequence(qt.Qt.Key_V))\n action.setProperty(\"mode\", VisualizationMode.VERTICAL_LINE)\n menu.addAction(action)\n self.__vlineModeAction = action\n self.__visualizationGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-mode-hline\")\n action = qt.QAction(icon, \"Horizontal compare mode\", self)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n action.setShortcut(qt.QKeySequence(qt.Qt.Key_H))\n action.setProperty(\"mode\", VisualizationMode.HORIZONTAL_LINE)\n menu.addAction(action)\n self.__hlineModeAction = action\n 
self.__visualizationGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-mode-rb-channel\")\n action = qt.QAction(icon, \"Blue/red compare mode (additive mode)\", self)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n action.setShortcut(qt.QKeySequence(qt.Qt.Key_C))\n action.setProperty(\"mode\", VisualizationMode.COMPOSITE_RED_BLUE_GRAY)\n menu.addAction(action)\n self.__brChannelModeAction = action\n self.__visualizationGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-mode-rbneg-channel\")\n action = qt.QAction(icon, \"Yellow/cyan compare mode (subtractive mode)\", self)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n action.setShortcut(qt.QKeySequence(qt.Qt.Key_W))\n action.setProperty(\"mode\", VisualizationMode.COMPOSITE_RED_BLUE_GRAY_NEG)\n menu.addAction(action)\n self.__ycChannelModeAction = action\n self.__visualizationGroup.addAction(action)\n\n menu = qt.QMenu(self)\n self.__alignmentAction = qt.QAction(self)\n self.__alignmentAction.setMenu(menu)\n self.__alignmentAction.setIconVisibleInMenu(True)\n self.addAction(self.__alignmentAction)\n self.__alignmentGroup = qt.QActionGroup(self)\n self.__alignmentGroup.setExclusive(True)\n self.__alignmentGroup.triggered.connect(self.__alignmentModeChanged)\n\n icon = icons.getQIcon(\"compare-align-origin\")\n action = qt.QAction(icon, \"Align images on their upper-left pixel\", self)\n action.setProperty(\"mode\", AlignmentMode.ORIGIN)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n self.__originAlignAction = action\n menu.addAction(action)\n self.__alignmentGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-align-center\")\n action = qt.QAction(icon, \"Center images\", self)\n action.setProperty(\"mode\", AlignmentMode.CENTER)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n self.__centerAlignAction = action\n menu.addAction(action)\n self.__alignmentGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-align-stretch\")\n action = qt.QAction(icon, \"Stretch the second image on the first one\", self)\n action.setProperty(\"mode\", AlignmentMode.STRETCH)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n self.__stretchAlignAction = action\n menu.addAction(action)\n self.__alignmentGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-align-auto\")\n action = qt.QAction(icon, \"Auto-alignment of the second image\", self)\n action.setProperty(\"mode\", AlignmentMode.AUTO)\n action.setIconVisibleInMenu(True)\n action.setCheckable(True)\n self.__autoAlignAction = action\n menu.addAction(action)\n if sift is None:\n action.setEnabled(False)\n action.setToolTip(\"Sift module is not available\")\n self.__alignmentGroup.addAction(action)\n\n icon = icons.getQIcon(\"compare-keypoints\")\n action = qt.QAction(icon, \"Display/hide alignment keypoints\", self)\n action.setCheckable(True)\n action.triggered.connect(self.__keypointVisibilityChanged)\n self.addAction(action)\n self.__displayKeypoints = action\n\n def setCompareWidget(self, widget):\n \"\"\"\n Connect this tool bar to a specific :class:`CompareImages` widget.\n\n :param Union[None,CompareImages] widget: The widget to connect with.\n \"\"\"\n compareWidget = self.getCompareWidget()\n if compareWidget is not None:\n compareWidget.sigConfigurationChanged.disconnect(self.__updateSelectedActions)\n compareWidget = widget\n if compareWidget is None:\n self.__compareWidget = None\n else:\n self.__compareWidget = weakref.ref(compareWidget)\n if compareWidget is not None:\n 
widget.sigConfigurationChanged.connect(self.__updateSelectedActions)\n self.__updateSelectedActions()\n\n def getCompareWidget(self):\n \"\"\"Returns the connected widget.\n\n :rtype: CompareImages\n \"\"\"\n if self.__compareWidget is None:\n return None\n else:\n return self.__compareWidget()\n\n def __updateSelectedActions(self):\n \"\"\"\n Update the state of this tool bar according to the state of the\n connected :class:`CompareImages` widget.\n \"\"\"\n widget = self.getCompareWidget()\n if widget is None:\n return\n\n mode = widget.getVisualizationMode()\n action = None\n for a in self.__visualizationGroup.actions():\n actionMode = a.property(\"mode\")\n if mode == actionMode:\n action = a\n break\n old = self.__visualizationGroup.blockSignals(True)\n if action is not None:\n # Check this action\n action.setChecked(True)\n else:\n action = self.__visualizationGroup.checkedAction()\n if action is not None:\n # Uncheck this action\n action.setChecked(False)\n self.__updateVisualizationMenu()\n self.__visualizationGroup.blockSignals(old)\n\n mode = widget.getAlignmentMode()\n action = None\n for a in self.__alignmentGroup.actions():\n actionMode = a.property(\"mode\")\n if mode == actionMode:\n action = a\n break\n old = self.__alignmentGroup.blockSignals(True)\n if action is not None:\n # Check this action\n action.setChecked(True)\n else:\n action = self.__alignmentGroup.checkedAction()\n if action is not None:\n # Uncheck this action\n action.setChecked(False)\n self.__updateAlignmentMenu()\n self.__alignmentGroup.blockSignals(old)\n\n def __visualizationModeChanged(self, selectedAction):\n \"\"\"Called when user requesting changes of the visualization mode.\n \"\"\"\n self.__updateVisualizationMenu()\n widget = self.getCompareWidget()\n if widget is not None:\n mode = selectedAction.property(\"mode\")\n widget.setVisualizationMode(mode)\n\n def __updateVisualizationMenu(self):\n \"\"\"Update the state of the action containing visualization menu.\n \"\"\"\n selectedAction = self.__visualizationGroup.checkedAction()\n if selectedAction is not None:\n self.__visualizationAction.setText(selectedAction.text())\n self.__visualizationAction.setIcon(selectedAction.icon())\n self.__visualizationAction.setToolTip(selectedAction.toolTip())\n else:\n self.__visualizationAction.setText(\"\")\n self.__visualizationAction.setIcon(qt.QIcon())\n self.__visualizationAction.setToolTip(\"\")\n\n def __alignmentModeChanged(self, selectedAction):\n \"\"\"Called when user requesting changes of the alignment mode.\n \"\"\"\n self.__updateAlignmentMenu()\n widget = self.getCompareWidget()\n if widget is not None:\n mode = selectedAction.property(\"mode\")\n widget.setAlignmentMode(mode)\n\n def __updateAlignmentMenu(self):\n \"\"\"Update the state of the action containing alignment menu.\n \"\"\"\n selectedAction = self.__alignmentGroup.checkedAction()\n if selectedAction is not None:\n self.__alignmentAction.setText(selectedAction.text())\n self.__alignmentAction.setIcon(selectedAction.icon())\n self.__alignmentAction.setToolTip(selectedAction.toolTip())\n else:\n self.__alignmentAction.setText(\"\")\n self.__alignmentAction.setIcon(qt.QIcon())\n self.__alignmentAction.setToolTip(\"\")\n\n def __keypointVisibilityChanged(self):\n \"\"\"Called when action managing keypoints visibility changes\"\"\"\n widget = self.getCompareWidget()\n if widget is not None:\n keypointsVisible = self.__displayKeypoints.isChecked()\n widget.setKeypointsVisible(keypointsVisible)\n\n\nclass 
CompareImagesStatusBar(qt.QStatusBar):\n \"\"\"StatusBar containing specific information contained in a\n :class:`CompareImages` widget\n\n Use :meth:`setCompareWidget` to connect this toolbar to a specific\n :class:`CompareImages` widget.\n\n :param Union[qt.QWidget,None] parent: Parent of this widget.\n \"\"\"\n def __init__(self, parent=None):\n qt.QStatusBar.__init__(self, parent)\n self.setSizeGripEnabled(False)\n self.layout().setSpacing(0)\n self.__compareWidget = None\n self._label1 = qt.QLabel(self)\n self._label1.setFrameShape(qt.QFrame.WinPanel)\n self._label1.setFrameShadow(qt.QFrame.Sunken)\n self._label2 = qt.QLabel(self)\n self._label2.setFrameShape(qt.QFrame.WinPanel)\n self._label2.setFrameShadow(qt.QFrame.Sunken)\n self._transform = qt.QLabel(self)\n self._transform.setFrameShape(qt.QFrame.WinPanel)\n self._transform.setFrameShadow(qt.QFrame.Sunken)\n self.addWidget(self._label1)\n self.addWidget(self._label2)\n self.addWidget(self._transform)\n self._pos = None\n self._updateStatusBar()\n\n def setCompareWidget(self, widget):\n \"\"\"\n Connect this tool bar to a specific :class:`CompareImages` widget.\n\n :param Union[None,CompareImages] widget: The widget to connect with.\n \"\"\"\n compareWidget = self.getCompareWidget()\n if compareWidget is not None:\n compareWidget.getPlot().sigPlotSignal.disconnect(self.__plotSignalReceived)\n compareWidget.sigConfigurationChanged.disconnect(self.__dataChanged)\n compareWidget = widget\n if compareWidget is None:\n self.__compareWidget = None\n else:\n self.__compareWidget = weakref.ref(compareWidget)\n if compareWidget is not None:\n compareWidget.getPlot().sigPlotSignal.connect(self.__plotSignalReceived)\n compareWidget.sigConfigurationChanged.connect(self.__dataChanged)\n\n def getCompareWidget(self):\n \"\"\"Returns the connected widget.\n\n :rtype: CompareImages\n \"\"\"\n if self.__compareWidget is None:\n return None\n else:\n return self.__compareWidget()\n\n def __plotSignalReceived(self, event):\n \"\"\"Called when old style signals at emmited from the plot.\"\"\"\n if event[\"event\"] == \"mouseMoved\":\n x, y = event[\"x\"], event[\"y\"]\n self.__mouseMoved(x, y)\n\n def __mouseMoved(self, x, y):\n \"\"\"Called when mouse move over the plot.\"\"\"\n self._pos = x, y\n self._updateStatusBar()\n\n def __dataChanged(self):\n \"\"\"Called when internal data from the connected widget changes.\"\"\"\n self._updateStatusBar()\n\n def _formatData(self, data):\n \"\"\"Format pixel of an image.\n\n It supports intensity, RGB, and RGBA.\n\n :param Union[int,float,numpy.ndarray,str]: Value of a pixel\n :rtype: str\n \"\"\"\n if data is None:\n return \"No data\"\n if isinstance(data, (int, numpy.integer)):\n return \"%d\" % data\n if isinstance(data, (float, numpy.floating)):\n return \"%f\" % data\n if isinstance(data, numpy.ndarray):\n # RGBA value\n if data.shape == (3,):\n return \"R:%d G:%d B:%d\" % (data[0], data[1], data[2])\n elif data.shape == (4,):\n return \"R:%d G:%d B:%d A:%d\" % (data[0], data[1], data[2], data[3])\n _logger.debug(\"Unsupported data format %s. 
Cast it to string.\", type(data))\n return str(data)\n\n def _updateStatusBar(self):\n \"\"\"Update the content of the status bar\"\"\"\n widget = self.getCompareWidget()\n if widget is None:\n self._label1.setText(\"Image1: NA\")\n self._label2.setText(\"Image2: NA\")\n self._transform.setVisible(False)\n else:\n transform = widget.getTransformation()\n self._transform.setVisible(transform is not None)\n if transform is not None:\n has_notable_translation = not numpy.isclose(transform.tx, 0.0, atol=0.01) \\\n or not numpy.isclose(transform.ty, 0.0, atol=0.01)\n has_notable_scale = not numpy.isclose(transform.sx, 1.0, atol=0.01) \\\n or not numpy.isclose(transform.sy, 1.0, atol=0.01)\n has_notable_rotation = not numpy.isclose(transform.rot, 0.0, atol=0.01)\n\n strings = []\n if has_notable_translation:\n strings.append(\"Translation\")\n if has_notable_scale:\n strings.append(\"Scale\")\n if has_notable_rotation:\n strings.append(\"Rotation\")\n if strings == []:\n has_translation = not numpy.isclose(transform.tx, 0.0) \\\n or not numpy.isclose(transform.ty, 0.0)\n has_scale = not numpy.isclose(transform.sx, 1.0) \\\n or not numpy.isclose(transform.sy, 1.0)\n has_rotation = not numpy.isclose(transform.rot, 0.0)\n if has_translation or has_scale or has_rotation:\n text = \"No big changes\"\n else:\n text = \"No changes\"\n else:\n text = \"+\".join(strings)\n self._transform.setText(\"Align: \" + text)\n\n strings = []\n if not numpy.isclose(transform.ty, 0.0):\n strings.append(\"Translation x: %0.3fpx\" % transform.tx)\n if not numpy.isclose(transform.ty, 0.0):\n strings.append(\"Translation y: %0.3fpx\" % transform.ty)\n if not numpy.isclose(transform.sx, 1.0):\n strings.append(\"Scale x: %0.3f\" % transform.sx)\n if not numpy.isclose(transform.sy, 1.0):\n strings.append(\"Scale y: %0.3f\" % transform.sy)\n if not numpy.isclose(transform.rot, 0.0):\n strings.append(\"Rotation: %0.3fdeg\" % (transform.rot * 180 / numpy.pi))\n if strings == []:\n text = \"No transformation\"\n else:\n text = \"\\n\".join(strings)\n self._transform.setToolTip(text)\n\n if self._pos is None:\n self._label1.setText(\"Image1: NA\")\n self._label2.setText(\"Image2: NA\")\n else:\n data1, data2 = widget.getRawPixelData(self._pos[0], self._pos[1])\n if isinstance(data1, str):\n self._label1.setToolTip(data1)\n text1 = \"NA\"\n else:\n self._label1.setToolTip(\"\")\n text1 = self._formatData(data1)\n if isinstance(data2, str):\n self._label2.setToolTip(data2)\n text2 = \"NA\"\n else:\n self._label2.setToolTip(\"\")\n text2 = self._formatData(data2)\n self._label1.setText(\"Image1: %s\" % text1)\n self._label2.setText(\"Image2: %s\" % text2)\n\n\nclass CompareImages(qt.QMainWindow):\n \"\"\"Widget providing tools to compare 2 images.\n\n .. image:: img/CompareImages.png\n\n :param Union[qt.QWidget,None] parent: Parent of this widget.\n :param backend: The backend to use, in:\n 'matplotlib' (default), 'mpl', 'opengl', 'gl', 'none'\n or a :class:`BackendBase.BackendBase` class\n :type backend: str or :class:`BackendBase.BackendBase`\n \"\"\"\n\n VisualizationMode = VisualizationMode\n \"\"\"Available visualization modes\"\"\"\n\n AlignmentMode = AlignmentMode\n \"\"\"Available alignment modes\"\"\"\n\n sigConfigurationChanged = qt.Signal()\n \"\"\"Emitted when the configuration of the widget (visualization mode,\n alignement mode...) 
have changed.\"\"\"\n\n def __init__(self, parent=None, backend=None):\n qt.QMainWindow.__init__(self, parent)\n\n if parent is None:\n self.setWindowTitle('Compare images')\n else:\n self.setWindowFlags(qt.Qt.Widget)\n\n self.__transformation = None\n self.__raw1 = None\n self.__raw2 = None\n self.__data1 = None\n self.__data2 = None\n self.__previousSeparatorPosition = None\n\n self.__plot = plot.PlotWidget(parent=self, backend=backend)\n self.__plot.getXAxis().setLabel('Columns')\n self.__plot.getYAxis().setLabel('Rows')\n if silx.config.DEFAULT_PLOT_IMAGE_Y_AXIS_ORIENTATION == 'downward':\n self.__plot.getYAxis().setInverted(True)\n\n self.__plot.setKeepDataAspectRatio(True)\n self.__plot.sigPlotSignal.connect(self.__plotSlot)\n self.__plot.setAxesDisplayed(False)\n\n self.setCentralWidget(self.__plot)\n\n legend = VisualizationMode.VERTICAL_LINE.name\n self.__plot.addXMarker(\n 0,\n legend=legend,\n text='',\n draggable=True,\n color='blue',\n constraint=self.__separatorConstraint)\n self.__vline = self.__plot._getMarker(legend)\n\n legend = VisualizationMode.HORIZONTAL_LINE.name\n self.__plot.addYMarker(\n 0,\n legend=legend,\n text='',\n draggable=True,\n color='blue',\n constraint=self.__separatorConstraint)\n self.__hline = self.__plot._getMarker(legend)\n\n # default values\n self.__visualizationMode = \"\"\n self.__alignmentMode = \"\"\n self.__keypointsVisible = True\n\n self.setAlignmentMode(AlignmentMode.ORIGIN)\n self.setVisualizationMode(VisualizationMode.VERTICAL_LINE)\n self.setKeypointsVisible(False)\n\n # Toolbars\n\n self._createToolBars(self.__plot)\n if self._interactiveModeToolBar is not None:\n self.addToolBar(self._interactiveModeToolBar)\n if self._imageToolBar is not None:\n self.addToolBar(self._imageToolBar)\n if self._compareToolBar is not None:\n self.addToolBar(self._compareToolBar)\n\n # Statusbar\n\n self._createStatusBar(self.__plot)\n if self._statusBar is not None:\n self.setStatusBar(self._statusBar)\n\n def _createStatusBar(self, plot):\n self._statusBar = CompareImagesStatusBar(self)\n self._statusBar.setCompareWidget(self)\n\n def _createToolBars(self, plot):\n \"\"\"Create tool bars displayed by the widget\"\"\"\n toolBar = tools.InteractiveModeToolBar(parent=self, plot=plot)\n self._interactiveModeToolBar = toolBar\n toolBar = tools.ImageToolBar(parent=self, plot=plot)\n self._imageToolBar = toolBar\n toolBar = CompareImagesToolBar(self)\n toolBar.setCompareWidget(self)\n self._compareToolBar = toolBar\n\n def getPlot(self):\n \"\"\"Returns the plot which is used to display the images.\n\n :rtype: silx.gui.plot.PlotWidget\n \"\"\"\n return self.__plot\n\n def getRawPixelData(self, x, y):\n \"\"\"Return the raw pixel of each image data from axes positions.\n\n If the coordinate is outside of the image it returns None element in\n the tuple.\n\n The pixel is reach from the raw data image without filter or\n transformation. But the coordinate x and y are in the reference of the\n current displayed mode.\n\n :param float x: X-coordinate of the pixel in the current displayed plot\n :param float y: Y-coordinate of the pixel in the current displayed plot\n :return: A tuple of for each images containing pixel information. 
It\n could be a scalar value or an array in case of RGB/RGBA informations.\n It also could be a string containing information is some cases.\n :rtype: Tuple(Union[int,float,numpy.ndarray,str],Union[int,float,numpy.ndarray,str])\n \"\"\"\n data2 = None\n alignmentMode = self.__alignmentMode\n raw1, raw2 = self.__raw1, self.__raw2\n if alignmentMode == AlignmentMode.ORIGIN:\n x1 = x\n y1 = y\n x2 = x\n y2 = y\n elif alignmentMode == AlignmentMode.CENTER:\n yy = max(raw1.shape[0], raw2.shape[0])\n xx = max(raw1.shape[1], raw2.shape[1])\n x1 = x - (xx - raw1.shape[1]) * 0.5\n x2 = x - (xx - raw2.shape[1]) * 0.5\n y1 = y - (yy - raw1.shape[0]) * 0.5\n y2 = y - (yy - raw2.shape[0]) * 0.5\n elif alignmentMode == AlignmentMode.STRETCH:\n x1 = x\n y1 = y\n x2 = x * raw2.shape[1] / raw1.shape[1]\n y2 = x * raw2.shape[1] / raw1.shape[1]\n elif alignmentMode == AlignmentMode.AUTO:\n x1 = x\n y1 = y\n # Not implemented\n data2 = \"Not implemented with sift\"\n else:\n assert(False)\n\n x1, y1 = int(x1), int(y1)\n if raw1 is None or y1 < 0 or y1 >= raw1.shape[0] or x1 < 0 or x1 >= raw1.shape[1]:\n data1 = None\n else:\n data1 = raw1[y1, x1]\n\n if data2 is None:\n x2, y2 = int(x2), int(y2)\n if raw2 is None or y2 < 0 or y2 >= raw2.shape[0] or x2 < 0 or x2 >= raw2.shape[1]:\n data2 = None\n else:\n data2 = raw2[y2, x2]\n\n return data1, data2\n\n def setVisualizationMode(self, mode):\n \"\"\"Set the visualization mode.\n\n :param str mode: New visualization to display the image comparison\n \"\"\"\n if self.__visualizationMode == mode:\n return\n self.__visualizationMode = mode\n mode = self.getVisualizationMode()\n self.__vline.setVisible(mode == VisualizationMode.VERTICAL_LINE)\n self.__hline.setVisible(mode == VisualizationMode.HORIZONTAL_LINE)\n self.__updateData()\n self.sigConfigurationChanged.emit()\n\n def getVisualizationMode(self):\n \"\"\"Returns the current interaction mode.\"\"\"\n return self.__visualizationMode\n\n def setAlignmentMode(self, mode):\n \"\"\"Set the alignment mode.\n\n :param str mode: New alignement to apply to images\n \"\"\"\n if self.__alignmentMode == mode:\n return\n self.__alignmentMode = mode\n self.__updateData()\n self.sigConfigurationChanged.emit()\n\n def getAlignmentMode(self):\n \"\"\"Returns the current selected alignemnt mode.\"\"\"\n return self.__alignmentMode\n\n def setKeypointsVisible(self, isVisible):\n \"\"\"Set keypoints visibility.\n\n :param bool isVisible: If True, keypoints are displayed (if some)\n \"\"\"\n if self.__keypointsVisible == isVisible:\n return\n self.__keypointsVisible = isVisible\n self.__updateKeyPoints()\n self.sigConfigurationChanged.emit()\n\n def __setDefaultAlignmentMode(self):\n \"\"\"Reset the alignemnt mode to the default value\"\"\"\n self.setAlignmentMode(AlignmentMode.ORIGIN)\n\n def __plotSlot(self, event):\n \"\"\"Handle events from the plot\"\"\"\n if event['event'] in ('markerMoving', 'markerMoved'):\n mode = self.getVisualizationMode()\n legend = mode.name\n if event['label'] == legend:\n if mode == VisualizationMode.VERTICAL_LINE:\n value = int(float(str(event['xdata'])))\n elif mode == VisualizationMode.HORIZONTAL_LINE:\n value = int(float(str(event['ydata'])))\n else:\n assert(False)\n if self.__previousSeparatorPosition != value:\n self.__separatorMoved(value)\n self.__previousSeparatorPosition = value\n\n def __separatorConstraint(self, x, y):\n \"\"\"Manage contains on the separators to clamp them inside the images.\"\"\"\n if self.__data1 is None:\n return 0, 0\n x = int(x)\n if x < 0:\n x = 0\n elif x > 
self.__data1.shape[1]:\n x = self.__data1.shape[1]\n y = int(y)\n if y < 0:\n y = 0\n elif y > self.__data1.shape[0]:\n y = self.__data1.shape[0]\n return x, y\n\n def __updateSeparators(self):\n \"\"\"Redraw images according to the current state of the separators.\n \"\"\"\n mode = self.getVisualizationMode()\n if mode == VisualizationMode.VERTICAL_LINE:\n pos = self.__vline.getXPosition()\n self.__separatorMoved(pos)\n self.__previousSeparatorPosition = pos\n elif mode == VisualizationMode.HORIZONTAL_LINE:\n pos = self.__hline.getYPosition()\n self.__separatorMoved(pos)\n self.__previousSeparatorPosition = pos\n else:\n self.__image1.setOrigin((0, 0))\n self.__image2.setOrigin((0, 0))\n\n def __separatorMoved(self, pos):\n \"\"\"Called when vertical or horizontal separators have moved.\n\n Update the displayed images.\n \"\"\"\n if self.__data1 is None:\n return\n\n mode = self.getVisualizationMode()\n if mode == VisualizationMode.VERTICAL_LINE:\n pos = int(pos)\n if pos <= 0:\n pos = 0\n elif pos >= self.__data1.shape[1]:\n pos = self.__data1.shape[1]\n data1 = self.__data1[:, 0:pos]\n data2 = self.__data2[:, pos:]\n self.__image1.setData(data1, copy=False)\n self.__image2.setData(data2, copy=False)\n self.__image2.setOrigin((pos, 0))\n elif mode == VisualizationMode.HORIZONTAL_LINE:\n pos = int(pos)\n if pos <= 0:\n pos = 0\n elif pos >= self.__data1.shape[0]:\n pos = self.__data1.shape[0]\n data1 = self.__data1[0:pos, :]\n data2 = self.__data2[pos:, :]\n self.__image1.setData(data1, copy=False)\n self.__image2.setData(data2, copy=False)\n self.__image2.setOrigin((0, pos))\n else:\n assert(False)\n\n def setData(self, image1, image2):\n \"\"\"Set images to compare.\n\n Images can contains floating-point or integer values, or RGB and RGBA\n values, but should have comparable intensities.\n\n RGB and RGBA images are provided as an array as `[width,height,channels]`\n of usigned integer 8-bits or floating-points between 0.0 to 1.0.\n\n :param numpy.ndarray image1: The first image\n :param numpy.ndarray image2: The second image\n \"\"\"\n self.__raw1 = image1\n self.__raw2 = image2\n self.__updateData()\n self.__plot.resetZoom()\n\n def setImage1(self, image1):\n \"\"\"Set image1 to be compared.\n\n Images can contains floating-point or integer values, or RGB and RGBA\n values, but should have comparable intensities.\n\n RGB and RGBA images are provided as an array as `[width,height,channels]`\n of usigned integer 8-bits or floating-points between 0.0 to 1.0.\n\n :param numpy.ndarray image1: The first image\n \"\"\"\n self.__raw1 = image1\n self.__updateData()\n self.__plot.resetZoom()\n\n def setImage2(self, image2):\n \"\"\"Set image2 to be compared.\n\n Images can contains floating-point or integer values, or RGB and RGBA\n values, but should have comparable intensities.\n\n RGB and RGBA images are provided as an array as `[width,height,channels]`\n of usigned integer 8-bits or floating-points between 0.0 to 1.0.\n\n :param numpy.ndarray image2: The second image\n \"\"\"\n self.__raw2 = image2\n self.__updateData()\n self.__plot.resetZoom()\n\n def __updateKeyPoints(self):\n \"\"\"Update the displayed keypoints using cached keypoints.\n \"\"\"\n if self.__keypointsVisible:\n data = self.__matching_keypoints\n else:\n data = [], [], []\n self.__plot.addScatter(x=data[0],\n y=data[1],\n z=1,\n value=data[2],\n legend=\"keypoints\",\n colormap=Colormap(\"spring\"))\n\n def __updateData(self):\n \"\"\"Compute aligned image when the alignement mode changes.\n\n This function cache input 
images which are used when\n vertical/horizontal separators moves.\n \"\"\"\n raw1, raw2 = self.__raw1, self.__raw2\n if raw1 is None or raw2 is None:\n return\n\n alignmentMode = self.getAlignmentMode()\n self.__transformation = None\n\n if alignmentMode == AlignmentMode.ORIGIN:\n yy = max(raw1.shape[0], raw2.shape[0])\n xx = max(raw1.shape[1], raw2.shape[1])\n size = yy, xx\n data1 = self.__createMarginImage(raw1, size, transparent=True)\n data2 = self.__createMarginImage(raw2, size, transparent=True)\n self.__matching_keypoints = [0.0], [0.0], [1.0]\n elif alignmentMode == AlignmentMode.CENTER:\n yy = max(raw1.shape[0], raw2.shape[0])\n xx = max(raw1.shape[1], raw2.shape[1])\n size = yy, xx\n data1 = self.__createMarginImage(raw1, size, transparent=True, center=True)\n data2 = self.__createMarginImage(raw2, size, transparent=True, center=True)\n self.__matching_keypoints = ([data1.shape[1] // 2],\n [data1.shape[0] // 2],\n [1.0])\n elif alignmentMode == AlignmentMode.STRETCH:\n data1 = raw1\n data2 = self.__rescaleImage(raw2, data1.shape)\n self.__matching_keypoints = ([0, data1.shape[1], data1.shape[1], 0],\n [0, 0, data1.shape[0], data1.shape[0]],\n [1.0, 1.0, 1.0, 1.0])\n elif alignmentMode == AlignmentMode.AUTO:\n # TODO: sift implementation do not support RGBA images\n yy = max(raw1.shape[0], raw2.shape[0])\n xx = max(raw1.shape[1], raw2.shape[1])\n size = yy, xx\n data1 = self.__createMarginImage(raw1, size)\n data2 = self.__createMarginImage(raw2, size)\n self.__matching_keypoints = [0.0], [0.0], [1.0]\n try:\n data1, data2 = self.__createSiftData(data1, data2)\n if data2 is None:\n raise ValueError(\"Unexpected None value\")\n except Exception as e:\n # TODO: Display it on the GUI\n _logger.error(e)\n self.__setDefaultAlignmentMode()\n return\n else:\n assert(False)\n\n mode = self.getVisualizationMode()\n if mode == VisualizationMode.COMPOSITE_RED_BLUE_GRAY_NEG:\n data1 = self.__composeImage(data1, data2, mode)\n data2 = numpy.empty((0, 0))\n elif mode == VisualizationMode.COMPOSITE_RED_BLUE_GRAY:\n data1 = self.__composeImage(data1, data2, mode)\n data2 = numpy.empty((0, 0))\n elif mode == VisualizationMode.ONLY_A:\n data2 = numpy.empty((0, 0))\n elif mode == VisualizationMode.ONLY_B:\n data1 = numpy.empty((0, 0))\n\n self.__data1, self.__data2 = data1, data2\n self.__plot.addImage(data1, z=0, legend=\"image1\", resetzoom=False)\n self.__plot.addImage(data2, z=0, legend=\"image2\", resetzoom=False)\n self.__image1 = self.__plot.getImage(\"image1\")\n self.__image2 = self.__plot.getImage(\"image2\")\n self.__updateKeyPoints()\n\n # Set the separator into the middle\n if self.__previousSeparatorPosition is None:\n value = self.__data1.shape[1] // 2\n self.__vline.setPosition(value, 0)\n value = self.__data1.shape[0] // 2\n self.__hline.setPosition(0, value)\n self.__updateSeparators()\n\n # Avoid to change the colormap range when the separator is moving\n # TODO: The colormap histogram will still be wrong\n mode1 = self.__getImageMode(data1)\n mode2 = self.__getImageMode(data2)\n if mode1 == \"intensity\" and mode1 == mode2:\n if self.__data1.size == 0:\n vmin = self.__data2.min()\n vmax = self.__data2.max()\n elif self.__data2.size == 0:\n vmin = self.__data1.min()\n vmax = self.__data1.max()\n else:\n vmin = min(self.__data1.min(), self.__data2.min())\n vmax = max(self.__data1.max(), self.__data2.max())\n colormap = Colormap(vmin=vmin, vmax=vmax)\n self.__image1.setColormap(colormap)\n self.__image2.setColormap(colormap)\n\n def __getImageMode(self, image):\n \"\"\"Returns a 
value identifying the way the image is stored in the\n array.\n\n :param numpy.ndarray image: Image to check\n :rtype: str\n \"\"\"\n if len(image.shape) == 2:\n return \"intensity\"\n elif len(image.shape) == 3:\n if image.shape[2] == 3:\n return \"rgb\"\n elif image.shape[2] == 4:\n return \"rgba\"\n raise TypeError(\"'image' argument is not an image.\")\n\n def __rescaleImage(self, image, shape):\n \"\"\"Rescale an image to the requested shape.\n\n :rtype: numpy.ndarray\n \"\"\"\n mode = self.__getImageMode(image)\n if mode == \"intensity\":\n data = self.__rescaleArray(image, shape)\n elif mode == \"rgb\":\n data = numpy.empty((shape[0], shape[1], 3), dtype=image.dtype)\n for c in range(3):\n data[:, :, c] = self.__rescaleArray(image[:, :, c], shape)\n elif mode == \"rgba\":\n data = numpy.empty((shape[0], shape[1], 4), dtype=image.dtype)\n for c in range(4):\n data[:, :, c] = self.__rescaleArray(image[:, :, c], shape)\n return data\n\n def __composeImage(self, data1, data2, mode):\n \"\"\"Returns an RBG image containing composition of data1 and data2 in 2\n different channels\n\n :param numpy.ndarray data1: First image\n :param numpy.ndarray data1: Second image\n :param VisualizationMode mode: Composition mode.\n :rtype: numpy.ndarray\n \"\"\"\n assert(data1.shape[0:2] == data2.shape[0:2])\n mode1 = self.__getImageMode(data1)\n if mode1 in [\"rgb\", \"rgba\"]:\n intensity1 = self.__luminosityImage(data1)\n vmin1, vmax1 = 0.0, 1.0\n else:\n intensity1 = data1\n vmin1, vmax1 = data1.min(), data1.max()\n\n mode2 = self.__getImageMode(data2)\n if mode2 in [\"rgb\", \"rgba\"]:\n intensity2 = self.__luminosityImage(data2)\n vmin2, vmax2 = 0.0, 1.0\n else:\n intensity2 = data2\n vmin2, vmax2 = data2.min(), data2.max()\n\n vmin, vmax = min(vmin1, vmin2) * 1.0, max(vmax1, vmax2) * 1.0\n shape = data1.shape\n result = numpy.empty((shape[0], shape[1], 3), dtype=numpy.uint8)\n a = (intensity1 - vmin) * (1.0 / (vmax - vmin)) * 255.0\n b = (intensity2 - vmin) * (1.0 / (vmax - vmin)) * 255.0\n if mode == VisualizationMode.COMPOSITE_RED_BLUE_GRAY:\n result[:, :, 0] = a\n result[:, :, 1] = (a + b) / 2\n result[:, :, 2] = b\n elif mode == VisualizationMode.COMPOSITE_RED_BLUE_GRAY_NEG:\n result[:, :, 0] = 255 - b\n result[:, :, 1] = 255 - (a + b) / 2\n result[:, :, 2] = 255 - a\n return result\n\n def __luminosityImage(self, image):\n \"\"\"Returns the luminosity channel from an RBG(A) image.\n The alpha channel is ignored.\n\n :rtype: numpy.ndarray\n \"\"\"\n mode = self.__getImageMode(image)\n assert(mode in [\"rgb\", \"rgba\"])\n is_uint8 = image.dtype.type == numpy.uint8\n # luminosity\n image = 0.21 * image[..., 0] + 0.72 * image[..., 1] + 0.07 * image[..., 2]\n if is_uint8:\n image = image / 255.0\n return image\n\n def __rescaleArray(self, image, shape):\n \"\"\"Rescale a 2D array to the requested shape.\n\n :rtype: numpy.ndarray\n \"\"\"\n y, x = numpy.ogrid[:shape[0], :shape[1]]\n y, x = y * 1.0 * (image.shape[0] - 1) / (shape[0] - 1), x * 1.0 * (image.shape[1] - 1) / (shape[1] - 1)\n b = silx.image.bilinear.BilinearImage(image)\n # TODO: could be optimized using strides\n x2d = numpy.zeros_like(y) + x\n y2d = numpy.zeros_like(x) + y\n result = b.map_coordinates((y2d, x2d))\n return result\n\n def __createMarginImage(self, image, size, transparent=False, center=False):\n \"\"\"Returns a new image with margin to respect the requested size.\n\n :rtype: numpy.ndarray\n \"\"\"\n assert(image.shape[0] <= size[0])\n assert(image.shape[1] <= size[1])\n if image.shape == size:\n return image\n mode = 
self.__getImageMode(image)\n\n if center:\n pos0 = size[0] // 2 - image.shape[0] // 2\n pos1 = size[1] // 2 - image.shape[1] // 2\n else:\n pos0, pos1 = 0, 0\n\n if mode == \"intensity\":\n data = numpy.zeros(size, dtype=image.dtype)\n data[pos0:pos0 + image.shape[0], pos1:pos1 + image.shape[1]] = image\n # TODO: It is maybe possible to put NaN on the margin\n else:\n if transparent:\n data = numpy.zeros((size[0], size[1], 4), dtype=numpy.uint8)\n else:\n data = numpy.zeros((size[0], size[1], 3), dtype=numpy.uint8)\n depth = min(data.shape[2], image.shape[2])\n data[pos0:pos0 + image.shape[0], pos1:pos1 + image.shape[1], 0:depth] = image[:, :, 0:depth]\n if transparent and depth == 3:\n data[pos0:pos0 + image.shape[0], pos1:pos1 + image.shape[1], 3] = 255\n return data\n\n def __toAffineTransformation(self, sift_result):\n \"\"\"Returns an affine transformation from the sift result.\n\n :param dict sift_result: Result of sift when using `all_result=True`\n :rtype: AffineTransformation\n \"\"\"\n offset = sift_result[\"offset\"]\n matrix = sift_result[\"matrix\"]\n\n tx = offset[0]\n ty = offset[1]\n a = matrix[0, 0]\n b = matrix[0, 1]\n c = matrix[1, 0]\n d = matrix[1, 1]\n rot = math.atan2(-b, a)\n sx = (-1.0 if a < 0 else 1.0) * math.sqrt(a**2 + b**2)\n sy = (-1.0 if d < 0 else 1.0) * math.sqrt(c**2 + d**2)\n return AffineTransformation(tx, ty, sx, sy, rot)\n\n def getTransformation(self):\n \"\"\"Retuns the affine transformation applied to the second image to align\n it to the first image.\n\n This result is only valid for sift alignment.\n\n :rtype: Union[None,AffineTransformation]\n \"\"\"\n return self.__transformation\n\n def __createSiftData(self, image, second_image):\n \"\"\"Generate key points and aligned images from 2 images.\n\n If no keypoints matches, unaligned data are anyway returns.\n\n :rtype: Tuple(numpy.ndarray,numpy.ndarray)\n \"\"\"\n devicetype = \"GPU\"\n\n # Compute base image\n sift_ocl = sift.SiftPlan(template=image, devicetype=devicetype)\n keypoints = sift_ocl(image)\n\n # Check image compatibility\n second_keypoints = sift_ocl(second_image)\n mp = sift.MatchPlan()\n match = mp(keypoints, second_keypoints)\n _logger.info(\"Number of Keypoints within image 1: %i\" % keypoints.size)\n _logger.info(\" within image 2: %i\" % second_keypoints.size)\n\n self.__matching_keypoints = (match[:].x[:, 0],\n match[:].y[:, 0],\n match[:].scale[:, 0])\n matching_keypoints = match.shape[0]\n _logger.info(\"Matching keypoints: %i\" % matching_keypoints)\n if matching_keypoints == 0:\n return image, second_image\n\n # TODO: Problem here is we have to compute 2 time sift\n # The first time to extract matching keypoints, second time\n # to extract the aligned image.\n\n # Normalize the second image\n sa = sift.LinearAlign(image, devicetype=devicetype)\n data1 = image\n # TODO: Create a sift issue: if data1 is RGB and data2 intensity\n # it returns None, while extracting manually keypoints (above) works\n result = sa.align(second_image, return_all=True)\n data2 = result[\"result\"]\n self.__transformation = self.__toAffineTransformation(result)\n return data1, data2\n", "# coding: utf-8\n# /*##########################################################################\n# Copyright (C) 2016-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ############################################################################*/\n\"\"\"This module provides a h5py-like API to access SpecFile data.\n\nAPI description\n+++++++++++++++\n\nSpecfile data structure exposed by this API:\n\n::\n\n /\n 1.1/\n title = \"…\"\n start_time = \"…\"\n instrument/\n specfile/\n file_header = \"…\"\n scan_header = \"…\"\n positioners/\n motor_name = value\n …\n mca_0/\n data = …\n calibration = …\n channels = …\n preset_time = …\n elapsed_time = …\n live_time = …\n\n mca_1/\n …\n …\n measurement/\n colname0 = …\n colname1 = …\n …\n mca_0/\n data -> /1.1/instrument/mca_0/data\n info -> /1.1/instrument/mca_0/\n …\n sample/\n ub_matrix = …\n unit_cell = …\n unit_cell_abc = …\n unit_cell_alphabetagamma = …\n 2.1/\n …\n\n``file_header`` and ``scan_header`` are the raw headers as they\nappear in the original file, as a string of lines separated by newline (``\\\\n``) characters.\n\nThe title is the content of the ``#S`` scan header line without the leading\n``#S`` and without the scan number (e.g ``\"ascan ss1vo -4.55687 -0.556875 40 0.2\"``).\n\nThe start time is converted to ISO8601 format (``\"2016-02-23T22:49:05Z\"``),\nif the original date format is standard.\n\nNumeric datasets are stored in *float32* format, except for scalar integers\nwhich are stored as *int64*.\n\nMotor positions (e.g. ``/1.1/instrument/positioners/motor_name``) can be\n1D numpy arrays if they are measured as scan data, or else scalars as defined\non ``#P`` scan header lines. A simple test is done to check if the motor name\nis also a data column header defined in the ``#L`` scan header line.\n\nScan data (e.g. ``/1.1/measurement/colname0``) is accessed by column,\nthe dataset name ``colname0`` being the column label as defined in the ``#L``\nscan header line.\n\nIf a ``/`` character is present in a column label or in a motor name in the\noriginal SPEC file, it will be substituted with a ``%`` character in the\ncorresponding dataset name.\n\nMCA data is exposed as a 2D numpy array containing all spectra for a given\nanalyser. The number of analysers is calculated as the number of MCA spectra\nper scan data line. Demultiplexing is then performed to assign the correct\nspectra to a given analyser.\n\nMCA calibration is an array of 3 scalars, from the ``#@CALIB`` header line.\nIt is identical for all MCA analysers, as there can be only one\n``#@CALIB`` line per scan.\n\nMCA channels is an array containing all channel numbers. 
This information is\ncomputed from the ``#@CHANN`` scan header line (if present), or computed from\nthe shape of the first spectrum in a scan (``[0, … len(first_spectrum] - 1]``).\n\nAccessing data\n++++++++++++++\n\nData and groups are accessed in :mod:`h5py` fashion::\n\n from silx.io.spech5 import SpecH5\n\n # Open a SpecFile\n sfh5 = SpecH5(\"test.dat\")\n\n # using SpecH5 as a regular group to access scans\n scan1group = sfh5[\"1.1\"]\n instrument_group = scan1group[\"instrument\"]\n\n # alternative: full path access\n measurement_group = sfh5[\"/1.1/measurement\"]\n\n # accessing a scan data column by name as a 1D numpy array\n data_array = measurement_group[\"Pslit HGap\"]\n\n # accessing all mca-spectra for one MCA device\n mca_0_spectra = measurement_group[\"mca_0/data\"]\n\n:class:`SpecH5` files and groups provide a :meth:`keys` method::\n\n >>> sfh5.keys()\n ['96.1', '97.1', '98.1']\n >>> sfh5['96.1'].keys()\n ['title', 'start_time', 'instrument', 'measurement']\n\nThey can also be treated as iterators:\n\n.. code-block:: python\n\n from silx.io import is_dataset\n\n for scan_group in SpecH5(\"test.dat\"):\n dataset_names = [item.name in scan_group[\"measurement\"] if\n is_dataset(item)]\n print(\"Found data columns in scan \" + scan_group.name)\n print(\", \".join(dataset_names))\n\nYou can test for existence of data or groups::\n\n >>> \"/1.1/measurement/Pslit HGap\" in sfh5\n True\n >>> \"positioners\" in sfh5[\"/2.1/instrument\"]\n True\n >>> \"spam\" in sfh5[\"1.1\"]\n False\n\n.. note::\n\n Text used to be stored with a dtype ``numpy.string_`` in silx versions\n prior to *0.7.0*. The type ``numpy.string_`` is a byte-string format.\n The consequence of this is that you had to decode strings before using\n them in **Python 3**::\n\n >>> from silx.io.spech5 import SpecH5\n >>> sfh5 = SpecH5(\"31oct98.dat\")\n >>> sfh5[\"/68.1/title\"]\n b'68 ascan tx3 -28.5 -24.5 20 0.5'\n >>> sfh5[\"/68.1/title\"].decode()\n '68 ascan tx3 -28.5 -24.5 20 0.5'\n\n From silx version *0.7.0* onwards, text is now stored as unicode. This\n corresponds to the default text type in python 3, and to the *unicode*\n type in Python 2.\n\n To be on the safe side, you can test for the presence of a *decode*\n attribute, to ensure that you always work with unicode text::\n\n >>> title = sfh5[\"/68.1/title\"]\n >>> if hasattr(title, \"decode\"):\n ... title = title.decode()\n\n\"\"\"\n\nimport datetime\nimport logging\nimport re\nimport io\n\nimport h5py\nimport numpy\nimport six\n\nfrom silx import version as silx_version\nfrom .specfile import SpecFile\nfrom . import commonh5\n\n__authors__ = [\"P. Knobel\", \"D. 
Naudet\"]\n__license__ = \"MIT\"\n__date__ = \"17/07/2018\"\n\nlogger1 = logging.getLogger(__name__)\n\n\ntext_dtype = h5py.special_dtype(vlen=six.text_type)\n\n\ndef to_h5py_utf8(str_list):\n \"\"\"Convert a string or a list of strings to a numpy array of\n unicode strings that can be written to HDF5 as utf-8.\n\n This ensures that the type will be consistent between python 2 and\n python 3, if attributes or datasets are saved to an HDF5 file.\n \"\"\"\n return numpy.array(str_list, dtype=text_dtype)\n\n\ndef _get_number_of_mca_analysers(scan):\n \"\"\"\n :param SpecFile sf: :class:`SpecFile` instance\n \"\"\"\n number_of_mca_spectra = len(scan.mca)\n # Scan.data is transposed\n number_of_data_lines = scan.data.shape[1]\n\n if not number_of_data_lines == 0:\n # Number of MCA spectra must be a multiple of number of data lines\n assert number_of_mca_spectra % number_of_data_lines == 0\n return number_of_mca_spectra // number_of_data_lines\n elif number_of_mca_spectra:\n # Case of a scan without data lines, only MCA.\n # Our only option is to assume that the number of analysers\n # is the number of #@CHANN lines\n return len(scan.mca.channels)\n else:\n return 0\n\n\ndef _motor_in_scan(sf, scan_key, motor_name):\n \"\"\"\n :param sf: :class:`SpecFile` instance\n :param scan_key: Scan identification key (e.g. ``1.1``)\n :param motor_name: Name of motor as defined in file header lines\n :return: ``True`` if motor exists in scan, else ``False``\n :raise: ``KeyError`` if scan_key not found in SpecFile\n \"\"\"\n if scan_key not in sf:\n raise KeyError(\"Scan key %s \" % scan_key +\n \"does not exist in SpecFile %s\" % sf.filename)\n ret = motor_name in sf[scan_key].motor_names\n if not ret and \"%\" in motor_name:\n motor_name = motor_name.replace(\"%\", \"/\")\n ret = motor_name in sf[scan_key].motor_names\n return ret\n\n\ndef _column_label_in_scan(sf, scan_key, column_label):\n \"\"\"\n :param sf: :class:`SpecFile` instance\n :param scan_key: Scan identification key (e.g. 
``1.1``)\n :param column_label: Column label as defined in scan header\n :return: ``True`` if data column label exists in scan, else ``False``\n :raise: ``KeyError`` if scan_key not found in SpecFile\n \"\"\"\n if scan_key not in sf:\n raise KeyError(\"Scan key %s \" % scan_key +\n \"does not exist in SpecFile %s\" % sf.filename)\n ret = column_label in sf[scan_key].labels\n if not ret and \"%\" in column_label:\n column_label = column_label.replace(\"%\", \"/\")\n ret = column_label in sf[scan_key].labels\n return ret\n\n\ndef _parse_UB_matrix(header_line):\n \"\"\"Parse G3 header line and return UB matrix\n\n :param str header_line: G3 header line\n :return: UB matrix\n \"\"\"\n return numpy.array(list(map(float, header_line.split()))).reshape((1, 3, 3))\n\n\ndef _ub_matrix_in_scan(scan):\n \"\"\"Return True if scan header has a G3 line and all values are not 0.\n\n :param scan: specfile.Scan instance\n :return: True or False\n \"\"\"\n if \"G3\" not in scan.scan_header_dict:\n return False\n return numpy.any(_parse_UB_matrix(scan.scan_header_dict[\"G3\"]))\n\n\ndef _parse_unit_cell(header_line):\n return numpy.array(list(map(float, header_line.split()))[0:6]).reshape((1, 6))\n\n\ndef _unit_cell_in_scan(scan):\n \"\"\"Return True if scan header has a G1 line and all values are not 0.\n\n :param scan: specfile.Scan instance\n :return: True or False\n \"\"\"\n if \"G1\" not in scan.scan_header_dict:\n return False\n return numpy.any(_parse_unit_cell(scan.scan_header_dict[\"G1\"]))\n\n\ndef _parse_ctime(ctime_lines, analyser_index=0):\n \"\"\"\n :param ctime_lines: e.g ``@CTIME %f %f %f``, first word ``@CTIME`` optional\n When multiple CTIME lines are present in a scan header, this argument\n is a concatenation of them separated by a ``\\\\n`` character.\n :param analyser_index: MCA device/analyser index, when multiple devices\n are in a scan.\n :return: (preset_time, live_time, elapsed_time)\n \"\"\"\n ctime_lines = ctime_lines.lstrip(\"@CTIME \")\n ctimes_lines_list = ctime_lines.split(\"\\n\")\n if len(ctimes_lines_list) == 1:\n # single @CTIME line for all devices\n ctime_line = ctimes_lines_list[0]\n else:\n ctime_line = ctimes_lines_list[analyser_index]\n if not len(ctime_line.split()) == 3:\n raise ValueError(\"Incorrect format for @CTIME header line \" +\n '(expected \"@CTIME %f %f %f\").')\n return list(map(float, ctime_line.split()))\n\n\ndef spec_date_to_iso8601(date, zone=None):\n \"\"\"Convert SpecFile date to Iso8601.\n\n :param date: Date (see supported formats below)\n :type date: str\n :param zone: Time zone as it appears in a ISO8601 date\n\n Supported formats:\n\n * ``DDD MMM dd hh:mm:ss YYYY``\n * ``DDD YYYY/MM/dd hh:mm:ss YYYY``\n\n where `DDD` is the abbreviated weekday, `MMM` is the month abbreviated\n name, `MM` is the month number (zero padded), `dd` is the weekday number\n (zero padded) `YYYY` is the year, `hh` the hour (zero padded), `mm` the\n minute (zero padded) and `ss` the second (zero padded).\n All names are expected to be in english.\n\n Examples::\n\n >>> spec_date_to_iso8601(\"Thu Feb 11 09:54:35 2016\")\n '2016-02-11T09:54:35'\n\n >>> spec_date_to_iso8601(\"Sat 2015/03/14 03:53:50\")\n '2015-03-14T03:53:50'\n \"\"\"\n months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',\n 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n\n days_rx = '(?P<day>' + '|'.join(days) + ')'\n months_rx = '(?P<month>' + '|'.join(months) + ')'\n year_rx = r'(?P<year>\\d{4})'\n day_nb_rx = r'(?P<day_nb>[0-3 ]\\d)'\n month_nb_rx = 
r'(?P<month_nb>[0-1]\\d)'\n hh_rx = r'(?P<hh>[0-2]\\d)'\n mm_rx = r'(?P<mm>[0-5]\\d)'\n ss_rx = r'(?P<ss>[0-5]\\d)'\n tz_rx = r'(?P<tz>[+-]\\d\\d:\\d\\d){0,1}'\n\n # date formats must have either month_nb (1..12) or month (Jan, Feb, ...)\n re_tpls = ['{days} {months} {day_nb} {hh}:{mm}:{ss}{tz} {year}',\n '{days} {year}/{month_nb}/{day_nb} {hh}:{mm}:{ss}{tz}']\n\n grp_d = None\n\n for rx in re_tpls:\n full_rx = rx.format(days=days_rx,\n months=months_rx,\n year=year_rx,\n day_nb=day_nb_rx,\n month_nb=month_nb_rx,\n hh=hh_rx,\n mm=mm_rx,\n ss=ss_rx,\n tz=tz_rx)\n m = re.match(full_rx, date)\n\n if m:\n grp_d = m.groupdict()\n break\n\n if not grp_d:\n raise ValueError('Date format not recognized : {0}'.format(date))\n\n year = grp_d['year']\n\n month = grp_d.get('month_nb')\n\n if not month:\n month = '{0:02d}'.format(months.index(grp_d.get('month')) + 1)\n\n day = grp_d['day_nb']\n\n tz = grp_d['tz']\n if not tz:\n tz = zone\n\n time = '{0}:{1}:{2}'.format(grp_d['hh'],\n grp_d['mm'],\n grp_d['ss'])\n\n full_date = '{0}-{1}-{2}T{3}{4}'.format(year,\n month,\n day,\n time,\n tz if tz else '')\n return full_date\n\n\ndef _demultiplex_mca(scan, analyser_index):\n \"\"\"Return MCA data for a single analyser.\n\n Each MCA spectrum is a 1D array. For each analyser, there is one\n spectrum recorded per scan data line. When there are more than a single\n MCA analyser in a scan, the data will be multiplexed. For instance if\n there are 3 analysers, the consecutive spectra for the first analyser must\n be accessed as ``mca[0], mca[3], mca[6]…``.\n\n :param scan: :class:`Scan` instance containing the MCA data\n :param analyser_index: 0-based index referencing the analyser\n :type analyser_index: int\n :return: 2D numpy array containing all spectra for one analyser\n \"\"\"\n number_of_analysers = _get_number_of_mca_analysers(scan)\n number_of_spectra = len(scan.mca)\n number_of_spectra_per_analyser = number_of_spectra // number_of_analysers\n len_spectrum = len(scan.mca[analyser_index])\n\n mca_array = numpy.empty((number_of_spectra_per_analyser, len_spectrum))\n\n for i in range(number_of_spectra_per_analyser):\n mca_array[i, :] = scan.mca[analyser_index + i * number_of_analysers]\n\n return mca_array\n\n\n# Node classes\nclass SpecH5Dataset(object):\n \"\"\"This convenience class is to be inherited by all datasets, for\n compatibility purpose with code that tests for\n ``isinstance(obj, SpecH5Dataset)``.\n\n This legacy behavior is deprecated. The correct way to test\n if an object is a dataset is to use :meth:`silx.io.utils.is_dataset`.\n\n Datasets must also inherit :class:`SpecH5NodeDataset` or\n :class:`SpecH5LazyNodeDataset` which actually implement all the\n API.\"\"\"\n pass\n\n\nclass SpecH5NodeDataset(commonh5.Dataset, SpecH5Dataset):\n \"\"\"This class inherits :class:`commonh5.Dataset`, to which it adds\n little extra functionality. 
The main additional functionality is the\n proxy behavior that allows to mimic the numpy array stored in this\n class.\n \"\"\"\n def __init__(self, name, data, parent=None, attrs=None):\n # get proper value types, to inherit from numpy\n # attributes (dtype, shape, size)\n if isinstance(data, six.string_types):\n # use unicode (utf-8 when saved to HDF5 output)\n value = to_h5py_utf8(data)\n elif isinstance(data, float):\n # use 32 bits for float scalars\n value = numpy.float32(data)\n elif isinstance(data, int):\n value = numpy.int_(data)\n else:\n # Enforce numpy array\n array = numpy.array(data)\n data_kind = array.dtype.kind\n\n if data_kind in [\"S\", \"U\"]:\n value = numpy.asarray(array,\n dtype=text_dtype)\n elif data_kind in [\"f\"]:\n value = numpy.asarray(array, dtype=numpy.float32)\n else:\n value = array\n commonh5.Dataset.__init__(self, name, value, parent, attrs)\n\n def __getattr__(self, item):\n \"\"\"Proxy to underlying numpy array methods.\n \"\"\"\n if hasattr(self[()], item):\n return getattr(self[()], item)\n\n raise AttributeError(\"SpecH5Dataset has no attribute %s\" % item)\n\n\nclass SpecH5LazyNodeDataset(commonh5.LazyLoadableDataset, SpecH5Dataset):\n \"\"\"This class inherits :class:`commonh5.LazyLoadableDataset`,\n to which it adds a proxy behavior that allows to mimic the numpy\n array stored in this class.\n\n The class has to be inherited and the :meth:`_create_data` method has to be\n implemented to return the numpy data exposed by the dataset. This factory\n method is only called once, when the data is needed.\n \"\"\"\n def __getattr__(self, item):\n \"\"\"Proxy to underlying numpy array methods.\n \"\"\"\n if hasattr(self[()], item):\n return getattr(self[()], item)\n\n raise AttributeError(\"SpecH5Dataset has no attribute %s\" % item)\n\n def _create_data(self):\n \"\"\"\n Factory to create the data exposed by the dataset when it is needed.\n\n It has to be implemented for the class to work.\n\n :rtype: numpy.ndarray\n \"\"\"\n raise NotImplementedError()\n\n\nclass SpecH5Group(object):\n \"\"\"This convenience class is to be inherited by all groups, for\n compatibility purposes with code that tests for\n ``isinstance(obj, SpecH5Group)``.\n\n This legacy behavior is deprecated. 
The correct way to test\n if an object is a group is to use :meth:`silx.io.utils.is_group`.\n\n Groups must also inherit :class:`silx.io.commonh5.Group`, which\n actually implements all the methods and attributes.\"\"\"\n pass\n\n\nclass SpecH5(commonh5.File, SpecH5Group):\n \"\"\"This class opens a SPEC file and exposes it as a *h5py.File*.\n\n It inherits :class:`silx.io.commonh5.Group` (via :class:`commonh5.File`),\n which implements most of its API.\n \"\"\"\n\n def __init__(self, filename):\n \"\"\"\n :param filename: Path to SpecFile in filesystem\n :type filename: str\n \"\"\"\n if isinstance(filename, io.IOBase):\n # see https://github.com/silx-kit/silx/issues/858\n filename = filename.name\n\n self._sf = SpecFile(filename)\n\n attrs = {\"NX_class\": to_h5py_utf8(\"NXroot\"),\n \"file_time\": to_h5py_utf8(\n datetime.datetime.now().isoformat()),\n \"file_name\": to_h5py_utf8(filename),\n \"creator\": to_h5py_utf8(\"silx spech5 %s\" % silx_version)}\n commonh5.File.__init__(self, filename, attrs=attrs)\n\n for scan_key in self._sf.keys():\n scan = self._sf[scan_key]\n scan_group = ScanGroup(scan_key, parent=self, scan=scan)\n self.add_node(scan_group)\n\n def close(self):\n self._sf.close()\n self._sf = None\n\n\nclass ScanGroup(commonh5.Group, SpecH5Group):\n def __init__(self, scan_key, parent, scan):\n \"\"\"\n\n :param parent: parent Group\n :param str scan_key: Scan key (e.g. \"1.1\")\n :param scan: specfile.Scan object\n \"\"\"\n commonh5.Group.__init__(self, scan_key, parent=parent,\n attrs={\"NX_class\": to_h5py_utf8(\"NXentry\")})\n\n # take title in #S after stripping away scan number and spaces\n s_hdr_line = scan.scan_header_dict[\"S\"]\n title = s_hdr_line.lstrip(\"0123456789\").lstrip()\n self.add_node(SpecH5NodeDataset(name=\"title\",\n data=to_h5py_utf8(title),\n parent=self))\n\n if \"D\" in scan.scan_header_dict:\n try:\n start_time_str = spec_date_to_iso8601(scan.scan_header_dict[\"D\"])\n except (IndexError, ValueError):\n logger1.warning(\"Could not parse date format in scan %s header.\" +\n \" Using original date not converted to ISO-8601\",\n scan_key)\n start_time_str = scan.scan_header_dict[\"D\"]\n elif \"D\" in scan.file_header_dict:\n logger1.warning(\"No #D line in scan %s header. \" +\n \"Using file header for start_time.\",\n scan_key)\n try:\n start_time_str = spec_date_to_iso8601(scan.file_header_dict[\"D\"])\n except (IndexError, ValueError):\n logger1.warning(\"Could not parse date format in scan %s header. \" +\n \"Using original date not converted to ISO-8601\",\n scan_key)\n start_time_str = scan.file_header_dict[\"D\"]\n else:\n logger1.warning(\"No #D line in %s header. 
Setting date to empty string.\",\n scan_key)\n start_time_str = \"\"\n self.add_node(SpecH5NodeDataset(name=\"start_time\",\n data=to_h5py_utf8(start_time_str),\n parent=self))\n\n self.add_node(InstrumentGroup(parent=self, scan=scan))\n self.add_node(MeasurementGroup(parent=self, scan=scan))\n if _unit_cell_in_scan(scan) or _ub_matrix_in_scan(scan):\n self.add_node(SampleGroup(parent=self, scan=scan))\n\n\nclass InstrumentGroup(commonh5.Group, SpecH5Group):\n def __init__(self, parent, scan):\n \"\"\"\n\n :param parent: parent Group\n :param scan: specfile.Scan object\n \"\"\"\n commonh5.Group.__init__(self, name=\"instrument\", parent=parent,\n attrs={\"NX_class\": to_h5py_utf8(\"NXinstrument\")})\n\n self.add_node(InstrumentSpecfileGroup(parent=self, scan=scan))\n self.add_node(PositionersGroup(parent=self, scan=scan))\n\n num_analysers = _get_number_of_mca_analysers(scan)\n for anal_idx in range(num_analysers):\n self.add_node(InstrumentMcaGroup(parent=self,\n analyser_index=anal_idx,\n scan=scan))\n\n\nclass InstrumentSpecfileGroup(commonh5.Group, SpecH5Group):\n def __init__(self, parent, scan):\n commonh5.Group.__init__(self, name=\"specfile\", parent=parent,\n attrs={\"NX_class\": to_h5py_utf8(\"NXcollection\")})\n self.add_node(SpecH5NodeDataset(\n name=\"file_header\",\n data=to_h5py_utf8(scan.file_header),\n parent=self,\n attrs={}))\n self.add_node(SpecH5NodeDataset(\n name=\"scan_header\",\n data=to_h5py_utf8(scan.scan_header),\n parent=self,\n attrs={}))\n\n\nclass PositionersGroup(commonh5.Group, SpecH5Group):\n def __init__(self, parent, scan):\n commonh5.Group.__init__(self, name=\"positioners\", parent=parent,\n attrs={\"NX_class\": to_h5py_utf8(\"NXcollection\")})\n for motor_name in scan.motor_names:\n safe_motor_name = motor_name.replace(\"/\", \"%\")\n if motor_name in scan.labels and scan.data.shape[0] > 0:\n # return a data column if one has the same label as the motor\n motor_value = scan.data_column_by_name(motor_name)\n else:\n # Take value from #P scan header.\n # (may return float(\"inf\") if #P line is missing from scan hdr)\n motor_value = scan.motor_position_by_name(motor_name)\n self.add_node(SpecH5NodeDataset(name=safe_motor_name,\n data=motor_value,\n parent=self))\n\n\nclass InstrumentMcaGroup(commonh5.Group, SpecH5Group):\n def __init__(self, parent, analyser_index, scan):\n name = \"mca_%d\" % analyser_index\n commonh5.Group.__init__(self, name=name, parent=parent,\n attrs={\"NX_class\": to_h5py_utf8(\"NXdetector\")})\n\n mcaDataDataset = McaDataDataset(parent=self,\n analyser_index=analyser_index,\n scan=scan)\n self.add_node(mcaDataDataset)\n spectrum_length = mcaDataDataset.shape[-1]\n mcaDataDataset = None\n\n if len(scan.mca.channels) == 1:\n # single @CALIB line applying to multiple devices\n calibration_dataset = scan.mca.calibration[0]\n channels_dataset = scan.mca.channels[0]\n else:\n calibration_dataset = scan.mca.calibration[analyser_index]\n channels_dataset = scan.mca.channels[analyser_index]\n\n channels_length = len(channels_dataset) \n if (channels_length > 1) and (spectrum_length > 0):\n logger1.info(\"Spectrum and channels length mismatch\")\n # this should always be the case\n if channels_length > spectrum_length:\n channels_dataset = channels_dataset[:spectrum_length]\n elif channels_length < spectrum_length:\n # only trust first channel and increment\n channel0 = channels_dataset[0]\n increment = channels_dataset[1] - channels_dataset[0]\n channels_dataset = numpy.linspace(channel0,\n channel0 + increment * spectrum_length,\n 
spectrum_length, endpoint=False)\n\n self.add_node(SpecH5NodeDataset(name=\"calibration\",\n data=calibration_dataset,\n parent=self))\n self.add_node(SpecH5NodeDataset(name=\"channels\",\n data=channels_dataset,\n parent=self))\n\n if \"CTIME\" in scan.mca_header_dict:\n ctime_line = scan.mca_header_dict['CTIME']\n preset_time, live_time, elapsed_time = _parse_ctime(ctime_line, analyser_index)\n self.add_node(SpecH5NodeDataset(name=\"preset_time\",\n data=preset_time,\n parent=self))\n self.add_node(SpecH5NodeDataset(name=\"live_time\",\n data=live_time,\n parent=self))\n self.add_node(SpecH5NodeDataset(name=\"elapsed_time\",\n data=elapsed_time,\n parent=self))\n\n\nclass McaDataDataset(SpecH5LazyNodeDataset):\n \"\"\"Lazy loadable dataset for MCA data\"\"\"\n def __init__(self, parent, analyser_index, scan):\n commonh5.LazyLoadableDataset.__init__(\n self, name=\"data\", parent=parent,\n attrs={\"interpretation\": to_h5py_utf8(\"spectrum\"),})\n self._scan = scan\n self._analyser_index = analyser_index\n self._shape = None\n self._num_analysers = _get_number_of_mca_analysers(self._scan)\n\n def _create_data(self):\n return _demultiplex_mca(self._scan, self._analyser_index)\n\n @property\n def shape(self):\n if self._shape is None:\n num_spectra_in_file = len(self._scan.mca)\n num_spectra_per_analyser = num_spectra_in_file // self._num_analysers\n len_spectrum = len(self._scan.mca[self._analyser_index])\n self._shape = num_spectra_per_analyser, len_spectrum\n return self._shape\n\n @property\n def size(self):\n return numpy.prod(self.shape, dtype=numpy.intp)\n\n @property\n def dtype(self):\n # we initialize the data with numpy.empty() without specifying a dtype\n # in _demultiplex_mca()\n return numpy.empty((1, )).dtype\n\n def __len__(self):\n return self.shape[0]\n\n def __getitem__(self, item):\n # optimization for fetching a single spectrum if data not already loaded\n if not self._is_initialized:\n if isinstance(item, six.integer_types):\n if item < 0:\n # negative indexing\n item += len(self)\n return self._scan.mca[self._analyser_index +\n item * self._num_analysers]\n # accessing a slice or element of a single spectrum [i, j:k]\n try:\n spectrum_idx, channel_idx_or_slice = item\n assert isinstance(spectrum_idx, six.integer_types)\n except (ValueError, TypeError, AssertionError):\n pass\n else:\n if spectrum_idx < 0:\n item += len(self)\n idx = self._analyser_index + spectrum_idx * self._num_analysers\n return self._scan.mca[idx][channel_idx_or_slice]\n\n return super(McaDataDataset, self).__getitem__(item)\n\n\nclass MeasurementGroup(commonh5.Group, SpecH5Group):\n def __init__(self, parent, scan):\n \"\"\"\n\n :param parent: parent Group\n :param scan: specfile.Scan object\n \"\"\"\n commonh5.Group.__init__(self, name=\"measurement\", parent=parent,\n attrs={\"NX_class\": to_h5py_utf8(\"NXcollection\"),})\n for label in scan.labels:\n safe_label = label.replace(\"/\", \"%\")\n self.add_node(SpecH5NodeDataset(name=safe_label,\n data=scan.data_column_by_name(label),\n parent=self))\n\n num_analysers = _get_number_of_mca_analysers(scan)\n for anal_idx in range(num_analysers):\n self.add_node(MeasurementMcaGroup(parent=self, analyser_index=anal_idx))\n\n\nclass MeasurementMcaGroup(commonh5.Group, SpecH5Group):\n def __init__(self, parent, analyser_index):\n basename = \"mca_%d\" % analyser_index\n commonh5.Group.__init__(self, name=basename, parent=parent,\n attrs={})\n\n target_name = self.name.replace(\"measurement\", \"instrument\")\n 
self.add_node(commonh5.SoftLink(name=\"data\",\n path=target_name + \"/data\",\n parent=self))\n self.add_node(commonh5.SoftLink(name=\"info\",\n path=target_name,\n parent=self))\n\n\nclass SampleGroup(commonh5.Group, SpecH5Group):\n def __init__(self, parent, scan):\n \"\"\"\n\n :param parent: parent Group\n :param scan: specfile.Scan object\n \"\"\"\n commonh5.Group.__init__(self, name=\"sample\", parent=parent,\n attrs={\"NX_class\": to_h5py_utf8(\"NXsample\"),})\n\n if _unit_cell_in_scan(scan):\n self.add_node(SpecH5NodeDataset(name=\"unit_cell\",\n data=_parse_unit_cell(scan.scan_header_dict[\"G1\"]),\n parent=self,\n attrs={\"interpretation\": to_h5py_utf8(\"scalar\")}))\n self.add_node(SpecH5NodeDataset(name=\"unit_cell_abc\",\n data=_parse_unit_cell(scan.scan_header_dict[\"G1\"])[0, 0:3],\n parent=self,\n attrs={\"interpretation\": to_h5py_utf8(\"scalar\")}))\n self.add_node(SpecH5NodeDataset(name=\"unit_cell_alphabetagamma\",\n data=_parse_unit_cell(scan.scan_header_dict[\"G1\"])[0, 3:6],\n parent=self,\n attrs={\"interpretation\": to_h5py_utf8(\"scalar\")}))\n if _ub_matrix_in_scan(scan):\n self.add_node(SpecH5NodeDataset(name=\"ub_matrix\",\n data=_parse_UB_matrix(scan.scan_header_dict[\"G3\"]),\n parent=self,\n attrs={\"interpretation\": to_h5py_utf8(\"scalar\")}))\n", "#!/usr/bin/env python\n# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2016 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"Module for (filtered) backprojection on the GPU\"\"\"\n\nfrom __future__ import absolute_import, print_function, with_statement, division\n\n__authors__ = [\"A. Mirone, P. 
Paleo\"]\n__license__ = \"MIT\"\n__date__ = \"25/01/2019\"\n\nimport logging\nimport numpy as np\n\nfrom .common import pyopencl\nfrom .processing import EventDescription, OpenclProcessing, BufferDescription\nfrom .sinofilter import SinoFilter\nfrom .sinofilter import fourier_filter as fourier_filter_\nfrom ..utils.deprecation import deprecated\n\nif pyopencl:\n mf = pyopencl.mem_flags\n import pyopencl.array as parray\nelse:\n raise ImportError(\"Please install pyopencl in order to use opencl backprojection\")\nlogger = logging.getLogger(__name__)\n\n\ndef _sizeof(Type):\n \"\"\"\n return the size (in bytes) of a scalar type, like the C behavior\n \"\"\"\n return np.dtype(Type).itemsize\n\n\ndef _idivup(a, b):\n \"\"\"\n return the integer division, plus one if `a` is not a multiple of `b`\n \"\"\"\n return (a + (b - 1)) // b\n\n\nclass Backprojection(OpenclProcessing):\n \"\"\"A class for performing the backprojection using OpenCL\"\"\"\n kernel_files = [\"backproj.cl\", \"array_utils.cl\"]\n\n def __init__(self, sino_shape, slice_shape=None, axis_position=None,\n angles=None, filter_name=None, ctx=None, devicetype=\"all\",\n platformid=None, deviceid=None, profile=False,\n extra_options=None):\n \"\"\"Constructor of the OpenCL (filtered) backprojection\n\n :param sino_shape: shape of the sinogram. The sinogram is in the format\n (n_b, n_a) where n_b is the number of detector bins\n and n_a is the number of angles.\n :param slice_shape: Optional, shape of the reconstructed slice. By\n default, it is a square slice where the dimension\n is the \"x dimension\" of the sinogram (number of\n bins).\n :param axis_position: Optional, axis position. Default is\n `(shape[1]-1)/2.0`.\n :param angles: Optional, a list of custom angles in radian.\n :param filter_name: Optional, name of the filter for FBP. Default is\n the Ram-Lak filter.\n :param ctx: actual working context, left to None for automatic\n initialization from device type or platformid/deviceid\n :param devicetype: type of device, can be \"CPU\", \"GPU\", \"ACC\" or \"ALL\"\n :param platformid: integer with the platform_identifier, as given by\n clinfo\n :param deviceid: Integer with the device identifier, as given by clinfo\n :param profile: switch on profiling to be able to profile at the kernel\n level, store profiling elements (makes code slightly\n slower)\n :param extra_options: Advanced extra options in the form of a dict.\n Current options are: cutoff,\n \"\"\"\n # OS X enforces a workgroup size of 1 when the kernel has\n # synchronization barriers if sys.platform.startswith('darwin'):\n # assuming no discrete GPU\n # raise NotImplementedError(\"Backprojection is not implemented on CPU for OS X yet\")\n\n OpenclProcessing.__init__(self, ctx=ctx, devicetype=devicetype,\n platformid=platformid, deviceid=deviceid,\n profile=profile)\n\n self._init_geometry(sino_shape, slice_shape, angles, axis_position,\n extra_options)\n self._allocate_memory()\n self._compute_angles()\n self._init_kernels()\n self._init_filter(filter_name)\n\n def _init_geometry(self, sino_shape, slice_shape, angles, axis_position,\n extra_options):\n \"\"\"Geometry Initialization\n\n :param sino_shape: shape of the sinogram. The sinogram is in the format\n (n_b, n_a) where n_b is the number of detector bins\n and n_a is the number of angles.\n :param slice_shape: shape of the reconstructed slice. 
By\n default, it is a square slice where the dimension\n is the \"x dimension\" of the sinogram (number of\n bins).\n :param angles: list of projection angles in radian.\n :param axis_position: axis position\n :param dict extra_options: Advanced extra options\n \"\"\"\n self.shape = sino_shape\n self.num_bins = np.int32(sino_shape[1])\n self.num_projs = np.int32(sino_shape[0])\n self.angles = angles\n if slice_shape is None:\n self.slice_shape = (self.num_bins, self.num_bins)\n else:\n self.slice_shape = slice_shape\n self.dimrec_shape = (\n _idivup(self.slice_shape[0], 32) * 32,\n _idivup(self.slice_shape[1], 32) * 32\n )\n if axis_position:\n self.axis_pos = np.float32(axis_position)\n else:\n self.axis_pos = np.float32((sino_shape[1] - 1.) / 2)\n self.axis_array = None # TODO: add axis correction front-end\n self._init_extra_options(extra_options)\n\n def _init_extra_options(self, extra_options):\n \"\"\"Backprojection extra option initialization\n\n :param dict extra_options: Advanced extra options\n \"\"\"\n self.extra_options = {\n \"cutoff\": 1.,\n }\n if extra_options is not None:\n self.extra_options.update(extra_options)\n\n def _allocate_memory(self):\n # Host memory\n self.slice = np.zeros(self.dimrec_shape, dtype=np.float32)\n self.is_cpu = False\n if self.device.type == \"CPU\":\n self.is_cpu = True\n\n # Device memory\n self.buffers = [\n BufferDescription(\"_d_slice\", self.dimrec_shape, np.float32, mf.READ_WRITE),\n BufferDescription(\"d_sino\", self.shape, np.float32, mf.READ_WRITE), # before transferring to texture (if available)\n BufferDescription(\"d_cos\", (self.num_projs,), np.float32, mf.READ_ONLY),\n BufferDescription(\"d_sin\", (self.num_projs,), np.float32, mf.READ_ONLY),\n BufferDescription(\"d_axes\", (self.num_projs,), np.float32, mf.READ_ONLY),\n ]\n self.allocate_buffers(use_array=True)\n self.d_sino = self.cl_mem[\"d_sino\"] # shorthand\n\n # Texture memory (if relevant)\n if not(self.is_cpu):\n self._allocate_textures()\n\n # Local memory\n self.local_mem = 256 * 3 * _sizeof(np.float32) # constant for all image sizes\n\n def _compute_angles(self):\n if self.angles is None:\n self.angles = np.linspace(0, np.pi, self.num_projs, False)\n h_cos = np.cos(self.angles).astype(np.float32)\n h_sin = np.sin(self.angles).astype(np.float32)\n self.cl_mem[\"d_cos\"][:] = h_cos[:]\n self.cl_mem[\"d_sin\"][:] = h_sin[:]\n if self.axis_array:\n self.cl_mem[\"d_axes\"][:] = self.axis_array.astype(np.float32)[:]\n else:\n self.cl_mem[\"d_axes\"][:] = np.ones(self.num_projs, dtype=\"f\") * self.axis_pos\n\n def _init_kernels(self):\n OpenclProcessing.compile_kernels(self, self.kernel_files)\n # check that workgroup can actually be (16, 16)\n self.compiletime_workgroup_size = self.kernels.max_workgroup_size(\"backproj_cpu_kernel\")\n # Workgroup and ndrange sizes are always the same\n self.wg = (16, 16)\n self.ndrange = (\n _idivup(int(self.dimrec_shape[1]), 32) * self.wg[0],\n _idivup(int(self.dimrec_shape[0]), 32) * self.wg[1]\n )\n # Prepare arguments for the kernel call\n if self.is_cpu:\n d_sino_ref = self.d_sino.data\n else:\n d_sino_ref = self.d_sino_tex\n self._backproj_kernel_args = (\n # num of projections (int32)\n self.num_projs,\n # num of bins (int32)\n self.num_bins,\n # axis position (float32)\n self.axis_pos,\n # d_slice (__global float32*)\n self.cl_mem[\"_d_slice\"].data,\n # d_sino (__read_only image2d_t or float*)\n d_sino_ref,\n # gpu_offset_x (float32) # TODO custom ?\n -np.float32((self.num_bins - 1) / 2. 
- self.axis_pos),\n # gpu_offset_y (float32) # TODO custom ?\n -np.float32((self.num_bins - 1) / 2. - self.axis_pos),\n # d_cos (__global float32*)\n self.cl_mem[\"d_cos\"].data,\n # d_sin (__global float32*)\n self.cl_mem[\"d_sin\"].data,\n # d_axis (__global float32*)\n self.cl_mem[\"d_axes\"].data,\n # shared mem (__local float32*)\n self._get_local_mem()\n )\n\n def _allocate_textures(self):\n \"\"\"\n Allocate the texture for the sinogram.\n \"\"\"\n self.d_sino_tex = pyopencl.Image(\n self.ctx,\n mf.READ_ONLY | mf.USE_HOST_PTR,\n pyopencl.ImageFormat(\n pyopencl.channel_order.INTENSITY,\n pyopencl.channel_type.FLOAT\n ),\n hostbuf=np.zeros(self.shape[::-1], dtype=np.float32)\n )\n\n def _init_filter(self, filter_name):\n \"\"\"Filter initialization\n\n :param str filter_name: filter name\n \"\"\"\n self.filter_name = filter_name or \"ram-lak\"\n self.sino_filter = SinoFilter(\n self.shape,\n ctx=self.ctx,\n filter_name=self.filter_name,\n extra_options=self.extra_options,\n )\n\n def _get_local_mem(self):\n return pyopencl.LocalMemory(self.local_mem) # constant for all image sizes\n\n def _cpy2d_to_slice(self, dst):\n ndrange = (int(self.slice_shape[1]), int(self.slice_shape[0]))\n slice_shape_ocl = np.int32(ndrange)\n wg = None\n kernel_args = (\n dst.data,\n self.cl_mem[\"_d_slice\"].data,\n np.int32(self.slice_shape[1]),\n np.int32(self.dimrec_shape[1]),\n np.int32((0, 0)),\n np.int32((0, 0)),\n slice_shape_ocl\n )\n return self.kernels.cpy2d(self.queue, ndrange, wg, *kernel_args)\n\n def _transfer_to_texture(self, sino):\n if isinstance(sino, parray.Array):\n return self._transfer_device_to_texture(sino)\n sino2 = sino\n if not(sino.flags[\"C_CONTIGUOUS\"] and sino.dtype == np.float32):\n sino2 = np.ascontiguousarray(sino, dtype=np.float32)\n if self.is_cpu:\n ev = pyopencl.enqueue_copy(\n self.queue,\n self.d_sino.data,\n sino2\n )\n what = \"transfer filtered sino H->D buffer\"\n ev.wait()\n else:\n ev = pyopencl.enqueue_copy(\n self.queue,\n self.d_sino_tex,\n sino2,\n origin=(0, 0),\n region=self.shape[::-1]\n )\n what = \"transfer filtered sino H->D texture\"\n return EventDescription(what, ev)\n\n def _transfer_device_to_texture(self, d_sino):\n if self.is_cpu:\n if id(self.d_sino) == id(d_sino):\n return\n ev = pyopencl.enqueue_copy(\n self.queue,\n self.d_sino.data,\n d_sino\n )\n what = \"transfer filtered sino D->D buffer\"\n ev.wait()\n else:\n ev = pyopencl.enqueue_copy(\n self.queue,\n self.d_sino_tex,\n d_sino.data,\n offset=0,\n origin=(0, 0),\n region=self.shape[::-1]\n )\n what = \"transfer filtered sino D->D texture\"\n return EventDescription(what, ev)\n\n def backprojection(self, sino, output=None):\n \"\"\"Perform the backprojection on an input sinogram\n\n :param sino: sinogram.\n :param output: optional, output slice.\n If provided, the result will be written in this array.\n :return: backprojection of sinogram\n \"\"\"\n events = []\n with self.sem:\n events.append(self._transfer_to_texture(sino))\n # Call the backprojection kernel\n if self.is_cpu:\n kernel_to_call = self.kernels.backproj_cpu_kernel\n else:\n kernel_to_call = self.kernels.backproj_kernel\n kernel_to_call(\n self.queue,\n self.ndrange,\n self.wg,\n *self._backproj_kernel_args\n )\n # Return\n if output is None:\n res = self.cl_mem[\"_d_slice\"].get()\n res = res[:self.slice_shape[0], :self.slice_shape[1]]\n else:\n res = output\n self._cpy2d_to_slice(output)\n\n # /with self.sem\n if self.profile:\n self.events += events\n\n return res\n\n def filtered_backprojection(self, sino, 
output=None):\n \"\"\"\n Compute the filtered backprojection (FBP) on a sinogram.\n\n :param sino: sinogram (`np.ndarray` or `pyopencl.array.Array`)\n with the shape (n_projections, n_bins)\n :param output: output (`np.ndarray` or `pyopencl.array.Array`).\n If nothing is provided, a new numpy array is returned.\n \"\"\"\n # Filter\n self.sino_filter(sino, output=self.d_sino)\n # Backproject\n res = self.backprojection(self.d_sino, output=output)\n return res\n\n __call__ = filtered_backprojection\n\n\n # -------------------\n # - Compatibility -\n # -------------------\n\n @deprecated(replacement=\"Backprojection.sino_filter\", since_version=\"0.10\")\n def filter_projections(self, sino, rescale=True):\n self.sino_filter(sino, output=self.d_sino)\n\n\n\ndef fourier_filter(sino, filter_=None, fft_size=None):\n return fourier_filter_(sino, filter_=filter_, fft_size=fft_size)\n\n", "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2016 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"Tests for weakref module\"\"\"\n\n__authors__ = [\"V. 
Valls\"]\n__license__ = \"MIT\"\n__date__ = \"02/10/2017\"\n\n\nimport unittest\nimport pickle\nimport numpy\nfrom ..proxy import Proxy\n\n\nclass Thing(object):\n\n def __init__(self, value):\n self.value = value\n\n def __getitem__(self, selection):\n return selection + 1\n\n def method(self, value):\n return value + 2\n\n\nclass InheritedProxy(Proxy):\n \"\"\"Inheriting the proxy allow to specialisze methods\"\"\"\n\n def __init__(self, obj, value):\n Proxy.__init__(self, obj)\n self.value = value + 2\n\n def __getitem__(self, selection):\n return selection + 3\n\n def method(self, value):\n return value + 4\n\n\nclass TestProxy(unittest.TestCase):\n \"\"\"Test that the proxy behave as expected\"\"\"\n\n def text_init(self):\n obj = Thing(10)\n p = Proxy(obj)\n self.assertTrue(isinstance(p, Thing))\n self.assertTrue(isinstance(p, Proxy))\n\n # methods and properties\n\n def test_has_special_method(self):\n obj = Thing(10)\n p = Proxy(obj)\n self.assertTrue(hasattr(p, \"__getitem__\"))\n\n def test_missing_special_method(self):\n obj = Thing(10)\n p = Proxy(obj)\n self.assertFalse(hasattr(p, \"__and__\"))\n\n def test_method(self):\n obj = Thing(10)\n p = Proxy(obj)\n self.assertEqual(p.method(10), obj.method(10))\n\n def test_property(self):\n obj = Thing(10)\n p = Proxy(obj)\n self.assertEqual(p.value, obj.value)\n\n # special functions\n\n def test_getitem(self):\n obj = Thing(10)\n p = Proxy(obj)\n self.assertEqual(p[10], obj[10])\n\n def test_setitem(self):\n obj = numpy.array([10, 20, 30])\n p = Proxy(obj)\n p[0] = 20\n self.assertEqual(obj[0], 20)\n\n def test_slice(self):\n obj = numpy.arange(20)\n p = Proxy(obj)\n expected = obj[0:10:2]\n result = p[0:10:2]\n self.assertEqual(list(result), list(expected))\n\n # binary comparator methods\n\n def test_lt(self):\n obj = numpy.array([20])\n p = Proxy(obj)\n expected = obj < obj\n result = p < p\n self.assertEqual(result, expected)\n\n # binary numeric methods\n\n def test_add(self):\n obj = numpy.array([20])\n proxy = Proxy(obj)\n expected = obj + obj\n result = proxy + proxy\n self.assertEqual(result, expected)\n\n def test_iadd(self):\n expected = numpy.array([20])\n expected += 10\n obj = numpy.array([20])\n result = Proxy(obj)\n result += 10\n self.assertEqual(result, expected)\n\n def test_radd(self):\n obj = numpy.array([20])\n p = Proxy(obj)\n expected = 10 + obj\n result = 10 + p\n self.assertEqual(result, expected)\n\n # binary logical methods\n\n def test_and(self):\n obj = numpy.array([20])\n p = Proxy(obj)\n expected = obj & obj\n result = p & p\n self.assertEqual(result, expected)\n\n def test_iand(self):\n expected = numpy.array([20])\n expected &= 10\n obj = numpy.array([20])\n result = Proxy(obj)\n result &= 10\n self.assertEqual(result, expected)\n\n def test_rand(self):\n obj = numpy.array([20])\n p = Proxy(obj)\n expected = 10 & obj\n result = 10 & p\n self.assertEqual(result, expected)\n\n # unary methods\n\n def test_neg(self):\n obj = numpy.array([20])\n p = Proxy(obj)\n expected = -obj\n result = -p\n self.assertEqual(result, expected)\n\n def test_round(self):\n obj = 20.5\n p = Proxy(obj)\n expected = round(obj)\n result = round(p)\n self.assertEqual(result, expected)\n\n # cast\n\n def test_bool(self):\n obj = True\n p = Proxy(obj)\n if p:\n pass\n else:\n self.fail()\n\n def test_str(self):\n obj = Thing(10)\n p = Proxy(obj)\n expected = str(obj)\n result = str(p)\n self.assertEqual(result, expected)\n\n def test_repr(self):\n obj = Thing(10)\n p = Proxy(obj)\n expected = repr(obj)\n result = repr(p)\n 
self.assertEqual(result, expected)\n\n def test_text_bool(self):\n obj = \"\"\n p = Proxy(obj)\n if p:\n self.fail()\n else:\n pass\n\n def test_text_str(self):\n obj = \"a\"\n p = Proxy(obj)\n expected = str(obj)\n result = str(p)\n self.assertEqual(result, expected)\n\n def test_text_repr(self):\n obj = \"a\"\n p = Proxy(obj)\n expected = repr(obj)\n result = repr(p)\n self.assertEqual(result, expected)\n\n def test_hash(self):\n obj = [0, 1, 2]\n p = Proxy(obj)\n with self.assertRaises(TypeError):\n hash(p)\n obj = (0, 1, 2)\n p = Proxy(obj)\n hash(p)\n\n\nclass TestInheritedProxy(unittest.TestCase):\n \"\"\"Test that inheriting the Proxy class behave as expected\"\"\"\n\n # methods and properties\n\n def test_method(self):\n obj = Thing(10)\n p = InheritedProxy(obj, 11)\n self.assertEqual(p.method(10), 11 + 3)\n\n def test_property(self):\n obj = Thing(10)\n p = InheritedProxy(obj, 11)\n self.assertEqual(p.value, 11 + 2)\n\n # special functions\n\n def test_getitem(self):\n obj = Thing(10)\n p = InheritedProxy(obj, 11)\n self.assertEqual(p[12], 12 + 3)\n\n\nclass TestPickle(unittest.TestCase):\n\n def test_dumps(self):\n obj = Thing(10)\n p = Proxy(obj)\n expected = pickle.dumps(obj)\n result = pickle.dumps(p)\n self.assertEqual(result, expected)\n\n def test_loads(self):\n obj = Thing(10)\n p = Proxy(obj)\n obj2 = pickle.loads(pickle.dumps(p))\n self.assertTrue(isinstance(obj2, Thing))\n self.assertFalse(isinstance(obj2, Proxy))\n self.assertEqual(obj.value, obj2.value)\n\n\ndef suite():\n loadTests = unittest.defaultTestLoader.loadTestsFromTestCase\n test_suite = unittest.TestSuite()\n test_suite.addTest(loadTests(TestProxy))\n test_suite.addTest(loadTests(TestPickle))\n test_suite.addTest(loadTests(TestInheritedProxy))\n return test_suite\n\n\nif __name__ == '__main__':\n unittest.main(defaultTest='suite')\n", "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2014-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This module provides a class wrapping OpenGL 2D and 3D texture.\"\"\"\n\n__authors__ = [\"T. Vincent\"]\n__license__ = \"MIT\"\n__date__ = \"04/10/2016\"\n\n\nimport collections\nfrom ctypes import c_void_p\nimport logging\n\nimport numpy\n\nfrom . 
import gl, utils\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass Texture(object):\n \"\"\"Base class to wrap OpenGL 2D and 3D texture\n\n :param internalFormat: OpenGL texture internal format\n :param data: The data to copy to the texture or None for an empty texture\n :type data: numpy.ndarray or None\n :param format_: Input data format if different from internalFormat\n :param shape: If data is None, shape of the texture\n (height, width) or (depth, height, width)\n :type shape: List[int]\n :param int texUnit: The texture unit to use\n :param minFilter: OpenGL texture minimization filter (default: GL_NEAREST)\n :param magFilter: OpenGL texture magnification filter (default: GL_LINEAR)\n :param wrap: Texture wrap mode for dimensions: (t, s) or (r, t, s)\n If a single value is provided, it used for all dimensions.\n :type wrap: OpenGL wrap mode or 2 or 3-tuple of wrap mode\n \"\"\"\n\n def __init__(self, internalFormat, data=None, format_=None,\n shape=None, texUnit=0,\n minFilter=None, magFilter=None, wrap=None):\n\n self._internalFormat = internalFormat\n if format_ is None:\n format_ = self.internalFormat\n\n if data is None:\n assert shape is not None\n else:\n assert shape is None\n data = numpy.array(data, copy=False, order='C')\n if format_ != gl.GL_RED:\n shape = data.shape[:-1] # Last dimension is channels\n else:\n shape = data.shape\n\n assert len(shape) in (2, 3)\n self._shape = tuple(shape)\n self._ndim = len(shape)\n\n self.texUnit = texUnit\n\n self._name = gl.glGenTextures(1)\n self.bind(self.texUnit)\n\n self._minFilter = None\n self.minFilter = minFilter if minFilter is not None else gl.GL_NEAREST\n\n self._magFilter = None\n self.magFilter = magFilter if magFilter is not None else gl.GL_LINEAR\n\n if wrap is not None:\n if not isinstance(wrap, collections.Iterable):\n wrap = [wrap] * self.ndim\n\n assert len(wrap) == self.ndim\n\n gl.glTexParameter(self.target,\n gl.GL_TEXTURE_WRAP_S,\n wrap[-1])\n gl.glTexParameter(self.target,\n gl.GL_TEXTURE_WRAP_T,\n wrap[-2])\n if self.ndim == 3:\n gl.glTexParameter(self.target,\n gl.GL_TEXTURE_WRAP_R,\n wrap[0])\n\n gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)\n\n # This are the defaults, useless to set if not modified\n # gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, 0)\n # gl.glPixelStorei(gl.GL_UNPACK_SKIP_PIXELS, 0)\n # gl.glPixelStorei(gl.GL_UNPACK_SKIP_ROWS, 0)\n # gl.glPixelStorei(gl.GL_UNPACK_IMAGE_HEIGHT, 0)\n # gl.glPixelStorei(gl.GL_UNPACK_SKIP_IMAGES, 0)\n\n if data is None:\n data = c_void_p(0)\n type_ = gl.GL_UNSIGNED_BYTE\n else:\n type_ = utils.numpyToGLType(data.dtype)\n\n if self.ndim == 2:\n _logger.debug(\n 'Creating 2D texture shape: (%d, %d),'\n ' internal format: %s, format: %s, type: %s',\n self.shape[0], self.shape[1],\n str(self.internalFormat), str(format_), str(type_))\n\n gl.glTexImage2D(\n gl.GL_TEXTURE_2D,\n 0,\n self.internalFormat,\n self.shape[1],\n self.shape[0],\n 0,\n format_,\n type_,\n data)\n else:\n _logger.debug(\n 'Creating 3D texture shape: (%d, %d, %d),'\n ' internal format: %s, format: %s, type: %s',\n self.shape[0], self.shape[1], self.shape[2],\n str(self.internalFormat), str(format_), str(type_))\n\n gl.glTexImage3D(\n gl.GL_TEXTURE_3D,\n 0,\n self.internalFormat,\n self.shape[2],\n self.shape[1],\n self.shape[0],\n 0,\n format_,\n type_,\n data)\n\n gl.glBindTexture(self.target, 0)\n\n @property\n def target(self):\n \"\"\"OpenGL target type of this texture\"\"\"\n return gl.GL_TEXTURE_2D if self.ndim == 2 else gl.GL_TEXTURE_3D\n\n @property\n def ndim(self):\n \"\"\"The number of 
dimensions: 2 or 3\"\"\"\n return self._ndim\n\n @property\n def internalFormat(self):\n \"\"\"Texture internal format\"\"\"\n return self._internalFormat\n\n @property\n def shape(self):\n \"\"\"Shape of the texture: (height, width) or (depth, height, width)\"\"\"\n return self._shape\n\n @property\n def name(self):\n \"\"\"OpenGL texture name\"\"\"\n if self._name is not None:\n return self._name\n else:\n raise RuntimeError(\n \"No OpenGL texture resource, discard has already been called\")\n\n @property\n def minFilter(self):\n \"\"\"Minifying function parameter (GL_TEXTURE_MIN_FILTER)\"\"\"\n return self._minFilter\n\n @minFilter.setter\n def minFilter(self, minFilter):\n if minFilter != self.minFilter:\n self._minFilter = minFilter\n self.bind()\n gl.glTexParameter(self.target,\n gl.GL_TEXTURE_MIN_FILTER,\n self.minFilter)\n\n @property\n def magFilter(self):\n \"\"\"Magnification function parameter (GL_TEXTURE_MAG_FILTER)\"\"\"\n return self._magFilter\n\n @magFilter.setter\n def magFilter(self, magFilter):\n if magFilter != self.magFilter:\n self._magFilter = magFilter\n self.bind()\n gl.glTexParameter(self.target,\n gl.GL_TEXTURE_MAG_FILTER,\n self.magFilter)\n\n def discard(self):\n \"\"\"Delete associated OpenGL texture\"\"\"\n if self._name is not None:\n gl.glDeleteTextures(self._name)\n self._name = None\n else:\n _logger.warning(\"Discard as already been called\")\n\n def bind(self, texUnit=None):\n \"\"\"Bind the texture to a texture unit.\n\n :param int texUnit: The texture unit to use\n \"\"\"\n if texUnit is None:\n texUnit = self.texUnit\n gl.glActiveTexture(gl.GL_TEXTURE0 + texUnit)\n gl.glBindTexture(self.target, self.name)\n\n # with statement\n\n def __enter__(self):\n self.bind()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n gl.glActiveTexture(gl.GL_TEXTURE0 + self.texUnit)\n gl.glBindTexture(self.target, 0)\n\n def update(self,\n format_,\n data,\n offset=(0, 0, 0),\n texUnit=None):\n \"\"\"Update the content of the texture.\n\n Texture is not resized, so data must fit into texture with the\n given offset.\n\n :param format_: The OpenGL format of the data\n :param data: The data to use to update the texture\n :param offset: The offset in the texture where to copy the data\n :type offset: List[int]\n :param int texUnit:\n The texture unit to use (default: the one provided at init)\n \"\"\"\n data = numpy.array(data, copy=False, order='C')\n\n assert data.ndim == self.ndim\n assert len(offset) >= self.ndim\n for i in range(self.ndim):\n assert offset[i] + data.shape[i] <= self.shape[i]\n\n gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)\n\n # This are the defaults, useless to set if not modified\n # gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, 0)\n # gl.glPixelStorei(gl.GL_UNPACK_SKIP_PIXELS, 0)\n # gl.glPixelStorei(gl.GL_UNPACK_SKIP_ROWS, 0)\n # gl.glPixelStorei(gl.GL_UNPACK_IMAGE_HEIGHT, 0)\n # gl.glPixelStorei(gl.GL_UNPACK_SKIP_IMAGES, 0)\n\n self.bind(texUnit)\n\n type_ = utils.numpyToGLType(data.dtype)\n\n if self.ndim == 2:\n gl.glTexSubImage2D(gl.GL_TEXTURE_2D,\n 0,\n offset[1],\n offset[0],\n data.shape[1],\n data.shape[0],\n format_,\n type_,\n data)\n gl.glBindTexture(gl.GL_TEXTURE_2D, 0)\n else:\n gl.glTexSubImage3D(gl.GL_TEXTURE_3D,\n 0,\n offset[2],\n offset[1],\n offset[0],\n data.shape[2],\n data.shape[1],\n data.shape[0],\n format_,\n type_,\n data)\n gl.glBindTexture(gl.GL_TEXTURE_3D, 0)\n", "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2016-2018 European Synchrotron Radiation 
Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"A cut plane in a 3D texture: hackish implementation...\n\"\"\"\n\nfrom __future__ import absolute_import, division, unicode_literals\n\n__authors__ = [\"T. Vincent\"]\n__license__ = \"MIT\"\n__date__ = \"11/01/2018\"\n\nimport string\nimport numpy\n\nfrom ... import _glutils\nfrom ..._glutils import gl\n\nfrom .function import Colormap\nfrom .primitives import Box, Geometry, PlaneInGroup\nfrom . import transform, utils\n\n\nclass ColormapMesh3D(Geometry):\n \"\"\"A 3D mesh with color from a 3D texture.\"\"\"\n\n _shaders = (\"\"\"\n attribute vec3 position;\n attribute vec3 normal;\n\n uniform mat4 matrix;\n uniform mat4 transformMat;\n //uniform mat3 matrixInvTranspose;\n uniform vec3 dataScale;\n uniform vec3 texCoordsOffset;\n\n varying vec4 vCameraPosition;\n varying vec3 vPosition;\n varying vec3 vNormal;\n varying vec3 vTexCoords;\n\n void main(void)\n {\n vCameraPosition = transformMat * vec4(position, 1.0);\n //vNormal = matrixInvTranspose * normalize(normal);\n vPosition = position;\n vTexCoords = dataScale * position + texCoordsOffset;\n vNormal = normal;\n gl_Position = matrix * vec4(position, 1.0);\n }\n \"\"\",\n string.Template(\"\"\"\n varying vec4 vCameraPosition;\n varying vec3 vPosition;\n varying vec3 vNormal;\n varying vec3 vTexCoords;\n uniform sampler3D data;\n uniform float alpha;\n\n $colormapDecl\n\n $clippingDecl\n $lightingFunction\n\n void main(void)\n {\n float value = texture3D(data, vTexCoords).r;\n vec4 color = $colormapCall(value);\n color.a = alpha;\n\n $clippingCall(vCameraPosition);\n\n gl_FragColor = $lightingCall(color, vPosition, vNormal);\n }\n \"\"\"))\n\n def __init__(self, position, normal, data, copy=True,\n mode='triangles', indices=None, colormap=None):\n assert mode in self._TRIANGLE_MODES\n data = numpy.array(data, copy=copy, order='C')\n assert data.ndim == 3\n self._data = data\n self._texture = None\n self._update_texture = True\n self._update_texture_filter = False\n self._alpha = 1.\n self._colormap = colormap or Colormap() # Default colormap\n self._colormap.addListener(self._cmapChanged)\n self._interpolation = 'linear'\n super(ColormapMesh3D, self).__init__(mode,\n indices,\n position=position,\n normal=normal)\n\n self.isBackfaceVisible = True\n self.textureOffset = 0., 0., 0.\n \"\"\"Offset to add to texture coordinates\"\"\"\n\n def setData(self, data, copy=True):\n data = 
numpy.array(data, copy=copy, order='C')\n assert data.ndim == 3\n self._data = data\n self._update_texture = True\n\n def getData(self, copy=True):\n return numpy.array(self._data, copy=copy)\n\n @property\n def interpolation(self):\n \"\"\"The texture interpolation mode: 'linear' or 'nearest'\"\"\"\n return self._interpolation\n\n @interpolation.setter\n def interpolation(self, interpolation):\n assert interpolation in ('linear', 'nearest')\n self._interpolation = interpolation\n self._update_texture_filter = True\n self.notify()\n\n @property\n def alpha(self):\n \"\"\"Transparency of the plane, float in [0, 1]\"\"\"\n return self._alpha\n\n @alpha.setter\n def alpha(self, alpha):\n self._alpha = float(alpha)\n\n @property\n def colormap(self):\n \"\"\"The colormap used by this primitive\"\"\"\n return self._colormap\n\n def _cmapChanged(self, source, *args, **kwargs):\n \"\"\"Broadcast colormap changes\"\"\"\n self.notify(*args, **kwargs)\n\n def prepareGL2(self, ctx):\n if self._texture is None or self._update_texture:\n if self._texture is not None:\n self._texture.discard()\n\n if self.interpolation == 'nearest':\n filter_ = gl.GL_NEAREST\n else:\n filter_ = gl.GL_LINEAR\n self._update_texture = False\n self._update_texture_filter = False\n self._texture = _glutils.Texture(\n gl.GL_R32F, self._data, gl.GL_RED,\n minFilter=filter_,\n magFilter=filter_,\n wrap=gl.GL_CLAMP_TO_EDGE)\n\n if self._update_texture_filter:\n self._update_texture_filter = False\n if self.interpolation == 'nearest':\n filter_ = gl.GL_NEAREST\n else:\n filter_ = gl.GL_LINEAR\n self._texture.minFilter = filter_\n self._texture.magFilter = filter_\n\n super(ColormapMesh3D, self).prepareGL2(ctx)\n\n def renderGL2(self, ctx):\n fragment = self._shaders[1].substitute(\n clippingDecl=ctx.clipper.fragDecl,\n clippingCall=ctx.clipper.fragCall,\n lightingFunction=ctx.viewport.light.fragmentDef,\n lightingCall=ctx.viewport.light.fragmentCall,\n colormapDecl=self.colormap.decl,\n colormapCall=self.colormap.call\n )\n program = ctx.glCtx.prog(self._shaders[0], fragment)\n program.use()\n\n ctx.viewport.light.setupProgram(ctx, program)\n self.colormap.setupProgram(ctx, program)\n\n if not self.isBackfaceVisible:\n gl.glCullFace(gl.GL_BACK)\n gl.glEnable(gl.GL_CULL_FACE)\n\n program.setUniformMatrix('matrix', ctx.objectToNDC.matrix)\n program.setUniformMatrix('transformMat',\n ctx.objectToCamera.matrix,\n safe=True)\n gl.glUniform1f(program.uniforms['alpha'], self._alpha)\n\n shape = self._data.shape\n scales = 1./shape[2], 1./shape[1], 1./shape[0]\n gl.glUniform3f(program.uniforms['dataScale'], *scales)\n gl.glUniform3f(program.uniforms['texCoordsOffset'], *self.textureOffset)\n\n gl.glUniform1i(program.uniforms['data'], self._texture.texUnit)\n\n ctx.clipper.setupProgram(ctx, program)\n\n self._texture.bind()\n self._draw(program)\n\n if not self.isBackfaceVisible:\n gl.glDisable(gl.GL_CULL_FACE)\n\n\nclass CutPlane(PlaneInGroup):\n \"\"\"A cutting plane in a 3D texture\"\"\"\n\n def __init__(self, point=(0., 0., 0.), normal=(0., 0., 1.)):\n self._data = None\n self._mesh = None\n self._alpha = 1.\n self._interpolation = 'linear'\n self._colormap = Colormap()\n super(CutPlane, self).__init__(point, normal)\n\n def setData(self, data, copy=True):\n if data is None:\n self._data = None\n if self._mesh is not None:\n self._children.remove(self._mesh)\n self._mesh = None\n\n else:\n data = numpy.array(data, copy=copy, order='C')\n assert data.ndim == 3\n self._data = data\n if self._mesh is not None:\n self._mesh.setData(data, 
copy=False)\n\n def getData(self, copy=True):\n return None if self._mesh is None else self._mesh.getData(copy=copy)\n\n @property\n def alpha(self):\n return self._alpha\n\n @alpha.setter\n def alpha(self, alpha):\n self._alpha = float(alpha)\n if self._mesh is not None:\n self._mesh.alpha = alpha\n\n @property\n def colormap(self):\n return self._colormap\n\n @property\n def interpolation(self):\n \"\"\"The texture interpolation mode: 'linear' (default) or 'nearest'\"\"\"\n return self._interpolation\n\n @interpolation.setter\n def interpolation(self, interpolation):\n assert interpolation in ('nearest', 'linear')\n if interpolation != self.interpolation:\n self._interpolation = interpolation\n if self._mesh is not None:\n self._mesh.interpolation = interpolation\n self.notify()\n\n def prepareGL2(self, ctx):\n if self.isValid:\n\n contourVertices = self.contourVertices\n\n if self._mesh is None and self._data is not None:\n self._mesh = ColormapMesh3D(contourVertices,\n normal=self.plane.normal,\n data=self._data,\n copy=False,\n mode='fan',\n colormap=self.colormap)\n self._mesh.alpha = self._alpha\n self._mesh.interpolation = self.interpolation\n self._children.insert(0, self._mesh)\n\n if self._mesh is not None:\n if (contourVertices is None or\n len(contourVertices) == 0):\n self._mesh.visible = False\n else:\n self._mesh.visible = True\n self._mesh.setAttribute('normal', self.plane.normal)\n self._mesh.setAttribute('position', contourVertices)\n\n needTextureOffset = False\n if self.interpolation == 'nearest':\n # If cut plane is co-linear with array bin edges add texture offset\n planePt = self.plane.point\n for index, normal in enumerate(((1., 0., 0.),\n (0., 1., 0.),\n (0., 0., 1.))):\n if (numpy.all(numpy.equal(self.plane.normal, normal)) and\n int(planePt[index]) == planePt[index]):\n needTextureOffset = True\n break\n\n if needTextureOffset:\n self._mesh.textureOffset = self.plane.normal * 1e-6\n else:\n self._mesh.textureOffset = 0., 0., 0.\n\n super(CutPlane, self).prepareGL2(ctx)\n\n def renderGL2(self, ctx):\n with self.viewport.light.turnOff():\n super(CutPlane, self).renderGL2(ctx)\n\n def _bounds(self, dataBounds=False):\n if not dataBounds:\n vertices = self.contourVertices\n if vertices is not None:\n return numpy.array(\n (vertices.min(axis=0), vertices.max(axis=0)),\n dtype=numpy.float32)\n else:\n return None # Plane in not slicing the data volume\n else:\n if self._data is None:\n return None\n else:\n depth, height, width = self._data.shape\n return numpy.array(((0., 0., 0.),\n (width, height, depth)),\n dtype=numpy.float32)\n\n @property\n def contourVertices(self):\n \"\"\"The vertices of the contour of the plane/bounds intersection.\"\"\"\n # TODO copy from PlaneInGroup, refactor all that!\n bounds = self.bounds(dataBounds=True)\n if bounds is None:\n return None # No bounds: no vertices\n\n # Check if cache is valid and return it\n cachebounds, cachevertices = self._cache\n if numpy.all(numpy.equal(bounds, cachebounds)):\n return cachevertices\n\n # Cache is not OK, rebuild it\n boxVertices = Box.getVertices(copy=True)\n boxVertices = bounds[0] + boxVertices * (bounds[1] - bounds[0])\n lineIndices = Box.getLineIndices(copy=False)\n vertices = utils.boxPlaneIntersect(\n boxVertices, lineIndices, self.plane.normal, self.plane.point)\n\n self._cache = bounds, vertices if len(vertices) != 0 else None\n\n return self._cache[1]\n\n # Render transforms RW, TODO refactor this!\n @property\n def transforms(self):\n return self._transforms\n\n @transforms.setter\n def 
transforms(self, iterable):\n self._transforms.removeListener(self._transformChanged)\n if isinstance(iterable, transform.TransformList):\n # If it is a TransformList, do not create one to enable sharing.\n self._transforms = iterable\n else:\n assert hasattr(iterable, '__iter__')\n self._transforms = transform.TransformList(iterable)\n self._transforms.addListener(self._transformChanged)\n", "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2019 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This module provides 3D array item class and its sub-items.\n\"\"\"\n\nfrom __future__ import absolute_import\n\n__authors__ = [\"T. Vincent\"]\n__license__ = \"MIT\"\n__date__ = \"24/04/2018\"\n\nimport logging\nimport time\nimport numpy\n\nfrom silx.math.combo import min_max\nfrom silx.math.marchingcubes import MarchingCubes\n\nfrom ... 
import qt\nfrom ...colors import rgba\n\nfrom ..scene import cutplane, primitives, transform, utils\n\nfrom .core import BaseNodeItem, Item3D, ItemChangedType, Item3DChangedType\nfrom .mixins import ColormapMixIn, InterpolationMixIn, PlaneMixIn\nfrom ._pick import PickingResult\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass CutPlane(Item3D, ColormapMixIn, InterpolationMixIn, PlaneMixIn):\n \"\"\"Class representing a cutting plane in a :class:`ScalarField3D` item.\n\n :param parent: 3D Data set in which the cut plane is applied.\n \"\"\"\n\n def __init__(self, parent):\n plane = cutplane.CutPlane(normal=(0, 1, 0))\n\n Item3D.__init__(self, parent=parent)\n ColormapMixIn.__init__(self)\n InterpolationMixIn.__init__(self)\n PlaneMixIn.__init__(self, plane=plane)\n\n self._dataRange = None\n\n self._getScenePrimitive().children = [plane]\n\n # Connect scene primitive to mix-in class\n ColormapMixIn._setSceneColormap(self, plane.colormap)\n InterpolationMixIn._setPrimitive(self, plane)\n\n parent.sigItemChanged.connect(self._parentChanged)\n\n def _parentChanged(self, event):\n \"\"\"Handle data change in the parent this plane belongs to\"\"\"\n if event == ItemChangedType.DATA:\n data = self.sender().getData(copy=False)\n self._getPlane().setData(data, copy=False)\n\n # Store data range info as 3-tuple of values\n self._dataRange = self.sender().getDataRange()\n self._setRangeFromData(\n None if self._dataRange is None else numpy.array(self._dataRange))\n\n self._updated(ItemChangedType.DATA)\n\n # Colormap\n\n def getDisplayValuesBelowMin(self):\n \"\"\"Return whether values <= colormap min are displayed or not.\n\n :rtype: bool\n \"\"\"\n return self._getPlane().colormap.displayValuesBelowMin\n\n def setDisplayValuesBelowMin(self, display):\n \"\"\"Set whether to display values <= colormap min.\n\n :param bool display: True to show values below min,\n False to discard them\n \"\"\"\n display = bool(display)\n if display != self.getDisplayValuesBelowMin():\n self._getPlane().colormap.displayValuesBelowMin = display\n self._updated(ItemChangedType.ALPHA)\n\n def getDataRange(self):\n \"\"\"Return the range of the data as a 3-tuple of values.\n\n positive min is NaN if no data is positive.\n\n :return: (min, positive min, max) or None.\n \"\"\"\n return self._dataRange\n\n def getData(self, copy=True):\n \"\"\"Return 3D dataset.\n\n :param bool copy:\n True (default) to get a copy,\n False to get the internal data (DO NOT modify!)\n :return: The data set (or None if not set)\n \"\"\"\n parent = self.parent()\n return None if parent is None else parent.getData(copy=copy)\n\n def _pickFull(self, context):\n \"\"\"Perform picking in this item at given widget position.\n\n :param PickContext context: Current picking context\n :return: Object holding the results or None\n :rtype: Union[None,PickingResult]\n \"\"\"\n rayObject = context.getPickingSegment(frame=self._getScenePrimitive())\n if rayObject is None:\n return None\n\n points = utils.segmentPlaneIntersect(\n rayObject[0, :3],\n rayObject[1, :3],\n planeNorm=self.getNormal(),\n planePt=self.getPoint())\n\n if len(points) == 1: # Single intersection\n if numpy.any(points[0] < 0.):\n return None # Outside volume\n z, y, x = int(points[0][2]), int(points[0][1]), int(points[0][0])\n\n data = self.getData(copy=False)\n if data is None:\n return None # No dataset\n\n depth, height, width = data.shape\n if z < depth and y < height and x < width:\n return PickingResult(self,\n positions=[points[0]],\n indices=([z], [y], [x]))\n else:\n return 
None # Outside image\n else: # Either no intersection or segment and image are coplanar\n return None\n\n\nclass Isosurface(Item3D):\n \"\"\"Class representing an iso-surface in a :class:`ScalarField3D` item.\n\n :param parent: The DataItem3D this iso-surface belongs to\n \"\"\"\n\n def __init__(self, parent):\n Item3D.__init__(self, parent=parent)\n assert isinstance(parent, ScalarField3D)\n parent.sigItemChanged.connect(self._scalarField3DChanged)\n self._level = float('nan')\n self._autoLevelFunction = None\n self._color = rgba('#FFD700FF')\n self._updateScenePrimitive()\n\n def _scalarField3DChanged(self, event):\n \"\"\"Handle parent's ScalarField3D sigItemChanged\"\"\"\n if event == ItemChangedType.DATA:\n self._updateScenePrimitive()\n\n def getData(self, copy=True):\n \"\"\"Return 3D dataset.\n\n :param bool copy:\n True (default) to get a copy,\n False to get the internal data (DO NOT modify!)\n :return: The data set (or None if not set)\n \"\"\"\n parent = self.parent()\n return None if parent is None else parent.getData(copy=copy)\n\n def getLevel(self):\n \"\"\"Return the level of this iso-surface (float)\"\"\"\n return self._level\n\n def setLevel(self, level):\n \"\"\"Set the value at which to build the iso-surface.\n\n Setting this value reset auto-level function\n\n :param float level: The value at which to build the iso-surface\n \"\"\"\n self._autoLevelFunction = None\n level = float(level)\n if level != self._level:\n self._level = level\n self._updateScenePrimitive()\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n def isAutoLevel(self):\n \"\"\"True if iso-level is rebuild for each data set.\"\"\"\n return self.getAutoLevelFunction() is not None\n\n def getAutoLevelFunction(self):\n \"\"\"Return the function computing the iso-level (callable or None)\"\"\"\n return self._autoLevelFunction\n\n def setAutoLevelFunction(self, autoLevel):\n \"\"\"Set the function used to compute the iso-level.\n\n WARNING: The function might get called in a thread.\n\n :param callable autoLevel:\n A function taking a 3D numpy.ndarray of float32 and returning\n a float used as iso-level.\n Example: numpy.mean(data) + numpy.std(data)\n \"\"\"\n assert callable(autoLevel)\n self._autoLevelFunction = autoLevel\n self._updateScenePrimitive()\n\n def getColor(self):\n \"\"\"Return the color of this iso-surface (QColor)\"\"\"\n return qt.QColor.fromRgbF(*self._color)\n\n def setColor(self, color):\n \"\"\"Set the color of the iso-surface\n\n :param color: RGBA color of the isosurface\n :type color: QColor, str or array-like of 4 float in [0., 1.]\n \"\"\"\n color = rgba(color)\n if color != self._color:\n self._color = color\n primitive = self._getScenePrimitive()\n if len(primitive.children) != 0:\n primitive.children[0].setAttribute('color', self._color)\n self._updated(ItemChangedType.COLOR)\n\n def _updateScenePrimitive(self):\n \"\"\"Update underlying mesh\"\"\"\n self._getScenePrimitive().children = []\n\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = 
level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if not numpy.isfinite(self._level):\n return\n\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) == 0:\n return\n else:\n mesh = primitives.Mesh3D(vertices,\n colors=self._color,\n normals=normals,\n mode='triangles',\n indices=indices)\n self._getScenePrimitive().children = [mesh]\n\n def _pickFull(self, context):\n \"\"\"Perform picking in this item at given widget position.\n\n :param PickContext context: Current picking context\n :return: Object holding the results or None\n :rtype: Union[None,PickingResult]\n \"\"\"\n rayObject = context.getPickingSegment(frame=self._getScenePrimitive())\n if rayObject is None:\n return None\n rayObject = rayObject[:, :3]\n\n data = self.getData(copy=False)\n bins = utils.segmentVolumeIntersect(\n rayObject, numpy.array(data.shape) - 1)\n if bins is None:\n return None\n\n # gather bin data\n offsets = [(i, j, k) for i in (0, 1) for j in (0, 1) for k in (0, 1)]\n indices = bins[:, numpy.newaxis, :] + offsets\n binsData = data[indices[:, :, 0], indices[:, :, 1], indices[:, :, 2]]\n # binsData.shape = nbins, 8\n # TODO up-to this point everything can be done once for all isosurfaces\n\n # check bin candidates\n level = self.getLevel()\n mask = numpy.logical_and(numpy.nanmin(binsData, axis=1) <= level,\n level <= numpy.nanmax(binsData, axis=1))\n bins = bins[mask]\n binsData = binsData[mask]\n\n if len(bins) == 0:\n return None # No bin candidate\n\n # do picking on candidates\n intersections = []\n depths = []\n for currentBin, data in zip(bins, binsData):\n mc = MarchingCubes(data.reshape(2, 2, 2), isolevel=level)\n points = mc.get_vertices() + currentBin\n triangles = points[mc.get_indices()]\n t = utils.segmentTrianglesIntersection(rayObject, triangles)[1]\n t = numpy.unique(t) # Duplicates happen on triangle edges\n if len(t) != 0:\n # Compute intersection points and get closest data point\n points = t.reshape(-1, 1) * (rayObject[1] - rayObject[0]) + rayObject[0]\n # Get closest data points by rounding to int\n intersections.extend(points)\n depths.extend(t)\n\n if len(intersections) == 0:\n return None # No intersected triangles\n\n intersections = numpy.array(intersections)[numpy.argsort(depths)]\n indices = numpy.transpose(numpy.round(intersections).astype(numpy.int))\n return PickingResult(self, positions=intersections, indices=indices)\n\n\nclass ScalarField3D(BaseNodeItem):\n \"\"\"3D scalar field on a regular grid.\n\n :param parent: The View widget this item belongs to.\n \"\"\"\n\n def __init__(self, parent=None):\n BaseNodeItem.__init__(self, parent=parent)\n\n # Gives this item the shape of the data, no matter\n # of the isosurface/cut plane size\n self._boundedGroup = primitives.BoundedGroup()\n\n # Store iso-surfaces\n self._isosurfaces = []\n\n self._data = None\n self._dataRange = None\n\n self._cutPlane = CutPlane(parent=self)\n self._cutPlane.setVisible(False)\n\n self._isogroup = primitives.GroupDepthOffset()\n self._isogroup.transforms = [\n # Convert from z, y, x from marching cubes to x, y, z\n transform.Matrix((\n (0., 0., 1., 0.),\n (0., 1., 0., 0.),\n (1., 0., 0., 0.),\n (0., 0., 0., 1.))),\n # Offset to match cutting plane coords\n transform.Translate(0.5, 0.5, 0.5)\n ]\n\n self._getScenePrimitive().children = [\n self._boundedGroup,\n self._cutPlane._getScenePrimitive(),\n self._isogroup]\n\n def setData(self, data, copy=True):\n 
\"\"\"Set the 3D scalar data represented by this item.\n\n Dataset order is zyx (i.e., first dimension is z).\n\n :param data: 3D array\n :type data: 3D numpy.ndarray of float32 with shape at least (2, 2, 2)\n :param bool copy:\n True (default) to make a copy,\n False to avoid copy (DO NOT MODIFY data afterwards)\n \"\"\"\n if data is None:\n self._data = None\n self._dataRange = None\n self._boundedGroup.shape = None\n\n else:\n data = numpy.array(data, copy=copy, dtype=numpy.float32, order='C')\n assert data.ndim == 3\n assert min(data.shape) >= 2\n\n self._data = data\n\n # Store data range info\n dataRange = min_max(self._data, min_positive=True, finite=True)\n if dataRange.minimum is None: # Only non-finite data\n dataRange = None\n\n if dataRange is not None:\n min_positive = dataRange.min_positive\n if min_positive is None:\n min_positive = float('nan')\n dataRange = dataRange.minimum, min_positive, dataRange.maximum\n self._dataRange = dataRange\n\n self._boundedGroup.shape = self._data.shape\n\n self._updated(ItemChangedType.DATA)\n\n def getData(self, copy=True):\n \"\"\"Return 3D dataset.\n\n :param bool copy:\n True (default) to get a copy,\n False to get the internal data (DO NOT modify!)\n :return: The data set (or None if not set)\n \"\"\"\n if self._data is None:\n return None\n else:\n return numpy.array(self._data, copy=copy)\n\n def getDataRange(self):\n \"\"\"Return the range of the data as a 3-tuple of values.\n\n positive min is NaN if no data is positive.\n\n :return: (min, positive min, max) or None.\n \"\"\"\n return self._dataRange\n\n # Cut Plane\n\n def getCutPlanes(self):\n \"\"\"Return an iterable of all :class:`CutPlane` of this item.\n\n This includes hidden cut planes.\n\n For now, there is always one cut plane.\n \"\"\"\n return (self._cutPlane,)\n\n # Handle iso-surfaces\n\n # TODO rename to sigItemAdded|Removed?\n sigIsosurfaceAdded = qt.Signal(object)\n \"\"\"Signal emitted when a new iso-surface is added to the view.\n\n The newly added iso-surface is provided by this signal\n \"\"\"\n\n sigIsosurfaceRemoved = qt.Signal(object)\n \"\"\"Signal emitted when an iso-surface is removed from the view\n\n The removed iso-surface is provided by this signal.\n \"\"\"\n\n def addIsosurface(self, level, color):\n \"\"\"Add an isosurface to this item.\n\n :param level:\n The value at which to build the iso-surface or a callable\n (e.g., a function) taking a 3D numpy.ndarray as input and\n returning a float.\n Example: numpy.mean(data) + numpy.std(data)\n :type level: float or callable\n :param color: RGBA color of the isosurface\n :type color: str or array-like of 4 float in [0., 1.]\n :return: isosurface object\n :rtype: ~silx.gui.plot3d.items.volume.Isosurface\n \"\"\"\n isosurface = Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface\n\n def getIsosurfaces(self):\n \"\"\"Return an iterable of all :class:`.Isosurface` instance of this item\"\"\"\n return tuple(self._isosurfaces)\n\n def removeIsosurface(self, isosurface):\n \"\"\"Remove an iso-surface from this item.\n\n :param ~silx.gui.plot3d.Plot3DWidget.Isosurface isosurface:\n The isosurface object to remove\n \"\"\"\n if isosurface not in self.getIsosurfaces():\n _logger.warning(\n \"Try to remove isosurface 
that is not in the list: %s\",\n str(isosurface))\n else:\n isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)\n self._isosurfaces.remove(isosurface)\n self._updateIsosurfaces()\n self.sigIsosurfaceRemoved.emit(isosurface)\n\n def clearIsosurfaces(self):\n \"\"\"Remove all :class:`.Isosurface` instances from this item.\"\"\"\n for isosurface in self.getIsosurfaces():\n self.removeIsosurface(isosurface)\n\n def _isosurfaceItemChanged(self, event):\n \"\"\"Handle update of isosurfaces upon level changed\"\"\"\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()\n\n def _updateIsosurfaces(self):\n \"\"\"Handle updates of iso-surfaces level and add/remove\"\"\"\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]\n\n # BaseNodeItem\n\n def getItems(self):\n \"\"\"Returns the list of items currently present in the ScalarField3D.\n\n :rtype: tuple\n \"\"\"\n return self.getCutPlanes() + self.getIsosurfaces()\n", "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This script shows the features of a :mod:`~silx.gui.dialog.ColormapDialog`.\n\"\"\"\n\n__authors__ = [\"V. 
Valls\"]\n__license__ = \"MIT\"\n__date__ = \"14/06/2018\"\n\nimport functools\nimport numpy\n\ntry:\n import scipy\nexcept ImportError:\n scipy = None\n\nfrom silx.gui import qt\nfrom silx.gui.dialog.ColormapDialog import ColormapDialog\nfrom silx.gui.colors import Colormap\nfrom silx.gui.plot.ColorBar import ColorBarWidget\n\n\nclass ColormapDialogExample(qt.QMainWindow):\n \"\"\"PlotWidget with an ad hoc toolbar and a colorbar\"\"\"\n\n def __init__(self, parent=None):\n super(ColormapDialogExample, self).__init__(parent)\n self.setWindowTitle(\"Colormap dialog example\")\n\n self.colormap1 = Colormap(\"viridis\")\n self.colormap2 = Colormap(\"gray\")\n\n self.colorBar = ColorBarWidget(self)\n\n self.colorDialogs = []\n\n options = qt.QWidget(self)\n options.setLayout(qt.QVBoxLayout())\n self.createOptions(options.layout())\n\n mainWidget = qt.QWidget(self)\n mainWidget.setLayout(qt.QHBoxLayout())\n mainWidget.layout().addWidget(options)\n mainWidget.layout().addWidget(self.colorBar)\n self.mainWidget = mainWidget\n\n self.setCentralWidget(mainWidget)\n self.createColorDialog()\n\n def createOptions(self, layout):\n button = qt.QPushButton(\"Create a new dialog\")\n button.clicked.connect(self.createColorDialog)\n layout.addWidget(button)\n\n layout.addSpacing(10)\n\n button = qt.QPushButton(\"Set editable\")\n button.clicked.connect(self.setEditable)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set non-editable\")\n button.clicked.connect(self.setNonEditable)\n layout.addWidget(button)\n\n layout.addSpacing(10)\n\n button = qt.QPushButton(\"Set no colormap\")\n button.clicked.connect(self.setNoColormap)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set colormap 1\")\n button.clicked.connect(self.setColormap1)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set colormap 2\")\n button.clicked.connect(self.setColormap2)\n layout.addWidget(button)\n button = qt.QPushButton(\"Create new colormap\")\n button.clicked.connect(self.setNewColormap)\n layout.addWidget(button)\n\n layout.addSpacing(10)\n\n button = qt.QPushButton(\"Set no histogram\")\n button.clicked.connect(self.setNoHistogram)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set positive histogram\")\n button.clicked.connect(self.setPositiveHistogram)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set neg-pos histogram\")\n button.clicked.connect(self.setNegPosHistogram)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set negative histogram\")\n button.clicked.connect(self.setNegativeHistogram)\n layout.addWidget(button)\n\n layout.addSpacing(10)\n\n button = qt.QPushButton(\"Set no range\")\n button.clicked.connect(self.setNoRange)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set positive range\")\n button.clicked.connect(self.setPositiveRange)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set neg-pos range\")\n button.clicked.connect(self.setNegPosRange)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set negative range\")\n button.clicked.connect(self.setNegativeRange)\n layout.addWidget(button)\n\n layout.addSpacing(10)\n\n button = qt.QPushButton(\"Set no data\")\n button.clicked.connect(self.setNoData)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set shepp logan phantom\")\n button.clicked.connect(self.setSheppLoganPhantom)\n layout.addWidget(button)\n button = qt.QPushButton(\"Set data with non finite\")\n button.clicked.connect(self.setDataWithNonFinite)\n layout.addWidget(button)\n\n layout.addStretch()\n\n def createColorDialog(self):\n 
newDialog = ColormapDialog(self)\n newDialog.finished.connect(functools.partial(self.removeColorDialog, newDialog))\n self.colorDialogs.append(newDialog)\n self.mainWidget.layout().addWidget(newDialog)\n\n def removeColorDialog(self, dialog, result):\n self.colorDialogs.remove(dialog)\n\n def setNoColormap(self):\n self.colorBar.setColormap(None)\n for dialog in self.colorDialogs:\n dialog.setColormap(None)\n\n def setColormap1(self):\n self.colorBar.setColormap(self.colormap1)\n for dialog in self.colorDialogs:\n dialog.setColormap(self.colormap1)\n\n def setColormap2(self):\n self.colorBar.setColormap(self.colormap2)\n for dialog in self.colorDialogs:\n dialog.setColormap(self.colormap2)\n\n def setEditable(self):\n for dialog in self.colorDialogs:\n colormap = dialog.getColormap()\n if colormap is not None:\n colormap.setEditable(True)\n\n def setNonEditable(self):\n for dialog in self.colorDialogs:\n colormap = dialog.getColormap()\n if colormap is not None:\n colormap.setEditable(False)\n\n def setNewColormap(self):\n self.colormap = Colormap(\"inferno\")\n self.colorBar.setColormap(self.colormap)\n for dialog in self.colorDialogs:\n dialog.setColormap(self.colormap)\n\n def setNoHistogram(self):\n for dialog in self.colorDialogs:\n dialog.setHistogram()\n\n def setPositiveHistogram(self):\n histo = [5, 10, 50, 10, 5]\n pos = 1\n edges = list(range(pos, pos + len(histo)))\n for dialog in self.colorDialogs:\n dialog.setHistogram(histo, edges)\n\n def setNegPosHistogram(self):\n histo = [5, 10, 50, 10, 5]\n pos = -2\n edges = list(range(pos, pos + len(histo)))\n for dialog in self.colorDialogs:\n dialog.setHistogram(histo, edges)\n\n def setNegativeHistogram(self):\n histo = [5, 10, 50, 10, 5]\n pos = -30\n edges = list(range(pos, pos + len(histo)))\n for dialog in self.colorDialogs:\n dialog.setHistogram(histo, edges)\n\n def setNoRange(self):\n for dialog in self.colorDialogs:\n dialog.setDataRange()\n\n def setPositiveRange(self):\n for dialog in self.colorDialogs:\n dialog.setDataRange(1, 1, 10)\n\n def setNegPosRange(self):\n for dialog in self.colorDialogs:\n dialog.setDataRange(-10, 1, 10)\n\n def setNegativeRange(self):\n for dialog in self.colorDialogs:\n dialog.setDataRange(-10, float(\"nan\"), -1)\n\n def setNoData(self):\n for dialog in self.colorDialogs:\n dialog.setData(None)\n\n def setSheppLoganPhantom(self):\n from silx.image import phantomgenerator\n data = phantomgenerator.PhantomGenerator.get2DPhantomSheppLogan(256)\n data = data * 1000\n if scipy is not None:\n from scipy import ndimage\n data = ndimage.gaussian_filter(data, sigma=20)\n data = numpy.random.poisson(data)\n self.data = data\n for dialog in self.colorDialogs:\n dialog.setData(data)\n\n def setDataWithNonFinite(self):\n from silx.image import phantomgenerator\n data = phantomgenerator.PhantomGenerator.get2DPhantomSheppLogan(256)\n data = data * 1000\n if scipy is not None:\n from scipy import ndimage\n data = ndimage.gaussian_filter(data, sigma=20)\n data = numpy.random.poisson(data)\n data[10] = float(\"nan\")\n data[50] = float(\"+inf\")\n data[100] = float(\"-inf\")\n self.data = data\n for dialog in self.colorDialogs:\n dialog.setData(data)\n\n\ndef main():\n app = qt.QApplication([])\n\n # Create the ad hoc plot widget and change its default colormap\n example = ColormapDialogExample()\n example.show()\n\n app.exec_()\n\n\nif __name__ == '__main__':\n main()\n", "# -*- coding: utf-8 -*-\n#\n# Project: silx\n# https://github.com/silx-kit/silx\n#\n# Copyright (C) 2012-2016 European Synchrotron 
Radiation Facility, Grenoble, France\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n__authors__ = [\"V. Valls\"]\n__license__ = \"MIT\"\n__date__ = \"18/04/2018\"\n\nimport unittest\nimport numpy\nfrom .._mergeimpl import MarchingSquaresMergeImpl\n\n\nclass TestMergeImplApi(unittest.TestCase):\n\n def test_image_not_an_array(self):\n bad_image = 1\n self.assertRaises(ValueError, MarchingSquaresMergeImpl, bad_image)\n\n def test_image_bad_dim(self):\n bad_image = numpy.array([[[1.0]]])\n self.assertRaises(ValueError, MarchingSquaresMergeImpl, bad_image)\n\n def test_image_not_big_enough(self):\n bad_image = numpy.array([[1.0, 1.0, 1.0, 1.0]])\n self.assertRaises(ValueError, MarchingSquaresMergeImpl, bad_image)\n\n def test_mask_not_an_array(self):\n image = numpy.array([[1.0, 1.0], [1.0, 1.0]])\n bad_mask = 1\n self.assertRaises(ValueError, MarchingSquaresMergeImpl, image, bad_mask)\n\n def test_mask_not_match(self):\n image = numpy.array([[1.0, 1.0], [1.0, 1.0]])\n bad_mask = numpy.array([[1.0, 1.0]])\n self.assertRaises(ValueError, MarchingSquaresMergeImpl, image, bad_mask)\n\n def test_ok_anyway_bad_type(self):\n image = numpy.array([[1.0, 1.0], [1.0, 1.0]], dtype=numpy.int32)\n mask = numpy.array([[1.0, 1.0], [1.0, 1.0]], dtype=numpy.float32)\n MarchingSquaresMergeImpl(image, mask)\n\n def test_find_contours_result(self):\n image = numpy.zeros((2, 2))\n image[0, 0] = 1\n ms = MarchingSquaresMergeImpl(image)\n polygons = ms.find_contours(0.5)\n self.assertIsInstance(polygons, list)\n self.assertTrue(len(polygons), 1)\n self.assertIsInstance(polygons[0], numpy.ndarray)\n self.assertEqual(polygons[0].shape[1], 2)\n self.assertEqual(polygons[0].dtype.kind, \"f\")\n\n def test_find_pixels_result(self):\n image = numpy.zeros((2, 2))\n image[0, 0] = 1\n ms = MarchingSquaresMergeImpl(image)\n pixels = ms.find_pixels(0.5)\n self.assertIsInstance(pixels, numpy.ndarray)\n self.assertEqual(pixels.shape[1], 2)\n self.assertEqual(pixels.dtype.kind, \"i\")\n\n def test_find_contours_empty_result(self):\n image = numpy.zeros((2, 2))\n ms = MarchingSquaresMergeImpl(image)\n polygons = ms.find_contours(0.5)\n self.assertIsInstance(polygons, list)\n self.assertEqual(len(polygons), 0)\n\n def test_find_pixels_empty_result(self):\n image = numpy.zeros((2, 2))\n ms = MarchingSquaresMergeImpl(image)\n pixels = ms.find_pixels(0.5)\n self.assertIsInstance(pixels, numpy.ndarray)\n self.assertEqual(pixels.shape[1], 2)\n self.assertEqual(pixels.shape[0], 0)\n self.assertEqual(pixels.dtype.kind, 
\"i\")\n\n def test_find_contours_yx_result(self):\n image = numpy.zeros((2, 2))\n image[1, 0] = 1\n ms = MarchingSquaresMergeImpl(image)\n polygons = ms.find_contours(0.5)\n polygon = polygons[0]\n self.assertTrue((polygon == (0.5, 0)).any())\n self.assertTrue((polygon == (1, 0.5)).any())\n\n def test_find_pixels_yx_result(self):\n image = numpy.zeros((2, 2))\n image[1, 0] = 1\n ms = MarchingSquaresMergeImpl(image)\n pixels = ms.find_pixels(0.5)\n self.assertTrue((pixels == (1, 0)).any())\n\n\nclass TestMergeImplContours(unittest.TestCase):\n\n def test_merge_segments(self):\n image = numpy.zeros((4, 4))\n image[(2, 3), :] = 1\n ms = MarchingSquaresMergeImpl(image)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 1)\n\n def test_merge_segments_2(self):\n image = numpy.zeros((4, 4))\n image[(2, 3), :] = 1\n image[2, 2] = 0\n ms = MarchingSquaresMergeImpl(image)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 1)\n\n def test_merge_tiles(self):\n image = numpy.zeros((4, 4))\n image[(2, 3), :] = 1\n ms = MarchingSquaresMergeImpl(image, group_size=2)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 1)\n\n def test_fully_masked(self):\n image = numpy.zeros((5, 5))\n image[(2, 3), :] = 1\n mask = numpy.ones((5, 5))\n ms = MarchingSquaresMergeImpl(image, mask)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 0)\n\n def test_fully_masked_minmax(self):\n \"\"\"This invalidates all the tiles. The route is not the same.\"\"\"\n image = numpy.zeros((5, 5))\n image[(2, 3), :] = 1\n mask = numpy.ones((5, 5))\n ms = MarchingSquaresMergeImpl(image, mask, group_size=2, use_minmax_cache=True)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 0)\n\n def test_masked_segments(self):\n image = numpy.zeros((5, 5))\n image[(2, 3, 4), :] = 1\n mask = numpy.zeros((5, 5))\n mask[:, 2] = 1\n ms = MarchingSquaresMergeImpl(image, mask)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 2)\n\n def test_closed_polygon(self):\n image = numpy.zeros((5, 5))\n image[2, 2] = 1\n image[1, 2] = 1\n image[3, 2] = 1\n image[2, 1] = 1\n image[2, 3] = 1\n mask = None\n ms = MarchingSquaresMergeImpl(image, mask)\n polygons = ms.find_contours(0.9)\n self.assertEqual(len(polygons), 1)\n self.assertEqual(list(polygons[0][0]), list(polygons[0][-1]))\n\n def test_closed_polygon_between_tiles(self):\n image = numpy.zeros((5, 5))\n image[2, 2] = 1\n image[1, 2] = 1\n image[3, 2] = 1\n image[2, 1] = 1\n image[2, 3] = 1\n mask = None\n ms = MarchingSquaresMergeImpl(image, mask, group_size=2)\n polygons = ms.find_contours(0.9)\n self.assertEqual(len(polygons), 1)\n self.assertEqual(list(polygons[0][0]), list(polygons[0][-1]))\n\n def test_open_polygon(self):\n image = numpy.zeros((5, 5))\n image[2, 2] = 1\n image[1, 2] = 1\n image[3, 2] = 1\n image[2, 1] = 1\n image[2, 3] = 1\n mask = numpy.zeros((5, 5))\n mask[1, 1] = 1\n ms = MarchingSquaresMergeImpl(image, mask)\n polygons = ms.find_contours(0.9)\n self.assertEqual(len(polygons), 1)\n self.assertNotEqual(list(polygons[0][0]), list(polygons[0][-1]))\n\n def test_ambiguous_pattern(self):\n image = numpy.zeros((6, 8))\n image[(3, 4), :] = 1\n image[:, (0, -1)] = 0\n image[3, 3] = -0.001\n image[4, 4] = 0.0\n mask = None\n ms = MarchingSquaresMergeImpl(image, mask)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 2)\n\n def test_ambiguous_pattern_2(self):\n image = numpy.zeros((6, 8))\n image[(3, 4), :] = 1\n image[:, (0, -1)] = 0\n image[3, 3] = +0.001\n 
image[4, 4] = 0.0\n mask = None\n ms = MarchingSquaresMergeImpl(image, mask)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 1)\n\n def count_closed_polygons(self, polygons):\n closed = 0\n for polygon in polygons:\n if list(polygon[0]) == list(polygon[-1]):\n closed += 1\n return closed\n\n def test_image(self):\n # example from skimage\n x, y = numpy.ogrid[-numpy.pi:numpy.pi:100j, -numpy.pi:numpy.pi:100j]\n image = numpy.sin(numpy.exp((numpy.sin(x)**3 + numpy.cos(y)**2)))\n mask = None\n ms = MarchingSquaresMergeImpl(image, mask)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 11)\n self.assertEqual(self.count_closed_polygons(polygons), 3)\n\n def test_image_tiled(self):\n # example from skimage\n x, y = numpy.ogrid[-numpy.pi:numpy.pi:100j, -numpy.pi:numpy.pi:100j]\n image = numpy.sin(numpy.exp((numpy.sin(x)**3 + numpy.cos(y)**2)))\n mask = None\n ms = MarchingSquaresMergeImpl(image, mask, group_size=50)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 11)\n self.assertEqual(self.count_closed_polygons(polygons), 3)\n\n def test_image_tiled_minmax(self):\n # example from skimage\n x, y = numpy.ogrid[-numpy.pi:numpy.pi:100j, -numpy.pi:numpy.pi:100j]\n image = numpy.sin(numpy.exp((numpy.sin(x)**3 + numpy.cos(y)**2)))\n mask = None\n ms = MarchingSquaresMergeImpl(image, mask, group_size=50, use_minmax_cache=True)\n polygons = ms.find_contours(0.5)\n self.assertEqual(len(polygons), 11)\n self.assertEqual(self.count_closed_polygons(polygons), 3)\n\n\ndef suite():\n test_suite = unittest.TestSuite()\n loadTests = unittest.defaultTestLoader.loadTestsFromTestCase\n test_suite.addTest(loadTests(TestMergeImplApi))\n test_suite.addTest(loadTests(TestMergeImplContours))\n return test_suite\n" ]
[ [ "numpy.arange", "numpy.sum", "numpy.trapz" ], [ "numpy.zeros_like", "numpy.zeros", "numpy.empty", "numpy.isclose" ], [ "numpy.linspace", "numpy.asarray", "numpy.int_", "numpy.prod", "numpy.float32", "numpy.array", "numpy.empty" ], [ "numpy.linspace", "numpy.ascontiguousarray", "numpy.int32", "numpy.cos", "numpy.dtype", "numpy.sin", "numpy.ones", "numpy.float32", "numpy.zeros" ], [ "numpy.arange", "numpy.array" ], [ "numpy.array" ], [ "numpy.array", "numpy.equal" ], [ "numpy.nanmax", "numpy.isfinite", "numpy.unique", "numpy.nanmin", "numpy.round", "numpy.any", "numpy.argsort", "numpy.array" ], [ "numpy.random.poisson", "scipy.ndimage.gaussian_filter" ], [ "numpy.cos", "numpy.sin", "numpy.ones", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
duncanmazza/ml_stock_prediction_api
[ "4cf6325ff1252511049b87bc46fa4d5b48acf4f3", "4cf6325ff1252511049b87bc46fa4d5b48acf4f3" ]
[ "src/CombinedModel.py", "src/StockRNN.py" ]
[ "\"\"\"\nCode for the combined model approach.\n\n@author: Shashank Swaminathan\n\"\"\"\n\nfrom src.BayesReg import GPM\nfrom src.StockRNN import StockRNN\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom datetime import date\n\nZERO_TIME = \" 00:00:00\"\n\nDEVICE = \"cuda\" # selects the gpu to be used\nTO_GPU_FAIL_MSG = \"Unable to successfully run model.to('{}'). If running in Collaboratory, make sure \" \\\n \"that you have enabled the GPU your settings\".format(DEVICE)\n\nclass CombinedModel:\n r\"\"\"\n Class for handling combined model operations.\n \"\"\"\n def __init__(self, ticker, comp_tickers):\n r\"\"\"\n init function. It will set up the StockRNN and GPM classes.\n\n :param ticker: Ticker of stocks to predict\n :param comp_tickers: List of tickers to compare desired ticker against. Used for StockRNN only.\n \"\"\"\n self.srnn = StockRNN(ticker, to_compare=comp_tickers,\n train_start_date=datetime(2012, 1, 1),\n train_end_date=datetime.today(),\n try_load_weights=False)\n self.cms = GPM(ticker)\n\n def train(self, start_date, pred_start, pred_end, mw=0.5, n_epochs=10):\n r\"\"\"\n Main training function. It runs both the LSTM and GP models and stores results in attributes.\n\n :param start_date: Training start date (for GP model only). Provide as datetime object.\n :param pred_start: Date to start predictions from. Provide as datetime object.\n :param pred_end: Date to end predictions. Provide as datetime object.\n :param mw: Model weight. Used to do weighted average between GP and LSTM. 0 is for only the LSTM, and 1 is for only the GP. Defaults to 0.5 (equal split).\n :param n_epochs: Number of epochs to train the LSTM. Defaults to 10.\n\n :returns: (Mean predictions [t, y], Upper/lower bounds of 2 std [t, y])\n \"\"\"\n dt_ps = date(pred_start.year, pred_start.month, pred_start.day)\n dt_pe = date(pred_end.year, pred_end.month, pred_end.day)\n self.n_days_pred = np.busday_count(dt_ps, dt_pe) + 1\n\n self.train_end = pred_start - pd.Timedelta(1, \"D\")\n return self._combo_shot(start_date, pred_start, pred_end,\n mw = mw, n_epochs = n_epochs)\n\n def _combo_shot(self, start_date, pred_start, pred_end, mw=0.5, n_epochs=10):\n r\"\"\"\n Helper function to actually do the combo model training. Runs the two models individually, aligns the two results in time, then adds the two generated distributions as a weighted sum. Sets attribute combo_vals equal to the result.\n\n :param start_date: Training start date (for GP model only). Provide as datetime object.\n :param pred_start: Date to start predictions from. Provide as datetime object.\n :param pred_end: Date to end predictions. Provide as datetime object.\n :param mw: Model weight. Used to do weighted average between GP and LSTM. 0 is for only the LSTM, and 1 is for only the GP. Defaults to 0.5 (equal split).\n :param n_epochs: Number of epochs to train the LSTM. 
Defaults to 10.\n \"\"\"\n self._srnn_train(pred_start, self.n_days_pred, n_epochs = n_epochs)\n self._cms_train(start_date, self.train_end, pred_end)\n m_combo = self.m_cms[-self.n_days_pred:]*(mw)+self.m_srnn*(1-mw)\n std_combo = self.std_cms[-self.n_days_pred:]*(mw)+self.std_srnn*(1-mw)\n\n xy_pred = [self.times, m_combo]\n upper = m_combo + 2*std_combo\n lower = m_combo - 2*std_combo\n band_x = np.append(self.times, self.times[::-1])\n band_y = np.append(lower, upper[::-1])\n std_bounds = [band_x, band_y]\n self.combo_vals = (xy_pred, std_bounds)\n\n def _srnn_train(self, pred_start, n_days_pred, n_epochs=10):\n r\"\"\"\n Helper function to train the LSTM using the StockRNN class. Generates upper and lower bounds of prediction based on mean and std. deviation. Sets attribute srnn_vals equal to result. Result is of form: ([time, mean prediction], [time, upper/lower bounds], [time, actual data prior to prediction], [time, actual data during prediction]).\n\n :param pred_start: Date to start predictions from. Provide as datetime object.\n :param n_days_pred: Number of days to predict ahead. Will only predict on business days.\n :param n_epochs: Number of epochs to train the LSTM. Defaults to 10.\n \"\"\"\n srdf = self.srnn.companies[0].data_frame\n srdfdt = pd.to_datetime(srdf.Date)\n raw_p_st_idx = srdfdt.searchsorted(pred_start)\n p_st_idx = raw_p_st_idx + srdf.index[0]\n raw_p_e_idx = raw_p_st_idx + self.n_days_pred\n try:\n self.srnn.to(DEVICE)\n self.srnn.__togpu__(True)\n except RuntimeError:\n print(TO_GPU_FAIL_MSG)\n except AssertionError:\n print(TO_GPU_FAIL_MSG)\n self.srnn.__togpu__(False)\n\n self.srnn.do_training(num_epochs=n_epochs)\n self.m_srnn, self.std_srnn = self.srnn.pred_in_conj(p_st_idx, n_days_pred)\n self.times = srdf.Date.iloc[raw_p_st_idx:raw_p_e_idx]\n self.m_srnn = np.array(self.m_srnn)\n self.std_srnn = np.array(self.std_srnn)\n\n times_td = srdf.Date.iloc[raw_p_st_idx-50:raw_p_st_idx-1]\n td_srnn = srdf.Close.iloc[raw_p_st_idx-50:raw_p_st_idx-1]\n a_srnn = srdf.Close.iloc[raw_p_st_idx:raw_p_e_idx]\n\n xy_pred = [self.times, self.m_srnn]\n upper = self.m_srnn + 2*self.std_srnn\n lower = self.m_srnn - 2*self.std_srnn\n band_x = np.append(self.times, self.times[::-1])\n band_y = np.append(lower, upper[::-1])\n std_bounds = [band_x, band_y]\n train_data = [times_td, td_srnn]\n test_data = [self.times, a_srnn]\n self.srnn_vals = (xy_pred, std_bounds, train_data, test_data)\n\n def _cms_train(self, start_date, train_end, pred_end):\n r\"\"\"\n Helper function to train the GP model using the GPM class. Sets attribute cms_vals equal to result. Result is of form: ([time, mean prediction], [time, upper/lower bounds], [time, actual data prior to prediction], [time, actual data during prediction]).\n\n :param start_date: Training start date (for GP model only). Provide as datetime object.\n :param train_end: Date to end training. Provide as datetime object.\n :param pred_end: Date to end predictions. Provide as datetime object. 
Assumes predictions begin right after training.\n \"\"\"\n xy_pred, std_bounds, train_data, test_data = self.cms.go(start_date=start_date,\n split_date=train_end,\n end_date=pred_end)\n self.m_cms = xy_pred[1]\n self.std_cms = xy_pred[2]\n self.cms_vals = (xy_pred, std_bounds, train_data, test_data)\n", "\"\"\"\nCode to train the RNN\n\n@author: Duncan Mazza\n\"\"\"\n\nfrom torch import Tensor\nimport torch.nn as nn\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import TensorDataset\nimport numpy as np\nfrom datetime import datetime\nfrom src.get_data import Company\nimport matplotlib.pyplot as plt\nimport time\nfrom pandas import _libs\nfrom pandas_datareader._utils import RemoteDataError\nimport os\n\nZERO_TIME = \" 00:00:00\"\n\nDEVICE = \"cuda\" # selects the gpu to be used\nTO_GPU_FAIL_MSG = \"Unable to successfully run model.to('{}'). If running in Collaboratory, make sure \" \\\n \"that you have enabled the GPU your settings\".format(DEVICE)\n\n\nclass StockRNN(nn.Module):\n r\"\"\"\n Class for training on and predicting stocks using a LSTM network\n \"\"\"\n train_set: TensorDataset\n test_set: TensorDataset\n train_loader: DataLoader\n test_loader: DataLoader\n\n def __init__(self, ticker: str, lstm_hidden_size: int = 100, lstm_num_layers: int = 2, to_compare: [str, ] = None,\n train_start_date: datetime = datetime(2017, 1, 1), train_end_date: datetime = datetime(2018, 1, 1),\n sequence_segment_length: int = 50, drop_prob: float = 0.3, device: str = DEVICE,\n auto_populate: bool = True, train_data_prop: float = 0.8, lr: float = 1e-4,\n train_batch_size: int = 10, test_batch_size: int = 4, num_workers: int = 2, label_length: int = 30,\n try_load_weights: bool = False, save_state_dict: bool = True):\n r\"\"\"\n :param lstm_hidden_size: size of the lstm hidden layer\n :param lstm_num_layers: number of layers for the lstm\n :param ticker: ticker of company whose stock you want to predict\n :param to_compare: ticker of companies whose stock will be part of the features of the dataset\n :param train_start_date: date to request data from\n :param train_end_date: date to request data to\n :param sequence_segment_length: length of sequences to train the model on\n :param drop_prob: probability for dropout layers\n :param device: string for device to try sending the tensors to (i.e. 
\"cuda\")\n :param auto_populate: automatically calls all 'populate' functions in the constructor\n :param train_data_prop: proportion of data set to allocate to training data\n :param lr: learning rate for the optimizer\n :param train_batch_size: batch size for the training data\n :param test_batch_size:batch size for the testing data\n :param num_workers: parameter for Pytorch DataLoaders\n :param label_length: length of data (starting at the end of each sequence segment) to consider for the loss\n :param try_load_weights: boolean for whether the model should search for a cached model state dictionary\n :param save_state_dict: boolean for whether the model should cache its weights as a state dictionary\n \"\"\"\n super(StockRNN, self).__init__()\n\n # variable indicating success of calling self.to(DEVICE), where 0 indicates that it hasn't been tried yet, -1 \n # indicates that it failed, and 1 indicates that it was successful \n self.__togpu_works__ = 0\n\n # __init__ params\n self.lstm_hidden_size = lstm_hidden_size\n self.lstm_num_layers = lstm_num_layers\n self.drop_prob = drop_prob\n self.device = device\n self.ticker = ticker\n self.train_start_date = train_start_date\n self.train_end_date = train_end_date\n self.sequence_segment_length = sequence_segment_length\n self.auto_populate = auto_populate\n self.train_data_prop = train_data_prop\n self.lr = lr\n self.train_batch_size = train_batch_size\n self.test_batch_size = test_batch_size\n self.num_workers = num_workers\n self.save_state_dict = save_state_dict\n\n if label_length >= self.sequence_segment_length:\n print(\"Label length was specified to be {}, but cannot be >= self.sequence_segment_length; setting \"\n \"self.label_length to self.sequence_segment_length - 1.\")\n self.label_length = self.sequence_segment_length - 1\n else:\n self.label_length = label_length\n\n # company in index 0 is the company whose stock is being predicted\n self.companies = [Company(self.ticker, self.train_start_date, self.train_end_date)]\n\n start_date_changes = []\n end_date_changes = []\n if to_compare is not None:\n to_compare.sort()\n for company_ticker in to_compare:\n try:\n self.companies.append(Company(company_ticker, self.train_start_date, self.train_end_date))\n except KeyError:\n print(\"There was a KeyError exception raised when accessing data for the ticker {}; will skip this \"\n \"ticker\".format(company_ticker))\n continue\n except _libs.tslibs.np_datetime.OutOfBoundsDatetime:\n print(\"There was a _libs.tslibs.np_datetime.OutOfBoundsDatetime exception raised when accessing \"\n \"data for the ticker {}; will skip this ticker\".format(company_ticker))\n continue\n except RemoteDataError:\n print(\"There was a RemoteDataError when fetching data for ticker '{}'; will skip this ticker\"\n .format(company_ticker))\n continue\n\n if self.companies[-1].start_date_changed:\n start_date_changes.append(self.companies[-1].start_date)\n if self.companies[-1].end_date_changed:\n end_date_changes.append(self.companies[-1].end_date)\n\n self.num_companies = len(self.companies)\n\n if len(start_date_changes) != 0: # revise the start date of all of the data if necessary\n self.train_start_date = max(start_date_changes)\n for company in self.companies:\n company.revise_start_date(self.train_start_date)\n print(\"Data did not exist for every ticker at start date of {}; revising to the most recent starting time \"\n \"(common among all companies' data) of {}\".format(train_start_date.__str__().strip(ZERO_TIME),\n 
self.train_start_date.__str__().strip(ZERO_TIME)))\n # revise the end date of all of the data\n if len(end_date_changes) != 0:\n self.train_end_date = min(end_date_changes)\n for company in self.companies:\n company.revise_end_date(self.train_end_date)\n print(\"Data did not exist for every ticker at end date of {}; revising to the earliest ending time \"\n \"(common among all companies' data) of {}\".format(train_end_date.__str__().strip(ZERO_TIME),\n self.train_end_date.__str__().strip(ZERO_TIME)))\n self.start_date_str = self.train_start_date.__str__().strip(ZERO_TIME)\n self.end_date_str = self.train_end_date.__str__().strip(ZERO_TIME)\n\n # sting that describes the parameters for this model such that files for weights can be successfully loaded\n if self.num_companies > 1:\n considering_string = \"_CONSIDERING_\" + \"&\".join(list(map(lambda company:\n company.ticker, self.companies[1:])))\n else:\n considering_string = \"\"\n self.identifier = \"MODEL_FOR_\" + self.companies[0].ticker + considering_string + \\\n \"_WITH_lstm_hidden_size_{}_lstm_num_layers_{}_input_size_{}_sequence_\" \\\n \"segment_length_{}\".format(\n self.lstm_hidden_size,\n self.lstm_num_layers,\n self.num_companies,\n self.sequence_segment_length)\n\n self.model_weights_path = os.path.join(os.getcwd(), \".cache\", self.identifier + \".bin\")\n\n # initialize objects used during forward pass\n self.lstm = nn.LSTM(input_size=self.num_companies, hidden_size=self.lstm_hidden_size,\n num_layers=self.lstm_num_layers, dropout=self.drop_prob, batch_first=True)\n self.post_lstm_dropout = nn.Dropout(p=self.drop_prob)\n self.fc_1 = nn.Linear(self.lstm_hidden_size, 10)\n self.fc_2 = nn.Linear(10, self.num_companies)\n self.tanh = nn.Tanh()\n # self.rescaler = Rescaler(-0.5, 0.5)\n\n # initialize attributes with placeholder arrays\n self.daily_stock_data = np.array(0)\n self.train_sample_indices = np.array(0)\n self.test_sample_indices = np.array(0)\n self.train_loader_len = 0\n self.test_loader_len = 0\n self.data_len = 0\n\n # initialize optimizer and loss\n self.loss = nn.MSELoss()\n\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n if self.auto_populate:\n self.populate_daily_stock_data()\n self.populate_test_train()\n self.populate_loaders()\n\n if try_load_weights:\n try:\n weights = torch.load(self.model_weights_path)\n self.load_state_dict(weights)\n print(\"Loded weights from file\")\n except FileNotFoundError:\n print(\"Tried loading state dict from file but could not find cached file\")\n except:\n print(\"WARNING: Could not load state dict for an unknown reason\")\n\n def __togpu__(self, successful):\n r\"\"\"\n Sets the value of :attr:`__togpu_works__`, which is used in such a way that expensive error catching isn't run\n every epoch of training.\n\n :param successful: boolean for whether ``.to(gpu)`` was called successfully\n \"\"\"\n if successful:\n self.__togpu_works__ = 1\n else:\n self.__togpu_works__ = -1\n\n def peek_dataset(self, figsize: (int, int) = (10, 5)):\n r\"\"\"\n Creates a simple line plot of the entire dataset\n\n :param figsize: tuple of integers for :class:`plt.subplots` ``figsize`` argument\n \"\"\"\n if self.num_companies == 1:\n _, axes = plt.subplots(1, 1, figsize=figsize)\n axes.plot(self.daily_stock_data[0, :])\n axes.set_title(\"'{}' closing price day-over-day % change from {} to {}\".format(self.companies[0].ticker,\n self.start_date_str,\n self.end_date_str))\n axes.set_xlabel(\"Time\")\n axes.set_ylabel(\"Price (USD)\")\n else:\n _, axes = plt.subplots(2, 1, 
figsize=figsize)\n axes[0].plot(self.daily_stock_data[0, :])\n axes[0].set_title(\n \"'{}' closing price day-over-day % change from {} to {}\".format(self.companies[0].ticker,\n self.start_date_str, self.end_date_str))\n axes[0].set_xlabel(\"Time\")\n axes[0].set_ylabel(\"Price (USD)\")\n\n for c, company in enumerate(self.companies):\n axes[1].plot(self.daily_stock_data[c, :], label=company.ticker)\n axes[1].legend()\n axes[1].set_title(\n \"All companies' closing price day-over-day % change from {} to {}\".format(self.start_date_str,\n self.end_date_str))\n axes[1].set_xlabel(\"Time\")\n axes[1].set_ylabel(\"Price (USD)\")\n\n plt.show()\n\n def populate_daily_stock_data(self):\n r\"\"\"\n Populates ``self.daily_stock_data`` with the day-over-day percent change of the closing stock prices. The data\n for each company is truncated such that each company's array of data is the same length as the rest and such\n that their length is divisible by :attr:` sequence_segment_length`.\n \"\"\"\n daily_stock_data = []\n daily_stock_data_lens = []\n data_is_of_same_len = True\n for company in self.companies:\n daily_stock_data.append(company.return_numpy_array_of_company_daily_stock_percent_change())\n daily_stock_data_lens.append(len(daily_stock_data[-1]))\n if daily_stock_data_lens[0] != daily_stock_data_lens[-1]:\n data_is_of_same_len = False\n\n self.data_len = min(daily_stock_data_lens)\n mod = self.data_len % self.sequence_segment_length\n if not data_is_of_same_len or mod != 0:\n self.data_len -= mod\n for c in range(self.num_companies):\n daily_stock_data[c] = daily_stock_data[c][-self.data_len:]\n\n try:\n assert self.data_len >= 2 * self.sequence_segment_length\n except AssertionError:\n print(\"The specified segment length for the data to be split up into, {}, would result in \"\n \"a dataset of only one segment because the self.daily_stock_data array is of length {}\"\n \"; a minimum of 2 must be created for a train/test split (although there clearly needs\"\n \" to be more than 2 data points to train the model).\"\n .format(self.sequence_segment_length, self.data_len))\n raise AssertionError\n\n self.daily_stock_data = np.array(daily_stock_data)\n\n def populate_test_train(self, rand_seed: int = -1):\n r\"\"\"\n Populates ``self.train_data`` and ``self.test_data`` tensors with complimentary subsets of the sequences of\n ``self.daily_stock_data``, where the sequences are the ``self.sequence_length`` length sequences of data that,\n when concatenated, comprise ``self.daily_stock_data``.\n\n :param rand_seed: value to seed the random number generator; if -1 (or any value < 0), then do not\n seed the random number generator.\n \"\"\"\n num_segments = self.data_len // self.sequence_segment_length # floor divide is used to return an\n # integer (should be no rounding)\n\n # shape of segmented_data: (batch_size, num_features, sequence_length)\n segmented_data = np.zeros((num_segments, self.num_companies, self.sequence_segment_length))\n for c in range(self.num_companies):\n segmented_data[:, c, :] = self.daily_stock_data[c, :].reshape((num_segments, self.sequence_segment_length))\n num_train_segments = round(num_segments * self.train_data_prop)\n if num_segments == num_train_segments:\n # If true, this means that there would be no data for testing (because the train/test ratio is very high\n # and/or there is too little data given self.sequence_segment_length\n num_train_segments -= 1\n\n if rand_seed >= 0:\n np.random.seed(rand_seed) # useful for unit testing\n\n all_indices = 
np.array(range(num_segments), dtype=np.int64)\n np.random.shuffle(all_indices)\n self.train_sample_indices = all_indices[0:num_train_segments]\n self.test_sample_indices = np.array(list(set(range(num_segments)) - set(self.train_sample_indices)))\n del all_indices\n\n # X_train: Tensor = torch.from_numpy(self.rescaler.rescale_train(segmented_data[self.train_sample_indices, :, :])).float()\n # X_test: Tensor = torch.from_numpy(self.rescaler.rescale_test(segmented_data[self.test_sample_indices, :, :])).float()\n X_train: Tensor = torch.from_numpy(segmented_data[self.train_sample_indices, :, :]).float()\n X_test: Tensor = torch.from_numpy(segmented_data[self.test_sample_indices, :, :]).float()\n del segmented_data\n # the data for the labels is the data in the first position of the features dimension\n y_train: Tensor = X_train[:, :, -self.label_length:]\n y_test: Tensor = X_test[:, :, -self.label_length:]\n self.train_set = TensorDataset(X_train, y_train)\n self.test_set = TensorDataset(X_test, y_test)\n\n def return_loaders(self) -> [DataLoader, DataLoader]:\n r\"\"\"\n Returns the :ref:`torch.utils.data.Dataloader` objects for the training and test sets\n\n :return: training DataLoader\n :return: testing DataLoader\n \"\"\"\n if self.__togpu_works__ == 1:\n return [\n DataLoader(\n self.train_set,\n batch_size=self.train_batch_size,\n num_workers=self.num_workers,\n pin_memory=True # speeds up the host-to-device transfer\n ),\n DataLoader(\n self.test_set,\n batch_size=self.test_batch_size,\n num_workers=self.num_workers,\n pin_memory=True # speeds up the host-to-device transfer\n )\n ]\n else:\n return [\n DataLoader(\n self.train_set,\n batch_size=self.train_batch_size,\n num_workers=self.num_workers\n ),\n DataLoader(\n self.test_set,\n batch_size=self.test_batch_size,\n num_workers=self.num_workers\n )\n ]\n\n def populate_loaders(self):\n r\"\"\"\n Populates :attr:`train_loader`, :attr:`test_laoder`, :attr:`train_loader_len`, and `:attr:`test_loader_len`\n attributes.\n \"\"\"\n self.train_loader, self.test_loader = self.return_loaders()\n self.train_loader_len = len(self.train_loader)\n self.test_loader_len = len(self.test_loader)\n\n def forward(self, X: torch.Tensor, predict_beyond: int = 0):\n r\"\"\"\n Completes a forward pass of data through the network. The tensor passed in is of shape (batch size, features,\n sequence length), and the output is of shape (batch size, 1, sequence length). 
The data is passed through a LSTM\n layer with an arbitrary number of layers and an arbitrary hidden size (as defined by :attr:`lstm_hidden_size`\n and :attr:`lstm_num_layers`; the output is then passed through 2 fully connected layers such that the final\n number of features is the same as the input number of features (:attr:`num_companies`)\n\n :param X: input matrix of data of shape: (batch size, features (number of companies), sequence length)\n :param predict_beyond: number of days to recursively predict beyond the given input sequence\n :return: output of the forward pass of the data through the network (same shape as input)\n \"\"\"\n X = X.permute(0, 2, 1) # input x needs to be converted from (batch_size, features, sequence_length) to\n # (batch_size, sequence_length, features)\n\n output, (h, c) = self.lstm.forward(X)\n output = self.post_lstm_dropout(output) # dropout built into LSTM object doesnt work on last layer of LSTM\n output = self.fc_1.forward(output)\n output = self.tanh(output)\n output = self.fc_2.forward(output)\n output = self.tanh(output)\n\n if predict_beyond == 0:\n output = output.permute(0, 2, 1)\n return output\n else:\n new_output = torch.zeros(output.shape[0], self.sequence_segment_length - 1 + predict_beyond,\n output.shape[2])\n new_output[:, :output.shape[1], :] = output\n for i in range(predict_beyond):\n predict_beyond_out, (h, c) = self.lstm.forward(output[:, -1, None, :], (h, c))\n predict_beyond_out = self.fc_1.forward(predict_beyond_out)\n predict_beyond_out = self.tanh(predict_beyond_out)\n predict_beyond_out = self.fc_2.forward(predict_beyond_out)\n predict_beyond_out = self.tanh(predict_beyond_out)\n new_output[:, self.sequence_segment_length - 1 + i, None, :] = predict_beyond_out\n new_output = new_output.permute(0, 2, 1)\n return new_output\n\n def do_training(self, num_epochs: int, verbose=True, plot_output: bool = True,\n plot_output_figsize: (int, int) = (5, 10), plot_loss: bool = True,\n plot_loss_figsize: (int, int) = (7, 5)):\n \"\"\"\n This method trains the network using data in :attr:`train_loader` and checks against the data in\n :attr:`test_loader` at the end of each epoch. The forward pass through the network produces sequences of the\n same length as the input sequences. 
The sequences in the label data are of length :attr:`label_length`, so the\n output sequences are cropped to length :attr:`label_length` before being passed through the MSE loss function.\n Because each element of the output sequence at position ``n`` is a prediction of the input element ``n+1``, the\n cropped windows of the output sequences are given by the window that terminates at the second-to-last element\n of the output sequence.\n\n :param num_epochs: number of epochs to to run the training for\n :param verbose: if true, print diagnostic progress updates and final training and test loss\n :param plot_output: if true, plot the results of the final pass through the LSTM with a randomly selected\n segment of data\n :param plot_output_figsize: ``figsize`` argument for the output plot\n :param plot_loss: if true, plot the training and test loss\n :param plot_loss_figsize: ``figsize`` argument for the loss plot\n \"\"\"\n epoch_num = 0\n pass_num = 0\n training_start_time = time.time()\n train_loss_list = []\n train_loss_list_idx = []\n test_loss_list = []\n test_loss_list_idx = []\n\n if num_epochs <= 0:\n print(\"Specified number of epochs is <= 0; it must be > 0, so it is set to 1.\")\n num_epochs = 1\n\n while epoch_num <= num_epochs:\n if verbose:\n print(\"Epoch num: {}/{}: \".format(epoch_num, num_epochs))\n for i, data in enumerate(self.train_loader, 0):\n train_inputs, train_labels = data\n # send inputs and labels to the gpu if possible\n if self.__togpu_works__ == 1:\n train_inputs.to(DEVICE)\n train_labels.to(DEVICE)\n # otherwise, ``inputs`` and ``labels`` are already tensors\n\n self.optimizer.zero_grad()\n output = self.forward(train_inputs)\n train_loss_size = self.loss(output[:, :, output.shape[2] - self.label_length - 1:-1], train_labels)\n\n train_loss_size.backward()\n train_loss_list.append(train_loss_size.data.item())\n train_loss_list_idx.append(pass_num)\n self.optimizer.step()\n pass_num += 1\n if verbose:\n percent = round(100 * (i + 1) / self.train_loader_len)\n percent_floored_by_10: int = (percent // 10)\n front = '\\r' if i != 0 else \"\"\n print(\n \"{} > {}% [\".format(front, percent) + \"-\" * percent_floored_by_10 + \" \" * (\n 10 - percent_floored_by_10)\n + \"] train loss size = {}\".format(round(train_loss_list[-1], 4)), end=\"\")\n\n # do a run on the test set at the end of every epoch:\n test_loss_this_epoch = 0\n subplot_val = 0\n if epoch_num == num_epochs and plot_output:\n subplot_val = self.test_loader_len if self.test_loader_len <= 3 else 3\n _, axes = plt.subplots(subplot_val, 1, figsize=plot_output_figsize)\n if subplot_val == 1:\n axes = [axes, ]\n for i, data in enumerate(self.test_loader, 0):\n test_inputs, test_labels = data\n if self.__togpu_works__ == 1: # send inputs and labels to the gpu if possible\n test_inputs.to(DEVICE)\n test_labels.to(DEVICE)\n output = self.forward(test_inputs)\n test_loss_size = self.loss(output[:, :, output.shape[2] - self.label_length - 1:-1], test_labels)\n test_loss_this_epoch += test_loss_size.data.item()\n\n if epoch_num == num_epochs and plot_output and i < subplot_val:\n axes[i].plot(np.arange(0, self.sequence_segment_length, 1), test_inputs[0, 0, :].detach().numpy(),\n label=\"orig\")\n axes[i].plot(np.arange(1, self.sequence_segment_length + 1, 1), output[0, 0, :].detach().numpy(),\n label=\"pred\")\n axes[i].set_title(\n \"'{}' closing price day-over-day % change\\nfrom {} to {}: Example {} of\\nOriginal vs. 
Model \"\n \"Output\".format(self.companies[0].ticker, self.start_date_str, self.end_date_str, i))\n axes[i].set_xlabel(\"Time\")\n axes[i].set_ylabel(\"% Change of Stock (USD)\")\n\n if epoch_num == num_epochs and plot_output:\n plt.legend()\n plt.show()\n\n test_loss_list.append(test_loss_this_epoch / len(self.test_loader))\n test_loss_list_idx.append(pass_num)\n epoch_num += 1\n if verbose:\n print(\" | test loss size = {}\".format(round(test_loss_list[-1], 4)))\n\n if verbose:\n print(\"-----------------\\n\"\n \"Finished training\\n\"\n \" > Duration: {}s\\n\"\n \" > Final train loss: {} (delta of {})\\n\"\n \" > Final test loss: {} (delta of {})\".format(round(time.time() - training_start_time, 4),\n round(train_loss_list[-1], 4),\n round(train_loss_list[-1] - train_loss_list[0], 4),\n round(test_loss_list[-1], 4),\n round(test_loss_list[-1] - test_loss_list[0], 4)))\n\n if self.save_state_dict:\n if not os.path.isdir(os.path.join(os.getcwd(), \".cache\")):\n os.mkdir(os.path.join(os.getcwd(), \".cache\"))\n try:\n torch.save(self.state_dict(), self.model_weights_path)\n print(\" > (saved model weights to '{}' folder)\".format(os.path.join(os.getcwd(), \".cache\")))\n except:\n print(\"WARNING: an unknown exception occured when trying to save model weights\")\n\n if plot_loss:\n _, axes = plt.subplots(1, 1, figsize=plot_loss_figsize)\n axes.plot(train_loss_list_idx, train_loss_list, label=\"train\")\n axes.plot(test_loss_list_idx, test_loss_list, label=\"test\")\n axes.set_xlabel(\"Train data forward pass index\")\n axes.set_ylabel(\"Loss magnitude\")\n axes.set_title(\"Train and Testing Data Loss over Training Duration\")\n plt.legend()\n plt.show()\n\n def make_prediction_with_validation(self, predict_beyond: int = 30, num_plots: int = 2,\n data_start_indices: np.ndarray = None):\n r\"\"\"\n Selects data from the dataset and makes a prediction ``predict_beyond`` days out, and the actual values\n of the stock are shown alongside.\n\n :param predict_beyond: days to predict ahead in the future\n :param data_start_indices: indices corresponding to locations in the total dataset sequence for the training\n data to be gathered from (with the training data being of length :attr:`sequence_segment_length`)\n :return: length of the data being returned (training + prediction sequences)\n :return: datetime objects corresponding to data_start_indices\n :return: datetime objects corresponding to the end of the returned sequences\n :return: indices corresponding to the days where the predicted sequence starts\n :return: input and label sequence data associated with each pass of the model\n :return: numpy array of the model output\n :return: training data (in absolute stock value form instead of the % change that the model sees)\n :return: output prediction of the model converted from % change to actual stock values\n :return: label data (in absolute stock value form instead of % change) to compare to the output prediction\n :return: disparity between predicted stock values and actual stock values\n \"\"\"\n input_and_pred_len = self.sequence_segment_length + predict_beyond\n if data_start_indices is None:\n data_start_indices = np.random.choice(self.daily_stock_data.shape[1] - input_and_pred_len, num_plots)\n\n start_train_datetimes = [] # holds datetime objects corresponding to the data_start_indices\n end_pred_datetimes = [] # holds datetime objects corresponding to the data_start_indices\n pred_data_start_indicies = [] # indices for the start of predictions\n train_and_actual_data = 
torch.zeros((num_plots, self.num_companies, input_and_pred_len))\n for i in range(num_plots):\n train_and_actual_data[i, :, :] = torch.from_numpy(\n self.daily_stock_data[:, data_start_indices[i]:data_start_indices[i] + input_and_pred_len])\n start_train_datetimes.append(self.companies[0].get_date_at_index(data_start_indices[i]))\n end_pred_datetimes.append(self.companies[0].get_date_at_index(data_start_indices[i] + input_and_pred_len))\n pred_data_start_indicies.append(data_start_indices[i] + input_and_pred_len - predict_beyond)\n\n # pass in the data for training\n output_numpy = self.forward(train_and_actual_data[:, :, :-(predict_beyond + 1)], predict_beyond).detach().\\\n numpy()\n\n orig_stock_list = []\n pred_stock_list = []\n actual_stock_list = []\n disparity_list = []\n for i in range(num_plots):\n orig_stock_list.append(self.companies[0].data_frame[\"Close\"].iloc[\n list(range(data_start_indices[i], pred_data_start_indicies[i], 1))])\n pred_stock_list.append(\n self.companies[0].reconstruct_stock_from_percent_change(output_numpy[i, 0, -predict_beyond:],\n initial_condition_index=(\n pred_data_start_indicies[\n i] - 1))[1:])\n actual_stock_list.append(self.companies[0].data_frame[\"Close\"].iloc[\n list(range(pred_data_start_indicies[i], pred_data_start_indicies[i] + predict_beyond))].values)\n disparity_list.append(np.abs(pred_stock_list[i] - actual_stock_list[i]))\n\n return input_and_pred_len, start_train_datetimes, end_pred_datetimes, pred_data_start_indicies, \\\n train_and_actual_data, output_numpy, orig_stock_list, pred_stock_list, actual_stock_list, disparity_list\n\n def check_sliding_window_valid_at_index(self, end_pred_index, pred_beyond_range):\n r\"\"\"\n Checks that the index parameter for creating a distribution of predictions is valid for the dataset, and\n modifies it if it isn't (as well as prints a warning describing the condition)\n\n :param end_pred_index: index of the date that is desired to be predicted\n :param pred_beyond_range: tuple containing the range of the number of forecasted days the model will use to\n arrive at a prediction at ``end_pred_index``\n :return: end_pred_index (modified if necessary)\n \"\"\"\n if end_pred_index is None:\n print(\"latest_data_index is None, so will set to minimum possible value\")\n end_pred_index = self.sequence_segment_length + pred_beyond_range[1]\n if end_pred_index - (self.sequence_segment_length + (pred_beyond_range[1] - pred_beyond_range[0])) < 0:\n print(\"WARNING: latest_data_index, when combined with the provided pred_beyond_range, will yield negative\"\n \"indices for training data start points; revising to smallest possible value\")\n end_pred_index = self.sequence_segment_length + pred_beyond_range[1]\n if end_pred_index >= self.data_len:\n print(\"WARNING: latest_data_index is too large for dataset; revising to largest possible value\")\n end_pred_index = self.data_len - 1\n return end_pred_index\n\n def generate_predicted_distribution(self, end_pred_index: int = None, pred_beyond_range: (int, int) = (1, 10)):\n r\"\"\"\n Returns a list of predicted stock values at a given date using a range of forecast lengths\n\n :param end_pred_index: index of the date that is desired to be predicted\n :param pred_beyond_range: tuple containing the range of the number of forecasted days the model will use to\n arrive at a prediction at ``end_pred_index``\n :return: list of predicted values (of length given by ``pred_beyond_range``)\n :return: actual stock value corresponding to the predictions\n \"\"\"\n 
end_pred_index = self.check_sliding_window_valid_at_index(end_pred_index, pred_beyond_range)\n pred_beyond_range_delta = pred_beyond_range[1] - pred_beyond_range[0]\n predicted_value_list = []\n debug = []\n for i in range(pred_beyond_range[0], pred_beyond_range[1]):\n # the start of the desired index is the end value index decreased by the length of the prediction and the\n # training sequence length; the start index is then shifted back as the number of days that is predicted\n # beyond increases\n _, _, _, _, _, _, _, pred_stock_list, actual_stock_list, _ = self.make_prediction_with_validation(i,\n num_plots=1, data_start_indices=np.array([end_pred_index - self.sequence_segment_length -\n pred_beyond_range_delta - i]))\n predicted_value_list.append(pred_stock_list[0][-1])\n debug.append(actual_stock_list[0][-1])\n return predicted_value_list, actual_stock_list[0][-1]\n\n def pred_in_conj(self, start_of_pred_idx: int, n_days: int, pred_beyond_range: (int, int) = (1, 10)):\n r\"\"\"\n Calls :method:`generate_predicted_distribution` to create a list of predictions for each day given in a given\n range, and returns the mean and standard deviation associated with each day.\n\n :param start_of_pred_index: integer corresponding to the first date whose distribution will be predicted\n :param n_days: number of days from ``start_of_pred_index`` to predict out\n :param pred_beyond_range: tuple containing the range of the number of forecasted days the model will use to\n arrive at a prediction at ``end_pred_index``\n :return: list of length ``n_days`` of the mean values associated with each day's predicted stock\n :return: list of length ``n_days`` of the standard deviation associated with each day's predicted stock\n \"\"\"\n mean_list = []\n std_list = []\n for n in range(n_days):\n end_pred_index = start_of_pred_idx + n\n end_pred_index = self.check_sliding_window_valid_at_index(end_pred_index, pred_beyond_range)\n predicted_value_list, actual_value = self.generate_predicted_distribution(end_pred_index,\n pred_beyond_range)\n mean_list.append(np.mean(predicted_value_list))\n std_list.append(np.std(predicted_value_list))\n return mean_list, std_list\n\n def plot_predicted_distribution(self, latest_data_index: int = None, pred_beyond_range: (int, int) = (1, 10)):\n r\"\"\"\n TODO: documentation\n \"\"\"\n predicted_value_list, actual_value = self.generate_predicted_distribution(latest_data_index, pred_beyond_range)\n n_bins = round((pred_beyond_range[1] - pred_beyond_range[0]) / 3)\n if n_bins < 3:\n n_bins = 3\n plt.hist(predicted_value_list, bins=n_bins, color=\"green\")\n plt.plot([actual_value, actual_value], [0, pred_beyond_range[1] - pred_beyond_range[0]], \"-\")\n plt.show()\n\n def plot_prediction_with_validation(self, predict_beyond: int = 30, num_plots: int = 5, plt_scl=20):\n r\"\"\"\n A method for debugging/validating :attr:`make_prediction_with_validation` - makes predictions and shows the\n raw output of the model, reconstructed stock prices, and disparity between predicted stock prices and actual\n stock prices.\n\n :param predict_beyond: days to predict ahead in the future\n :param num_plots: number of times to call :attr:`make_prediction_with_validation` and plot the results\n :plt_scl: integer for width and heigh parameters of matplotlib plot\n \"\"\"\n forward_seq_len, start_dates, end_dates, pred_data_start_indicies, make_pred_data, \\\n output_numpy, orig_stock_list, pred_stock_list, actual_stock_list, disparity_list = \\\n self.make_prediction_with_validation(predict_beyond, 
num_plots)\n pred_beyond_plot_indices = np.arange(self.sequence_segment_length, forward_seq_len)\n pred_over_input_plot_indices = np.arange(1, self.sequence_segment_length)\n input_plot_indices = np.arange(0, self.sequence_segment_length)\n disparity_plot_indices = np.arange(1, predict_beyond + 1)\n _, axes = plt.subplots(num_plots, 3, figsize=(plt_scl, plt_scl))\n for ax in range(num_plots):\n axes[ax][0].plot(pred_beyond_plot_indices, output_numpy[ax, 0, -predict_beyond:], color='green',\n label=\"pred. beyond\",\n linestyle='--', marker='o')\n axes[ax][0].plot(pred_over_input_plot_indices, output_numpy[ax, 0, :self.sequence_segment_length - 1],\n color='gray',\n label=\"pred. over input\", linestyle='--', marker='o')\n axes[ax][0].plot(input_plot_indices,\n make_pred_data.detach().numpy()[ax, 0, :self.sequence_segment_length], color='red',\n label=\"input\", linestyle='-', marker='o')\n axes[ax][0].plot(pred_beyond_plot_indices,\n make_pred_data.detach().numpy()[ax, 0, self.sequence_segment_length:], label=\"actual\",\n linestyle='-', marker='o')\n axes[ax][0].set_title(\"{} % change from\\n{} to {}\".format(self.companies[0].ticker,\n start_dates[ax], end_dates[ax]))\n axes[ax][0].set_xlabel(\"Business days since {}\".format(start_dates[ax]))\n axes[ax][0].set_ylabel(\"% Change\")\n axes[ax][0].legend()\n axes[ax][1].plot(pred_beyond_plot_indices, pred_stock_list[ax], color=\"green\",\n label=\"pred. beyond\", linestyle='--', marker='o')\n axes[ax][1].plot(input_plot_indices, orig_stock_list[ax], color='red', label=\"input\", linestyle='-', marker='o')\n axes[ax][1].plot(pred_beyond_plot_indices, actual_stock_list[ax], label=\"actual\", linestyle='-', marker='o')\n axes[ax][1].set_title(\"{} stock from\\n{} to {}\".format(self.companies[0].ticker, start_dates[ax],\n end_dates[ax]))\n axes[ax][1].set_xlabel(\"Business days since {}\".format(start_dates[ax]))\n axes[ax][1].set_ylabel(\"Stock Price\")\n axes[ax][1].legend()\n axes[ax][2].plot(disparity_plot_indices, disparity_list[ax], label=\"disparity\",\n linestyle=\"\", marker=\"o\")\n axes[ax][2].set_title(\"Disparity of Predicted and Actual Stock\")\n axes[ax][2].set_xlabel(\"Num. predicted days out {}\".format(start_dates[ax]))\n axes[ax][2].set_ylabel(\"Absolute difference between\\nprediction and reality\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n model: StockRNN\n\n # set to switch between loading saved weights if available\n try_load_weights = True\n\n model = StockRNN(\"AAPL\", to_compare=[\"GOOGL\", \"MSFT\", \"MSI\"], train_start_date=datetime(2012, 1, 1),\n train_end_date=datetime(2019, 1, 1), try_load_weights=try_load_weights)\n # model = StockRNN(\"dummy\")\n # model.peek_dataset()\n\n try:\n model.to(DEVICE)\n model.__togpu__(True)\n except RuntimeError:\n print(TO_GPU_FAIL_MSG)\n except AssertionError:\n print(TO_GPU_FAIL_MSG)\n model.__togpu__(False)\n\n # model.do_training(num_epochs=100)\n\n # model.eval()\n model.plot_prediction_with_validation()\n # model.plot_predicted_distribution(12)\n" ]
[ [ "pandas.to_datetime", "pandas.Timedelta", "numpy.append", "numpy.busday_count", "numpy.array" ], [ "matplotlib.pyplot.legend", "torch.zeros", "torch.load", "torch.utils.data.DataLoader", "matplotlib.pyplot.plot", "numpy.mean", "torch.nn.Dropout", "numpy.arange", "torch.utils.data.TensorDataset", "torch.from_numpy", "numpy.std", "numpy.zeros", "numpy.random.choice", "torch.nn.Linear", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "numpy.abs", "numpy.random.seed", "torch.nn.LSTM", "matplotlib.pyplot.subplots", "numpy.random.shuffle", "torch.nn.Tanh", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
naylor-b/OpenMDAO1
[ "49d82f6601b33db9bdcf7d146d030d55e3b62ef4" ]
[ "openmdao/solvers/test/test_ln_direct.py" ]
[ "\"\"\" Unit test for the DirectSolver linear solver. \"\"\"\n\nimport unittest\nimport numpy as np\n\nfrom openmdao.api import Group, Problem, IndepVarComp, ExecComp, DirectSolver, \\\n LinearGaussSeidel, Newton\nfrom openmdao.core.test.test_residual_sign import SimpleImplicitSL\nfrom openmdao.test.converge_diverge import ConvergeDiverge, SingleDiamond, \\\n ConvergeDivergeGroups, SingleDiamondGrouped\nfrom openmdao.test.sellar import SellarStateConnection\nfrom openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, \\\n FanOutGrouped, FanInGrouped, ArrayComp2D\nfrom openmdao.test.util import assert_rel_error\n\n\nclass TestDirectSolver(unittest.TestCase):\n\n def test_simple_matvec(self):\n group = Group()\n group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])\n group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n def test_simple_matvec_subbed(self):\n group = Group()\n group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = Group()\n prob.root.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])\n prob.root.add('sub', group, promotes=['*'])\n\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n def test_array2D(self):\n group = Group()\n group.add('x_param', IndepVarComp('x', np.ones((2, 2))), promotes=['*'])\n group.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n Jbase = prob.root.mycomp._jacobian_cache\n diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])\n assert_rel_error(self, diff, 0.0, 1e-8)\n\n J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')\n diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])\n assert_rel_error(self, diff, 0.0, 1e-8)\n\n def test_simple_in_group_matvec(self):\n group = Group()\n sub = group.add('sub', Group(), promotes=['x', 'y'])\n group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])\n sub.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n def test_simple_jac(self):\n group = Group()\n group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])\n group.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n 
assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n def test_fan_out(self):\n\n prob = Problem()\n prob.root = FanOut()\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['comp2.y', \"comp3.y\"]\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp2.y']['p.x'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p.x'][0][0], 15.0, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp2.y']['p.x'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p.x'][0][0], 15.0, 1e-6)\n\n def test_fan_out_grouped(self):\n\n prob = Problem()\n prob.root = FanOutGrouped()\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['sub.comp2.y', \"sub.comp3.y\"]\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['sub.comp2.y']['p.x'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['sub.comp3.y']['p.x'][0][0], 15.0, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['sub.comp2.y']['p.x'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['sub.comp3.y']['p.x'][0][0], 15.0, 1e-6)\n\n def test_fan_in(self):\n\n prob = Problem()\n prob.root = FanIn()\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p1.x1', 'p2.x2']\n unknown_list = ['comp3.y']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)\n\n def test_fan_in_grouped(self):\n\n prob = Problem()\n prob.root = FanInGrouped()\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p1.x1', 'p2.x2']\n unknown_list = ['comp3.y']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)\n\n def test_converge_diverge(self):\n\n prob = Problem()\n prob.root = ConvergeDiverge()\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['comp7.y1']\n\n prob.run()\n\n # Make sure value is fine.\n assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')\n 
assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n def test_converge_diverge_groups(self):\n\n prob = Problem()\n prob.root = ConvergeDivergeGroups()\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n # Make sure value is fine.\n assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)\n\n indep_list = ['p.x']\n unknown_list = ['comp7.y1']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n def test_single_diamond(self):\n\n prob = Problem()\n prob.root = SingleDiamond()\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['comp4.y1', 'comp4.y2']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n def test_single_diamond_grouped(self):\n\n prob = Problem()\n prob.root = SingleDiamondGrouped()\n prob.root.ln_solver = DirectSolver()\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['comp4.y1', 'comp4.y2']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n def test_sellar_derivs(self):\n\n prob = Problem()\n prob.root = SellarStateConnection()\n prob.root.ln_solver = DirectSolver()\n\n prob.root.nl_solver.options['atol'] = 1e-12\n prob.setup(check=False)\n prob.run()\n\n # Just make sure we are at the right answer\n assert_rel_error(self, prob['y1'], 25.58830273, .00001)\n assert_rel_error(self, prob['d1.y2'], 12.05848819, .00001)\n\n indep_list = ['x', 'z']\n unknown_list = ['obj', 'con1', 'con2']\n\n Jbase = {}\n Jbase['con1'] = {}\n Jbase['con1']['x'] = -0.98061433\n Jbase['con1']['z'] = np.array([-9.61002285, -0.78449158])\n Jbase['con2'] = {}\n Jbase['con2']['x'] = 0.09692762\n Jbase['con2']['z'] = np.array([1.94989079, 1.0775421 ])\n Jbase['obj'] = {}\n Jbase['obj']['x'] = 2.98061392\n Jbase['obj']['z'] = np.array([9.61001155, 1.78448534])\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n for key1, val1 in Jbase.items():\n for key2, val2 in val1.items():\n assert_rel_error(self, J[key1][key2], val2, .00001)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n for key1, val1 in Jbase.items():\n for 
key2, val2 in val1.items():\n assert_rel_error(self, J[key1][key2], val2, .00001)\n\n def test_implicit_solve_linear(self):\n\n p = Problem()\n p.root = Group()\n\n dvars = ( ('a', 3.), ('b', 10.))\n p.root.add('desvars', IndepVarComp(dvars), promotes=['a', 'b'])\n\n sg = p.root.add('sg', Group(), promotes=[\"*\"])\n sg.add('si', SimpleImplicitSL(), promotes=['a', 'b', 'x'])\n\n p.root.add('func', ExecComp('f = 2*x0+a'), promotes=['f', 'x0', 'a'])\n p.root.connect('x', 'x0', src_indices=[1])\n\n p.driver.add_objective('f')\n p.driver.add_desvar('a')\n\n p.root.nl_solver = Newton()\n p.root.nl_solver.options['rtol'] = 1e-10\n p.root.nl_solver.options['atol'] = 1e-10\n p.root.ln_solver = DirectSolver()\n\n p.setup(check=False)\n p['x'] = np.array([1.5, 2.])\n\n p.run()\n J = p.calc_gradient(['a'], ['f'], mode='rev')\n assert_rel_error(self, J[0][0], 1.57735, 1e-6)\n\n\nclass TestDirectSolverAssemble(unittest.TestCase):\n \"\"\" Tests the DirectSolver using the method that assembles a Jacobian.\"\"\"\n\n def test_simple_matvec(self):\n group = Group()\n group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])\n group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n with self.assertRaises(RuntimeError) as cm:\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n\n expected_msg = \"The 'assemble' jacobian_method is not supported when \" + \\\n \"'apply_linear' is used on a component (mycomp).\"\n\n self.assertEqual(str(cm.exception), expected_msg)\n\n def test_simple_matvec_subbed(self):\n group = Group()\n group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = Group()\n prob.root.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])\n prob.root.add('sub', group, promotes=['*'])\n\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n with self.assertRaises(RuntimeError) as cm:\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n\n expected_msg = \"The 'assemble' jacobian_method is not supported when \" + \\\n \"'apply_linear' is used on a component (sub.mycomp).\"\n\n self.assertEqual(str(cm.exception), expected_msg)\n\n def test_array2D(self):\n group = Group()\n group.add('x_param', IndepVarComp('x', np.ones((2, 2))), promotes=['*'])\n group.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n Jbase = prob.root.mycomp._jacobian_cache\n diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])\n assert_rel_error(self, diff, 0.0, 1e-8)\n\n J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')\n diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])\n assert_rel_error(self, diff, 0.0, 1e-8)\n\n def test_array2D_no_decompose(self):\n group = Group()\n group.add('x_param', IndepVarComp('x', np.ones((2, 2))), promotes=['*'])\n group.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.root.ln_solver.options['solve_method'] 
= 'solve'\n prob.setup(check=False)\n prob.run()\n\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n Jbase = prob.root.mycomp._jacobian_cache\n diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])\n assert_rel_error(self, diff, 0.0, 1e-8)\n\n J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')\n diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])\n assert_rel_error(self, diff, 0.0, 1e-8)\n\n def test_simple_in_group_matvec(self):\n group = Group()\n sub = group.add('sub', Group(), promotes=['x', 'y'])\n group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])\n sub.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n with self.assertRaises(RuntimeError) as cm:\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n\n expected_msg = \"The 'assemble' jacobian_method is not supported when \" + \\\n \"'apply_linear' is used on a component (sub.mycomp).\"\n\n self.assertEqual(str(cm.exception), expected_msg)\n\n def test_simple_jac(self):\n group = Group()\n group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])\n group.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])\n\n prob = Problem()\n prob.root = group\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')\n assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)\n\n def test_fan_out(self):\n\n prob = Problem()\n prob.root = FanOut()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['comp2.y', \"comp3.y\"]\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp2.y']['p.x'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p.x'][0][0], 15.0, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp2.y']['p.x'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p.x'][0][0], 15.0, 1e-6)\n\n def test_fan_out_grouped(self):\n\n prob = Problem()\n prob.root = FanOutGrouped()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['sub.comp2.y', \"sub.comp3.y\"]\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['sub.comp2.y']['p.x'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['sub.comp3.y']['p.x'][0][0], 15.0, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['sub.comp2.y']['p.x'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['sub.comp3.y']['p.x'][0][0], 15.0, 1e-6)\n\n def test_fan_in(self):\n\n prob = Problem()\n prob.root = FanIn()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p1.x1', 'p2.x2']\n unknown_list = ['comp3.y']\n\n J = 
prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)\n\n def test_fan_in_grouped(self):\n\n prob = Problem()\n prob.root = FanInGrouped()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p1.x1', 'p2.x2']\n unknown_list = ['comp3.y']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)\n assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)\n\n def test_converge_diverge(self):\n\n prob = Problem()\n prob.root = ConvergeDiverge()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['comp7.y1']\n\n prob.run()\n\n # Make sure value is fine.\n assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n def test_converge_diverge_groups(self):\n\n prob = Problem()\n prob.root = ConvergeDivergeGroups()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n # Make sure value is fine.\n assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)\n\n indep_list = ['p.x']\n unknown_list = ['comp7.y1']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')\n assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)\n\n def test_single_diamond(self):\n\n prob = Problem()\n prob.root = SingleDiamond()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['comp4.y1', 'comp4.y2']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, 
J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n def test_single_diamond_grouped(self):\n\n prob = Problem()\n prob.root = SingleDiamondGrouped()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n prob.setup(check=False)\n prob.run()\n\n indep_list = ['p.x']\n unknown_list = ['comp4.y1', 'comp4.y2']\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')\n assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)\n assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)\n\n def test_sellar_derivs(self):\n\n prob = Problem()\n prob.root = SellarStateConnection()\n prob.root.ln_solver = DirectSolver()\n prob.root.ln_solver.options['jacobian_method'] = 'assemble'\n\n prob.root.nl_solver.options['atol'] = 1e-12\n prob.setup(check=False)\n prob.run()\n\n # Just make sure we are at the right answer\n assert_rel_error(self, prob['y1'], 25.58830273, .00001)\n assert_rel_error(self, prob['d1.y2'], 12.05848819, .00001)\n\n indep_list = ['x', 'z']\n unknown_list = ['obj', 'con1', 'con2']\n\n Jbase = {}\n Jbase['con1'] = {}\n Jbase['con1']['x'] = -0.98061433\n Jbase['con1']['z'] = np.array([-9.61002285, -0.78449158])\n Jbase['con2'] = {}\n Jbase['con2']['x'] = 0.09692762\n Jbase['con2']['z'] = np.array([1.94989079, 1.0775421 ])\n Jbase['obj'] = {}\n Jbase['obj']['x'] = 2.98061392\n Jbase['obj']['z'] = np.array([9.61001155, 1.78448534])\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n for key1, val1 in Jbase.items():\n for key2, val2 in val1.items():\n assert_rel_error(self, J[key1][key2], val2, .00001)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n for key1, val1 in Jbase.items():\n for key2, val2 in val1.items():\n assert_rel_error(self, J[key1][key2], val2, .00001)\n\n def test_sellar_derivs_under_lin_GS(self):\n\n prob = Problem()\n prob.root = Group()\n prob.root.ln_solver = LinearGaussSeidel()\n nest = prob.root.add('nest', SellarStateConnection())\n nest.ln_solver = DirectSolver()\n nest.ln_solver.options['jacobian_method'] = 'assemble'\n\n nest.nl_solver.options['atol'] = 1e-12\n prob.setup(check=False)\n prob.run()\n\n # Just make sure we are at the right answer\n assert_rel_error(self, prob['nest.y1'], 25.58830273, .00001)\n assert_rel_error(self, prob['nest.d1.y2'], 12.05848819, .00001)\n\n indep_list = ['nest.x', 'nest.z']\n unknown_list = ['nest.obj', 'nest.con1', 'nest.con2']\n\n Jbase = {}\n Jbase['nest.con1'] = {}\n Jbase['nest.con1']['nest.x'] = -0.98061433\n Jbase['nest.con1']['nest.z'] = np.array([-9.61002285, -0.78449158])\n Jbase['nest.con2'] = {}\n Jbase['nest.con2']['nest.x'] = 0.09692762\n Jbase['nest.con2']['nest.z'] = np.array([1.94989079, 1.0775421 ])\n Jbase['nest.obj'] = {}\n Jbase['nest.obj']['nest.x'] = 2.98061392\n Jbase['nest.obj']['nest.z'] = np.array([9.61001155, 1.78448534])\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')\n for key1, val1 in Jbase.items():\n for key2, val2 in val1.items():\n assert_rel_error(self, J[key1][key2], val2, 
.00001)\n\n J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')\n for key1, val1 in Jbase.items():\n for key2, val2 in val1.items():\n assert_rel_error(self, J[key1][key2], val2, .00001)\n\n def test_implicit_solve_linear(self):\n\n p = Problem()\n p.root = Group()\n\n dvars = ( ('a', 3.), ('b', 10.))\n p.root.add('desvars', IndepVarComp(dvars), promotes=['a', 'b'])\n\n sg = p.root.add('sg', Group(), promotes=[\"*\"])\n sg.add('si', SimpleImplicitSL(), promotes=['a', 'b', 'x'])\n\n p.root.add('func', ExecComp('f = 2*x0+a'), promotes=['f', 'x0', 'a'])\n p.root.connect('x', 'x0', src_indices=[1])\n\n p.driver.add_objective('f')\n p.driver.add_desvar('a')\n\n p.root.nl_solver = Newton()\n p.root.nl_solver.options['rtol'] = 1e-10\n p.root.nl_solver.options['atol'] = 1e-10\n p.root.ln_solver = DirectSolver()\n p.root.ln_solver.options['jacobian_method'] = 'assemble'\n\n p.setup(check=False)\n p['x'] = np.array([1.5, 2.])\n\n p.run()\n J = p.calc_gradient(['a'], ['f'], mode='rev')\n assert_rel_error(self, J[0][0], 1.57735, 1e-6)\n\n def test_unrel_var_in_Jac(self):\n\n p = Problem()\n root = p.root = Group()\n root.add('p', IndepVarComp('x', 4.0))\n root.add('comp', ExecComp(['y1 = 1.5*x1 + 2.0*x2', 'y2 = 3.0*x1 - x2']))\n\n root.connect('p.x', 'comp.x1')\n p.driver.add_objective('comp.y1')\n p.driver.add_desvar('p.x')\n\n p.root.ln_solver = DirectSolver()\n p.root.ln_solver.options['jacobian_method'] = 'assemble'\n\n p.setup(check=False)\n p.run()\n\n J = p.calc_gradient(['p.x'], ['comp.y1'], mode='fwd')\n assert_rel_error(self, J[0][0], 1.5, 1e-6)\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ad6398/Pointer-Generator-NW
[ "4bf997453fb8570fe04668318ca3861cb7d23ecf" ]
[ "utils/dataset.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport csv\nimport glob\nimport time\nimport queue\nimport struct\nimport numpy as np\nimport tensorflow as tf\nfrom random import shuffle\nfrom threading import Thread\nfrom tensorflow.core.example import example_pb2\n\nfrom utils import utils\nfrom utils import config\n\nimport random\nrandom.seed(1234)\n\n\n# <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids.\nSENTENCE_STA = '<s>'\nSENTENCE_END = '</s>'\n\nPAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence\nUNK_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words\nBOS_TOKEN = '[BOS]' # This has a vocab id, which is used at the start of every decoder input sequence\nEOS_TOKEN = '[EOS]' # This has a vocab id, which is used at the end of untruncated target sequences\n# Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file.\n\n\nclass Vocab(object):\n\n def __init__(self, file, max_size):\n self.word2idx = {}\n self.idx2word = {}\n self.count = 0 # keeps track of total number of words in the Vocab\n\n # [UNK], [PAD], [BOS] and [EOS] get the ids 0,1,2,3.\n for w in [UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN]:\n self.word2idx[w] = self.count\n self.idx2word[self.count] = w\n self.count += 1\n\n # Read the vocab file and add words up to max_size\n with open(file, 'r') as fin:\n for line in fin:\n items = line.split()\n if len(items) != 2:\n print('Warning: incorrectly formatted line in vocabulary file: %s' % line.strip())\n continue\n w = items[0]\n if w in [SENTENCE_STA, SENTENCE_END, UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN]:\n raise Exception(\n '<s>, </s>, [UNK], [PAD], [BOS] and [EOS] shouldn\\'t be in the vocab file, but %s is' % w)\n if w in self.word2idx:\n raise Exception('Duplicated word in vocabulary file: %s' % w)\n self.word2idx[w] = self.count\n self.idx2word[self.count] = w\n self.count += 1\n if max_size != 0 and self.count >= max_size:\n break\n print(\"Finished constructing vocabulary of %i total words. 
Last word added: %s\" % (\n self.count, self.idx2word[self.count - 1]))\n\n def word2id(self, word):\n if word not in self.word2idx:\n return self.word2idx[UNK_TOKEN]\n return self.word2idx[word]\n\n def id2word(self, word_id):\n if word_id not in self.idx2word:\n raise ValueError('Id not found in vocab: %d' % word_id)\n return self.idx2word[word_id]\n\n def size(self):\n return self.count\n\n def write_metadata(self, path):\n print( \"Writing word embedding metadata file to %s...\" % (path))\n with open(path, \"w\") as f:\n fieldnames = ['word']\n writer = csv.DictWriter(f, delimiter=\"\\t\", fieldnames=fieldnames)\n for i in range(self.size()):\n writer.writerow({\"word\": self.idx2word[i]})\n\nclass Example(object):\n\n def __init__(self, article, abstract_sentences, vocab):\n # Get ids of special tokens\n bos_decoding = vocab.word2id(BOS_TOKEN)\n eos_decoding = vocab.word2id(EOS_TOKEN)\n\n # Process the article\n article_words = article.decode().split()\n if len(article_words) > config.max_enc_steps:\n article_words = article_words[:config.max_enc_steps]\n self.enc_len = len(article_words) # store the length after truncation but before padding\n self.enc_inp = [vocab.word2id(w) for w in\n article_words] # list of word ids; OOVs are represented by the id for UNK token\n\n # Process the abstract\n abstract = ' '.encode().join(abstract_sentences).decode()\n abstract_words = abstract.split() # list of strings\n abs_ids = [vocab.word2id(w) for w in\n abstract_words] # list of word ids; OOVs are represented by the id for UNK token\n\n # Get the decoder input sequence and target sequence\n self.dec_inp, self.tgt = self.get_dec_seq(abs_ids, config.max_dec_steps, bos_decoding, eos_decoding)\n self.dec_len = len(self.dec_inp)\n\n # If using pointer-generator mode, we need to store some extra info\n if config.pointer_gen:\n # Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id;\n # also store the in-article OOVs words themselves\n self.enc_inp_extend_vocab, self.article_oovs = utils.article2ids(article_words, vocab)\n\n # Get a verison of the reference summary where in-article OOVs are represented by their temporary article OOV id\n abs_ids_extend_vocab = utils.abstract2ids(abstract_words, vocab, self.article_oovs)\n\n # Overwrite decoder target sequence so it uses the temp article OOV ids\n _, self.tgt = self.get_dec_seq(abs_ids_extend_vocab, config.max_dec_steps, bos_decoding, eos_decoding)\n\n # Store the original strings\n self.original_article = article\n self.original_abstract = abstract\n self.original_abstract_sents = abstract_sentences\n\n def get_dec_seq(self, sequence, max_len, start_id, stop_id):\n src = [start_id] + sequence[:]\n tgt = sequence[:]\n if len(src) > max_len: # truncate\n src = src[:max_len]\n tgt = tgt[:max_len] # no end_token\n else: # no truncation\n tgt.append(stop_id) # end token\n assert len(src) == len(tgt)\n return src, tgt\n\n def pad_enc_seq(self, max_len, pad_id):\n while len(self.enc_inp) < max_len:\n self.enc_inp.append(pad_id)\n if config.pointer_gen:\n while len(self.enc_inp_extend_vocab) < max_len:\n self.enc_inp_extend_vocab.append(pad_id)\n\n def pad_dec_seq(self, max_len, pad_id):\n while len(self.dec_inp) < max_len:\n self.dec_inp.append(pad_id)\n while len(self.tgt) < max_len:\n self.tgt.append(pad_id)\n\n\nclass Batch(object):\n def __init__(self, example_list, vocab, batch_size):\n self.batch_size = batch_size\n self.pad_id = vocab.word2id(PAD_TOKEN) # id of the PAD token used to pad sequences\n 
self.init_encoder_seq(example_list) # initialize the input to the encoder\n self.init_decoder_seq(example_list) # initialize the input and targets for the decoder\n self.store_orig_strings(example_list) # store the original strings\n\n def init_encoder_seq(self, example_list):\n # Determine the maximum length of the encoder input sequence in this batch\n max_enc_seq_len = max([ex.enc_len for ex in example_list])\n\n # Pad the encoder input sequences up to the length of the longest sequence\n for ex in example_list:\n ex.pad_enc_seq(max_enc_seq_len, self.pad_id)\n\n # Initialize the numpy arrays\n # Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.\n self.enc_batch = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.int32)\n self.enc_lens = np.zeros((self.batch_size), dtype=np.int32)\n self.enc_padding_mask = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.float32)\n\n # Fill in the numpy arrays\n for i, ex in enumerate(example_list):\n self.enc_batch[i, :] = ex.enc_inp[:]\n self.enc_lens[i] = ex.enc_len\n for j in range(ex.enc_len):\n self.enc_padding_mask[i][j] = 1\n\n # For pointer-generator mode, need to store some extra info\n if config.pointer_gen:\n # Determine the max number of in-article OOVs in this batch\n self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])\n # Store the in-article OOVs themselves\n self.art_oovs = [ex.article_oovs for ex in example_list]\n # Store the version of the enc_batch that uses the article OOV ids\n self.enc_batch_extend_vocab = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.int32)\n for i, ex in enumerate(example_list):\n self.enc_batch_extend_vocab[i, :] = ex.enc_inp_extend_vocab[:]\n\n def init_decoder_seq(self, example_list):\n # Pad the inputs and targets\n for ex in example_list:\n ex.pad_dec_seq(config.max_dec_steps, self.pad_id)\n\n # Initialize the numpy arrays.\n self.dec_batch = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.int32)\n self.tgt_batch = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.int32)\n self.dec_padding_mask = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.float32)\n self.dec_lens = np.zeros((self.batch_size), dtype=np.int32)\n\n # Fill in the numpy arrays\n for i, ex in enumerate(example_list):\n self.dec_batch[i, :] = ex.dec_inp[:]\n self.tgt_batch[i, :] = ex.tgt[:]\n self.dec_lens[i] = ex.dec_len\n for j in range(ex.dec_len):\n self.dec_padding_mask[i][j] = 1\n\n def store_orig_strings(self, example_list):\n self.original_articles = [ex.original_article for ex in example_list] # list of lists\n self.original_abstracts = [ex.original_abstract for ex in example_list] # list of lists\n self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list] # list of list of lists\n\n\nclass Batcher(object):\n BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold\n\n def __init__(self, vocab, data_path, batch_size, single_pass, mode):\n self._vocab = vocab\n self._data_path = data_path\n self.batch_size = batch_size\n self.single_pass = single_pass\n self.mode = mode\n\n # Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched\n self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)\n self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self.batch_size)\n\n # Different settings depending on whether we're in single_pass mode or not\n if single_pass:\n self._num_example_q_threads = 1 # just one thread, so we read 
through the dataset just once\n self._num_batch_q_threads = 1 # just one thread to batch examples\n self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing\n self._finished_reading = False # this will tell us when we're finished reading the dataset\n else:\n self._num_example_q_threads = 1 # num threads to fill example queue\n self._num_batch_q_threads = 1 # num threads to fill batch queue\n self._bucketing_cache_size = 1 # how many batches-worth of examples to load into cache before bucketing\n\n # Start the threads that load the queues\n self._example_q_threads = []\n for _ in range(self._num_example_q_threads):\n self._example_q_threads.append(Thread(target=self.fill_example_queue))\n self._example_q_threads[-1].daemon = True\n self._example_q_threads[-1].start()\n self._batch_q_threads = []\n for _ in range(self._num_batch_q_threads):\n self._batch_q_threads.append(Thread(target=self.fill_batch_queue))\n self._batch_q_threads[-1].daemon = True\n self._batch_q_threads[-1].start()\n\n # Start a thread that watches the other threads and restarts them if they're dead\n if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever\n self._watch_thread = Thread(target=self.watch_threads)\n self._watch_thread.daemon = True\n self._watch_thread.start()\n\n def next_batch(self):\n # If the batch queue is empty, print a warning\n if self._batch_queue.qsize() == 0:\n tf.logging.warning(\n 'Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i',\n self._batch_queue.qsize(), self._example_queue.qsize())\n if self.single_pass and self._finished_reading:\n tf.logging.info(\"Finished reading dataset in single_pass mode.\")\n return None\n\n batch = self._batch_queue.get() # get the next Batch\n return batch\n\n def fill_example_queue(self):\n example_generator = self.example_generator(self._data_path, self.single_pass)\n input_gen = self.pair_generator(example_generator)\n\n while True:\n try:\n (article,\n abstract) = input_gen.__next__() # read the next example from file. article and abstract are both strings.\n except StopIteration: # if there are no more examples:\n tf.logging.info(\"The example generator for this example queue filling thread has exhausted data.\")\n if self.single_pass:\n tf.logging.info(\n \"single_pass mode is on, so we've finished reading dataset. 
This thread is stopping.\")\n self._finished_reading = True\n break\n else:\n raise Exception(\"single_pass mode is off but the example generator is out of data; error.\")\n\n abstract_sentences = [sent.strip() for sent in utils.abstract2sents(\n abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.\n example = Example(article, abstract_sentences, self._vocab)\n self._example_queue.put(example)\n\n def fill_batch_queue(self):\n while True:\n if self.mode == 'decode':\n # beam search decode mode single example repeated in the batch\n ex = self._example_queue.get()\n b = [ex for _ in range(self.batch_size)]\n self._batch_queue.put(Batch(b, self._vocab, self.batch_size))\n else:\n # Get bucketing_cache_size-many batches of Examples into a list, then sort\n inputs = []\n for _ in range(self.batch_size * self._bucketing_cache_size):\n inputs.append(self._example_queue.get())\n inputs = sorted(inputs, key=lambda inp: inp.enc_len, reverse=True) # sort by length of encoder sequence\n\n # Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.\n batches = []\n for i in range(0, len(inputs), self.batch_size):\n batches.append(inputs[i:i + self.batch_size])\n if not self.single_pass:\n shuffle(batches)\n for b in batches: # each b is a list of Example objects\n self._batch_queue.put(Batch(b, self._vocab, self.batch_size))\n\n def watch_threads(self):\n while True:\n tf.logging.info(\n 'Bucket queue size: %i, Input queue size: %i',\n self._batch_queue.qsize(), self._example_queue.qsize())\n\n time.sleep(60)\n for idx, t in enumerate(self._example_q_threads):\n if not t.is_alive(): # if the thread is dead\n tf.logging.error('Found example queue thread dead. Restarting.')\n new_t = Thread(target=self.fill_example_queue)\n self._example_q_threads[idx] = new_t\n new_t.daemon = True\n new_t.start()\n for idx, t in enumerate(self._batch_q_threads):\n if not t.is_alive(): # if the thread is dead\n tf.logging.error('Found batch queue thread dead. Restarting.')\n new_t = Thread(target=self.fill_batch_queue)\n self._batch_q_threads[idx] = new_t\n new_t.daemon = True\n new_t.start()\n\n def pair_generator(self, example_generator):\n while True:\n e = example_generator.__next__() # e is a tf.Example\n try:\n article_text = e.features.feature['article'].bytes_list.value[\n 0] # the article text was saved under the key 'article' in the data files\n abstract_text = e.features.feature['abstract'].bytes_list.value[\n 0] # the abstract text was saved under the key 'abstract' in the data files\n except ValueError:\n tf.logging.error('Failed to get article or abstract from example')\n continue\n if len(article_text) == 0: # See https://github.com/abisee/pointer-generator/issues/1\n # tf.logging.warning('Found an example with empty article text. 
Skipping it.')\n continue\n else:\n yield (article_text, abstract_text)\n\n def example_generator(self, data_path, single_pass):\n while True:\n filelist = glob.glob(data_path) # get the list of datafiles\n assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty\n if single_pass:\n filelist = sorted(filelist)\n else:\n random.shuffle(filelist)\n for f in filelist:\n reader = open(f, 'rb')\n while True:\n len_bytes = reader.read(8)\n if not len_bytes: break # finished reading this file\n str_len = struct.unpack('q', len_bytes)[0]\n example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]\n yield example_pb2.Example.FromString(example_str)\n if single_pass:\n print(\"example_generator completed reading all datafiles. No more data.\")\n break" ]
[ [ "tensorflow.logging.error", "numpy.zeros", "tensorflow.core.example.example_pb2.Example.FromString", "tensorflow.logging.info" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
veterinarian-5300/Genious-Python-Code-Generator
[ "d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd", "d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd", "d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd", "d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd" ]
[ "practice/check.py", "Py_lab/Lab 1,2/plotting_unit_signals.py", "Py_lab/Lab 1,2/matrix.py", "Py_lab/Lab 4/unsolved_1.py" ]
[ "import csv\r\nimport pandas as pd\r\n\r\none=pd.read_csv(\"pa_dashboards.csv\")\r\n\r\ntwo=pd.read_csv(\"pa_dashboards(1).csv\", squeeze=True)\r\n\r\npattern = '|'.join(two)\r\n\r\nexist=one['sentences'].str.contains(pattern, na=False)\r\n\r\nwith open('new.csv', 'w') as outFile:\r\n for cols in exist:\r\n if pattern in exist:\r\n outFile.write(exist, \"1\")\r\n", "### importing libraries\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nn = np.linspace(-5, 5, 11)\r\ndelta = 1*(n==0)\r\nu = 1*(n>=0)\r\nplt.stem(n, delta, use_line_collection = True)\r\n\r\n# naming the x axis\r\nplt.xlabel('n')\r\nplt.ylabel('x[n] = delta[n]')\r\n# giving a title to my graph\r\nplt.title('Unit Impulse Sequence')\r\nplt.show()\r\nplt.stem(n, u, use_line_collection = True)\r\nplt.xlabel('n')\r\nplt.ylabel('x[n] = u[n]')\r\n# giving a title to my graph\r\nplt.title('Unit Step Sequence')\r\n# naming the y axis\r\n\r\n# naming the x axis\r\n# naming the y axis\r\n\r\nplt.show()", "# importing numpy\r\nimport numpy as np\r\n\r\nmatrix = np.array([[2,4,6,10],\r\n[5,10,15,20],\r\n[7,14,21,28]])\r\n\r\nprint(matrix.shape)\r\nprint(matrix.size)\r\nprint(matrix.ndim)\r\n", "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy.fft\r\n\r\nn = np.linspace(0, 10, 11)\r\nu1=1*(n>=0)\r\nu2=1*(n>=3)\r\n\r\nx1=(u1-u2)\r\ndft=scipy.fft.fft(x1)\r\n\r\n\r\nplt.figure(figsize=(8,9))\r\nplt.subplot(2,1,1)\r\nplt.stem(dft.real, use_line_collection=True)\r\n\r\n#naming the axis\r\nplt.xlabel('n')\r\nplt.ylabel('Real {x[n]}')\r\n\r\n#plot title\r\nplt.title('Real part of DFT')\r\n#imaginary part\r\nplt.subplot(2,1,2)\r\nplt.stem(dft.imag,use_line_collection=True)\r\n\r\n#naming the axis\r\nplt.xlabel('n')\r\nplt.ylabel('imag{x[n]}')\r\n\r\n#plot title\r\nplt.title('Imaginary part of DFT')\r\nplt.show()\r\nprint('DFT x[n] =',dft)\r\n" ]
[ [ "pandas.read_csv" ], [ "matplotlib.pyplot.stem", "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.array" ], [ "matplotlib.pyplot.stem", "numpy.linspace", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
csaid/bokeh
[ "4312b2de1a15fb24884fcd97eaf6442bf8b4bd7b", "4312b2de1a15fb24884fcd97eaf6442bf8b4bd7b", "4312b2de1a15fb24884fcd97eaf6442bf8b4bd7b" ]
[ "examples/plotting/server/boxplot.py", "bokeh/charts/scatter.py", "sphinx/source/tutorial/solutions/histogram.py" ]
[ "# The plot server must be running\n# Go to http://localhost:5006/bokeh to view this plot\n\nimport numpy as np\nimport pandas as pd\nfrom bokeh.plotting import *\n\n# Generate some synthetic time series for six different categories\ncats = list(\"abcdef\")\ny = np.random.randn(2000)\ng = np.random.choice(cats, 2000)\nfor i, l in enumerate(cats):\n y[g == l] += i // 2\ndf = pd.DataFrame(dict(score=y, group=g))\n\n# Find the quartiles and IQR foor each category\ngroups = df.groupby('group')\nq1 = groups.quantile(q=0.25)\nq2 = groups.quantile(q=0.5)\nq3 = groups.quantile(q=0.75)\niqr = q3 - q1\nupper = q2 + 1.5*iqr\nlower = q2 - 1.5*iqr\n\n# find the outliers for each category\ndef outliers(group):\n cat = group.name\n return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']\nout = groups.apply(outliers).dropna()\n\n# Prepare outlier data for plotting, we need coordinate for every outlier.\noutx = []\nouty = []\nfor cat in cats:\n for value in out[cat]:\n outx.append(cat)\n outy.append(value)\n\noutput_server('boxplot')\n\nfigure(tools=\"previewsave\", background_fill=\"#EFE8E2\", title=\"\")\n\nhold()\n\n# stems\nsegment(cats, upper.score, cats, q3.score, x_range=cats,\n line_width=2, line_color=\"black\", )\nsegment(cats, lower.score, cats, q1.score, x_range=cats,\n line_width=2, line_color=\"black\")\n# boxes\nrect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,\n fill_color=\"#E08E79\", line_width=2, line_color=\"black\")\nrect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,\n fill_color=\"#3B8686\", line_width=2, line_color=\"black\")\n\n# whisters (0-height rects simpler than segments)\nrect(cats, lower.score, 0.2, 0, line_color=\"black\")\nrect(cats, upper.score, 0.2, 0, line_color=\"black\")\n\n# outliers\ncircle(outx, outy, size=6, color=\"#F38630\", fill_alpha=0.6)\n\nxgrid().grid_line_color = None\nygrid().grid_line_color = \"white\"\nygrid().grid_line_width = 2\nxaxis().major_label_text_font_size=\"12pt\"\nshow()\n", "\"\"\"This is the Bokeh charts interface. It gives you a high level API to build\ncomplex plot is a simple way.\n\nThis is the Scatter class which lets you build your scatter plots just passing\nthe arguments to the Chart class and calling the proper functions.\nIt also add detection of the incomming input to see if it is a pandas dataframe\nor a pandas groupby object.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. 
All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENCE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport numpy as np\nimport pandas as pd\n\nfrom ._charts import Chart\nfrom ._chartobject import ChartObject\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\n\nclass Scatter(ChartObject):\n\n def __init__(self, pairs,\n title=None, xlabel=None, ylabel=None, legend=False,\n xscale=\"linear\", yscale=\"linear\", width=800, height=600,\n tools=True, filename=False, server=False, notebook=False):\n self.pairs = pairs\n super(Scatter, self).__init__(title, xlabel, ylabel, legend,\n xscale, yscale, width, height,\n tools, filename, server, notebook)\n\n def check_attr(self):\n super(Scatter, self).check_attr()\n\n def show(self):\n \"This is the main Scatter show function.\"\n # asumming we get an hierchiral pandas object\n if isinstance(self.pairs, pd.DataFrame):\n self.labels = self.pairs.columns.levels[1].values\n\n from collections import OrderedDict\n pdict = OrderedDict()\n\n for i in self.pairs.columns.levels[0].values:\n pdict[i] = self.pairs[i].dropna().values\n\n self.pairs = pdict\n\n # asumming we get an groupby object\n if isinstance(self.pairs, pd.core.groupby.DataFrameGroupBy):\n from collections import OrderedDict\n pdict = OrderedDict()\n\n for i in self.pairs.groups.keys():\n self.labels = self.pairs.get_group(i).columns\n xname = self.pairs.get_group(i).columns[0]\n yname = self.pairs.get_group(i).columns[1]\n x = getattr(self.pairs.get_group(i), xname)\n y = getattr(self.pairs.get_group(i), yname)\n pdict[i] = np.array([x.values, y.values]).T\n\n self.pairs = pdict\n\n self.check_attr()\n\n if self._xlabel is None:\n self._xlabel = self.labels[0]\n if self._ylabel is None:\n self._ylabel = self.labels[1]\n\n chart = Chart(self._title, self._xlabel, self._ylabel, self._legend,\n self.xscale, self.yscale, self._width, self._height,\n self._tools, self._filename, self._server, self._notebook)\n chart.get_data_scatter(**self.pairs)\n chart.get_source_scatter()\n chart.start_plot()\n chart.scatter()\n chart.end_plot()\n chart.show()\n", "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy.special\nfrom bokeh.plotting import *\n\nmu, sigma = 0, 0.5 # NOTE: you can tinker with these values if you like\n\n# sample the distribution\nmeasured = np.random.normal(mu, sigma, 1000)\nhist, edges = np.histogram(measured, density=True, bins=50)\n\n# compute ideal values\nx = np.linspace(-2, 2, 1000)\npdf = 1/(sigma * np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2 / (2*sigma**2))\ncdf = (1+scipy.special.erf((x-mu)/np.sqrt(2*sigma**2)))/2\n\n# EXERCISE: output to a static HTML file\noutput_file('histogram.html')\n\n# EXERCISE: turn on plot hold\nhold()\n\n# Use the `quad` renderer to display the histogram bars.\nquad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],\n fill_color=\"#036564\", line_color=\"#033649\",\n\n # NOTE: these are only needed on the first renderer\n background_fill=\"#E8DDCB\",\n title=\"Normal Distribution (μ=0, σ=0.5)\",\n tools=\"\"\n)\n\n# Use `line` renderers to display the PDF and CDF\nline(x, pdf, 
line_color=\"#D95B43\", line_width=8, alpha=0.7, legend=\"PDF\")\nline(x, cdf, line_color=\"white\", line_width=2, alpha=0.7, legend=\"CDF\")\n\n# Move the legend to a better place.\n# Acceptable values: 'top_left', 'top_right', 'bottom_left', and 'bottom_right'\nlegend().orientation = \"top_left\"\n\n# EXERCISE: create a new figure\nfigure()\n\nmu, sigma = 0, 0.5 # NOTE: you can tinker with these values if you like\n\n# sample the distribution\nmeasured = np.random.lognormal(mu, sigma, 1000)\nhist, edges = np.histogram(measured, density=True, bins=50)\n\n# compute ideal values\nx = np.linspace(0, 8.0, 1000)\npdf = 1/(x* sigma * np.sqrt(2*np.pi)) * np.exp(-(np.log(x)-mu)**2 / (2*sigma**2))\ncdf = (1+scipy.special.erf((np.log(x)-mu)/(np.sqrt(2)*sigma)))/2\n\n# EXERCISE: recreate the first plot for this new data\nquad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],\n fill_color=\"#036564\", line_color=\"#033649\", background_fill=\"#E8DDCB\",\n title=\"Log Normal Distribution (μ=0, σ=0.5)\", tools=\"\")\nline(x, pdf, line_color=\"#D95B43\", line_width=8, alpha=0.7, legend=\"PDF\")\nline(x, cdf, line_color=\"white\", line_width=2, alpha=0.7, legend=\"CDF\")\nlegend().orientation = \"bottom_right\"\n\n# EXERCISE (optional): Add new plots for the following distributions:\n# * Gamma\n# * Beta\n# * Weibull\n# The numerical code is included, you will need to create new figures.\nfigure()\n\nk, theta = 1.0, 2.0\n\n# sample the distribution\nmeasured = np.random.gamma(k, theta, 1000)\nhist, edges = np.histogram(measured, density=True, bins=50)\n\n# compute ideal values\nx = np.linspace(0, 20.0, 1000)\npdf = x**(k-1) * np.exp(-x/theta) / (theta**k * scipy.special.gamma(k))\ncdf = scipy.special.gammainc(k, x/theta) / scipy.special.gamma(k)\n\nquad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],\n fill_color=\"#036564\", line_color=\"#033649\", background_fill=\"#E8DDCB\",\n title=\"Gamma Distribution (k=1, θ=2)\", tools=\"\")\nline(x, pdf, line_color=\"#D95B43\", line_width=8, alpha=0.7, legend=\"PDF\")\nline(x, cdf, line_color=\"white\", line_width=2, alpha=0.7, legend=\"CDF\")\nlegend().orientation = \"top_left\"\n\n\nfigure()\n\nalpha, beta = 2.0, 2.0\n\n# sample the distribution\nmeasured = np.random.beta(alpha, beta, 1000)\nhist, edges = np.histogram(measured, density=True, bins=50)\n\n# compute ideal values\nx = np.linspace(0, 1, 1000)\npdf = x**(alpha-1) * (1-x)**(beta-1) / scipy.special.beta(alpha, beta)\ncdf = scipy.special.btdtr(alpha, beta, x)\n\nquad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],\n fill_color=\"#036564\", line_color=\"#033649\", background_fill=\"#E8DDCB\",\n title=\"Beta Distribution (α=2, β=2)\", tools=\"\")\nline(x, pdf, line_color=\"#D95B43\", line_width=8, alpha=0.7, legend=\"PDF\")\nline(x, cdf, line_color=\"white\", line_width=2, alpha=0.7, legend=\"CDF\")\n\n\n\nfigure()\n\nlam, k = 1, 1.25\n\n# sample the distribution\nmeasured = lam*(-np.log(np.random.uniform(0, 1, 1000)))**(1/k)\nhist, edges = np.histogram(measured, density=True, bins=50)\n\n# compute ideal values\nx = np.linspace(0, 8, 1000)\npdf = (k/lam)*(x/lam)**(k-1) * np.exp(-(x/lam)**k)\ncdf = 1 - np.exp(-(x/lam)**k)\n\nquad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],\n fill_color=\"#036564\", line_color=\"#033649\", background_fill=\"#E8DDCB\",\n title=\"Weibull Distribution (λ=1, k=1.25)\", tools=\"\")\nline(x, pdf, line_color=\"#D95B43\", line_width=8, alpha=0.7, legend=\"PDF\")\nline(x, cdf, 
line_color=\"white\", line_width=2, alpha=0.7, legend=\"CDF\")\nlegend().orientation = \"top_left\"\n\nshow()" ]
[ [ "numpy.random.randn", "numpy.random.choice" ], [ "numpy.array" ], [ "numpy.random.lognormal", "numpy.log", "numpy.random.beta", "numpy.sqrt", "numpy.linspace", "numpy.random.normal", "numpy.random.gamma", "numpy.random.uniform", "numpy.exp", "numpy.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
valentinaschueller/sweet
[ "27e99c7a110c99deeadee70688c186d82b39ac90", "27e99c7a110c99deeadee70688c186d82b39ac90", "27e99c7a110c99deeadee70688c186d82b39ac90", "27e99c7a110c99deeadee70688c186d82b39ac90" ]
[ "benchmarks_sphere/paper_jrn_parco_rexi_nonlinear/compare_wt_dt_vs_accuracy_galewsky/postprocessing_consolidate.py", "doc/rexi/rexi_with_cauchy_integral/test_cauchy_shifted/CauchyPhiQuadrature.py", "benchmarks_plane/nonlinear_interaction/pp_nm_plot_evol_modewise.py", "benchmarks_sphere/paper_jrn_sl_exp/reference_galewsky_M1024_12days_ln_dt0010sec/postprocessing_swe.py" ]
[ "#! /usr/bin/env python3\n\nimport sys\nimport math\n\nfrom SWEET import *\nfrom mule.plotting.Plotting import *\nfrom mule.postprocessing.JobsData import *\nfrom mule.postprocessing.JobsDataConsolidate import *\n\nsys.path.append('../')\nimport pretty_plotting as pp\nsys.path.pop()\n\nmule_plotting_usetex(False)\n\ngroups = ['runtime.timestepping_method']\n\ntagnames_y = [\n\t'sphere_data_diff_prog_h.res_norm_l1',\n\t'sphere_data_diff_prog_h.res_norm_l2',\n\t'sphere_data_diff_prog_h.res_norm_linf',\n]\n\n\n\nj = JobsData('./job_bench_*', verbosity=0)\n\nc = JobsDataConsolidate(j)\nprint(\"\")\nprint(\"Groups:\")\njob_groups = c.create_groups(groups)\nfor key, g in job_groups.items():\n\tprint(key)\n\nfor tagname_y in tagnames_y:\n\n\tparams = []\n\tparams += [\n\t\t\t{\n\t\t\t\t'tagname_x': 'runtime.timestep_size',\n\t\t\t\t'xlabel': \"Timestep size (seconds)\",\n\t\t\t\t'ylabel': pp.latex_pretty_names[tagname_y],\n\t\t\t\t'title': 'Timestep size vs. error',\n\t\t\t\t'xscale': 'log',\n\t\t\t\t'yscale': 'log',\n\t\t\t},\n\t\t]\n\n\tparams += [\n\t\t\t{\n\t\t\t\t'tagname_x': 'output.simulation_benchmark_timings.main_timestepping',\n\t\t\t\t'xlabel': \"Wallclock time (seconds)\",\n\t\t\t\t'ylabel': pp.latex_pretty_names[tagname_y],\n\t\t\t\t'title': 'Wallclock time vs. error',\n\t\t\t\t'xscale': 'log',\n\t\t\t\t'yscale': 'log',\n\t\t\t},\n\t\t]\n\n\n\tfor param in params:\n\n\t\ttagname_x = param['tagname_x']\n\t\txlabel = param['xlabel']\n\t\tylabel = param['ylabel']\n\t\ttitle = param['title']\n\t\txscale = param['xscale']\n\t\tyscale = param['yscale']\n\n\t\tprint(\"*\"*80)\n\t\tprint(\"Processing tag \"+tagname_x)\n\t\tprint(\"*\"*80)\n\n\n\n\t\tif True:\n\t\t\t\"\"\"\n\t\t\tPlotting format\n\t\t\t\"\"\"\n\n\t\t\t# Filter out errors beyond this value!\n\t\t\tdef data_filter(x, y, jobdata):\n\t\t\t\tif y == None:\n\t\t\t\t\treturn True\n\n\t\t\t\tx = float(x)\n\t\t\t\ty = float(y)\n\n\t\t\t\tif math.isnan(y):\n\t\t\t\t\treturn True\n\n\t\t\t\tif 'prog_h' in tagname_y:\n\t\t\t\t\tif 'l1' in tagname_y:\n\t\t\t\t\t\tif y > 1e1:\n\t\t\t\t\t\t\tprint(\"Sorting out L1 data \"+str(y))\n\t\t\t\t\t\t\treturn True\n\t\t\t\t\telif 'l2' in tagname_y:\n\t\t\t\t\t\tif y > 1e1:\n\t\t\t\t\t\t\tprint(\"Sorting out L2 data \"+str(y))\n\t\t\t\t\t\t\treturn True\n\t\t\t\t\telif 'linf' in tagname_y:\n\t\t\t\t\t\tif y > 1e2:\n\t\t\t\t\t\t\tprint(\"Sorting out Linf data \"+str(y))\n\t\t\t\t\t\t\treturn True\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"Unknown y tag \"+tagname_y)\n\n\t\t\t\telse:\n\t\t\t\t\tprint(\"TODO\")\n\n\t\t\t\treturn False\n\n\n\n\t\t\td = JobsData_GroupsPlottingScattered(\n\t\t\t\t\tjob_groups,\n\t\t\t\t\ttagname_x,\n\t\t\t\t\ttagname_y,\n\t\t\t\t\tdata_filter = data_filter\n\t\t\t\t)\n\n\t\t\tfileid = \"output_plotting_\"+tagname_x.replace('.', '-').replace('_', '-')+\"_vs_\"+tagname_y.replace('.', '-').replace('_', '-')\n\n\n\t\t\tif True:\n\t\t\t\t#\n\t\t\t\t# Proper naming and sorting of each label\n\t\t\t\t#\n\n\t\t\t\t# new data dictionary\n\t\t\t\tdata_new = {}\n\t\t\t\tfor key, data in d.data.items():\n\t\t\t\t\t# generate nice tex label\n\t\t\t\t\t#data['label'] = pp.get_pretty_name(key)\n\t\t\t\t\tdata['label'] = key #pp.get_pretty_name(key)\n\n\t\t\t\t\tkey_new = pp.get_pretty_name_order(key)+'_'+key\n\n\t\t\t\t\t# copy data\n\t\t\t\t\tdata_new[key_new] = copy.copy(data)\n\n\t\t\t\t# Copy back new data table\n\t\t\t\td.data = data_new\n\n\t\t\tp = Plotting_ScatteredData()\n\n\n\t\t\tdef fun(p):\n\t\t\t\tfrom matplotlib import ticker\n\t\t\t\tfrom matplotlib.ticker import 
FormatStrFormatter\n\n\t\t\t\tplt.tick_params(axis='x', which='minor')\n\t\t\t\tp.ax.xaxis.set_minor_formatter(FormatStrFormatter(\"%.0f\"))\n\t\t\t\tp.ax.xaxis.set_major_formatter(FormatStrFormatter(\"%.0f\"))\n\n\t\t\t\tp.ax.xaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))\n\n\t\t\t\tfor tick in p.ax.xaxis.get_minor_ticks():\n\t\t\t\t\ttick.label.set_fontsize(8) \n\n\n\t\t\t\tplt.tick_params(axis='y', which='minor')\n\t\t\t\tp.ax.yaxis.set_minor_formatter(FormatStrFormatter(\"%.1e\"))\n\t\t\t\tp.ax.yaxis.set_major_formatter(FormatStrFormatter(\"%.1e\"))\n \n\t\t\t\tp.ax.yaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))\n\n\t\t\t\tfor tick in p.ax.yaxis.get_minor_ticks():\n\t\t\t\t\ttick.label.set_fontsize(6) \n\n\n\n\t\t\tannotate_text_template = \"{:.1f} / {:.3f}\"\n\t\t\tp.plot(\n\t\t\t\t\tdata_plotting = d.get_data_float(),\n\t\t\t\t\txlabel = xlabel,\n\t\t\t\t\tylabel = ylabel,\n\t\t\t\t\ttitle = title,\n\t\t\t\t\txscale = xscale,\n\t\t\t\t\tyscale = yscale,\n\t\t\t\t\t#annotate = True,\n\t\t\t\t\t#annotate_each_nth_value = 3,\n\t\t\t\t\t#annotate_fontsize = 6,\n\t\t\t\t\t#annotate_text_template = annotate_text_template,\n\t\t\t\t\tlegend_fontsize = 8,\n\t\t\t\t\tgrid = True,\n\t\t\t\t\toutfile = fileid+\".pdf\",\n\t\t\t\t\tlambda_fun = fun,\n\t\t\t\t)\n\n\t\t\tprint(\"Data plotting:\")\n\t\t\td.print()\n\t\t\td.write(fileid+\".csv\")\n\n\t\tprint(\"Info:\")\n\t\tprint(\"\tNaN: Errors in simulations\")\n\t\tprint(\"\tNone: No data available\")\n", "#! /usr/bin/env python3\n#\n# Author: Martin Schreiber <[email protected]>\n# Date: 2017-08-16\n#\n\n\nimport math\nimport cmath\nimport numpy as np\nimport sys\n\nimport matplotlib.pyplot as plt\n\n\nclass CauchyPhiQuadrature:\n\talpha = []\n\tbeta = []\n\n\t#\n\t# Phi 0-N functions\n\t#\n\tdef phi(self, n, z):\n\t\tif n == 0:\n\t\t\treturn cmath.exp(z)\n\n\t\tif n != 0:\n\t\t\tif abs(z) < 1e-8:\n\t\t\t\treturn 1.0/math.factorial(n)\n\n\t\t\t\traise Exception(\"Z close to zero, not yet supported for phi \"+str(n)+\" !!!\")\n\n\n\t\treturn (self.phi(n-1, z) - 1.0/math.factorial(n-1))/z\n\n\t\traise Exception(\"Phi function not supported yet\")\n\n\n\t#\n\t# Constructor\n\t# See setup(...) for documentation on parameters\n\t# \n\tdef __init__(self, phiN = -1, P = 64, R = 1.0, mu = 1.0, half = False, Rt=[]):\n\t\tif phiN == -1:\n\t\t\treturn\n\n\t\tself.setup(phiN, P, R, mu, half, Rt)\n\n\n\tdef CE(self, Rreal, Rimag, A):\n\t\treturn A.real*Rreal + 1.j*A.imag*Rimag\n\n\n\tdef setup(\n\t\tself,\n\t\tphiN = 0,\t# phi function id\n\t\tP = 64,\t\t# Number of quadrature poles\n\t\tR = 1.0,\t# Radius\n\t\tmu = 1.0,\t# Shift\n\t\thalf = False,\n\t\tRt = []\t\t# Elipse-related stuff, not yet supported\n\t):\n\t\tself.phiN = phiN\n\t\tself.P = P\n\t\tself.Rreal = R\n\t\tself.Rimag = R\n\t\tself.mu = mu\n\t\tself.half = half\n\n\t\tif len(Rt) > 0:\n\t\t\traise Exception(\"Not yet supported. 
Results have to be probably scaled with circumference of Elipse\")\n\t\t\tself.Rreal = Rt[0]\n\n\t\tif len(Rt) > 1:\n\t\t\tself.Rimag = Rt[1]\n\n\t\t# If only half of the poles should be generated, only rotate half around the quadrature range!\n\t\tif half:\n\t\t\traise Exception(\"Halving not supported, yet\")\n\t\t\t#P //= 2\n\n\n\t\t#\n\t\t# Compute support points of quadrature\n\t\t#\n\t\tself.coords = []\n\t\tfor j in range(self.P):\n\t\t\ttheta_j = 2.0*math.pi*(j+0.5)/self.P\n\t\t\tgamma_j = self.CE(self.Rreal, self.Rimag, cmath.exp(1j*theta_j)) + mu\n\t\t\tself.coords.append(gamma_j)\n\n\n\t\tself.alpha = []\n\t\tself.beta = []\n\t\tfor j in range(self.P):\n\t\t\ttheta_j = 2.0*math.pi*(j+0.5)/self.P\n\t\t\tgamma_j = self.CE(self.Rreal, self.Rimag, cmath.exp(1j*theta_j)) + mu\n\n\t\t\tk = self.CE(self.Rreal, self.Rimag, cmath.exp(1j*theta_j))\n\n\t\t\tbeta = -self.phi(phiN, gamma_j)*k\n\t\t\tbeta /= P\n\t\t\talpha = -(k + mu)\n\n\t\t\tself.alpha.append(alpha)\n\t\t\tself.beta.append(beta)\n\n\n\tdef plot(self, filename = None):\n\t\tpoints_re = []\n\t\tpoints_im = []\n\t\tfor j in range(self.P):\n\t\t\tpoints_re.append(self.coords[j].real)\n\t\t\tpoints_im.append(self.coords[j].imag)\n\n\t\tpoints_re.append(points_re[0])\n\t\tpoints_im.append(points_im[0])\n\n\t\tplt.plot(points_re, points_im, '-bo')\n\n\t\tif filename != None:\n\t\t\tplt.savefig(filename)\n\t\telse:\n\t\t\tplt.show()\n\n\n\n\tdef approx_phi_pde(self, dt_L, U):\n\t\tS = len(dt_L)\n\n\t\taccum = np.array([0.j, 0.j])\n\t\tfor j in range(len(self.alpha)):\n\t\t\tM_inv = np.linalg.inv(dt_L + np.identity(S)*self.alpha[j])\n\t\t\taccum += self.beta[j] * np.dot(M_inv, U)\n\n\t\treturn accum\n\n\tdef approx_phi_ode(self, dt_L, U):\n\t\taccum = 0.0\n\t\tfor j in range(len(self.alpha)):\n\t\t\tM_inv = 1.0/(dt_L + self.alpha[j])\n\t\t\taccum += self.beta[j] * M_inv * U\n\n\t\treturn accum\n\n\n\tdef analytical_phi_pde(self, dt_L, U):\n\t\tS = len(dt_L)\n\n\t\t# Setup eigenvalues and Eigenvectors for analytical solution\n\t\tLEvals, LEvecs = np.linalg.eig(dt_L)\n\t\tLEvecs_inv = np.linalg.inv(LEvecs)\n\n\t\tif True:\n\t\t\terror = np.sum(np.absolute(dt_L - np.dot(np.dot(LEvecs, np.diag(LEvals)), LEvecs_inv)))\n\t\t\tif error > 1e-10:\n\t\t\t\traise Exception(\"Error \"+str(error)+\" too large\")\n\n\t\tUwave = np.dot(LEvecs_inv, U)\n\t\ttmp = np.array([self.phi(self.phiN, LEvals[i])*Uwave[i] for i in range(S)])\n\t\treturn np.dot(LEvecs, tmp)\n\n\tdef analytical_phi_ode(self, dt_L, U):\n\t\t# Setup eigenvalues and Eigenvectors for analytical solution\n\t\treturn self.phi(self.phiN, dt_L)*U\n\n\n", "#! 
/usr/bin/env python3\n\nimport sys\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib.lines import Line2D\n\n\nfrom mule.postprocessing.JobData import *\n\nif len(sys.argv) > 2:\n\toutput_filename = sys.argv[1]\nelse:\n\tprint(\"\")\n\tprint(\"Usage:\")\n\tprint(\"\")\n\tprint(\"\t\"+sys.argv[0]+\" [output_filename.pdf] [jobdir]\")\n\tprint(\"\")\n\tsys.exit(1)\n\n#List all parameters of job\njd = JobData(sys.argv[2])\njd_flat = jd.get_flattened_data()\n#for key in jd_flat:\n\t#print(key, '->', jd_flat[key])\t\t\njd_raw = jd.get_job_raw_data()\n\noutput=jd_raw['output']\nruntime=jd_raw['jobgeneration']\nruntime=runtime['runtime']\n\nnm_name=output['simulation_benchmark_normal_modes.case']\nnwaves=int(output['simulation_benchmark_normal_modes.nwaves'])\n\nk0=[]\nk1=[]\nd0=[]\ndwest=[]\ndeast=[]\nwaves = \"Initial waves (k0,k1):(geo,west,east) \"\nfor i in range(nwaves):\n\twave=\"simulation_benchmark_normal_modes.w\"+str(i)+\".\"\n\tk0.append(int(output[wave+\"k0\"]))\n\tk1.append(int(output[wave+\"k1\"]))\n\td0.append(int(output[wave+\"d0\"]))\n\tdwest.append(int(output[wave+\"dwest\"]))\n\tdeast.append(int(output[wave+\"deast\"]))\n\twaves = waves+\"\\n (\"+str(k0[i])+\",\"+str(k1[i])+\"):(\"+str(d0[i])+\",\"+str(dwest[i])+\",\"+str(deast[i])+\") \"\n\nprint(\"Initial conditions\")\nprint(waves)\n#print(\"k1: \", k1)\n#print(\"Geo_mode: \", d0)\n#print(\"West_mode: \", dwest)\n#print(\"East_mode: \", deast)\n\nparams = \"h0=\"+str(runtime['h0'])+\", \"\\\n\t+\"f=\"+str(runtime['sphere_rotating_coriolis_omega'])+\", \"\\\n\t+\"g=\"+str(runtime['gravitation'])+\", \"\\\n\t+\"L=\"+str(runtime['plane_domain_size'])+\", \"\\\n\t+\"M=\"+str(runtime['space_res_spectral'])\n\t#+\"N=\"+str(runtime['space_res_physical'])\n\n\n\n\n\n#Read output_nm files\nnm_geo_file = jd_flat['runtime.p_job_dirpath']+\"/output_nm_geo_evol.txt\"\nnm_igwest_file = jd_flat['runtime.p_job_dirpath']+\"/output_nm_igwest_evol.txt\"\nnm_igeast_file = jd_flat['runtime.p_job_dirpath']+\"/output_nm_igeast_evol.txt\"\n\neps=10e-4\neps2=10e-4\nscales = {}\ntimerescale=60*60\n#Remove modes with null values\nexclude = ['n']\n\ndf_geo=pd.read_csv(nm_geo_file, sep='\\t', skipinitialspace=True, engine=\"python\")\ntime=df_geo['time']\ndf_geo=df_geo.loc[:, df_geo.columns.difference(exclude)]\ndf_geo_tmp=df_geo.loc[:, (df_geo > eps).any(axis=0)]\nscales['0']=eps\nif len(df_geo_tmp.columns) < 2:\n\tprint(\"Tolerance too large for geo, empty plot! Reducing tolerance!\")\n\tscales['0']=eps*eps2\n\tdf_geo=df_geo.loc[:, (df_geo > eps*eps2).any(axis=0)]\nelse:\n\tdf_geo = df_geo_tmp\ndf_geo.time=df_geo.time/timerescale\ndf_geo.set_index('time',drop=True,inplace=True)\n\ndf_west=pd.read_csv(nm_igwest_file, sep='\\t', skipinitialspace=True, engine=\"python\")\ndf_west=df_west.loc[:, df_west.columns.difference(exclude)]\ndf_west_tmp=df_west.loc[:, (df_west > eps).any(axis=0)]\nscales['1']=eps\nif len(df_west_tmp.columns) < 2:\n\tprint(\"Tolerance too large for west, empty plot! 
Reducing tolerance!\")\n\tscales['1']=eps*eps2\n\tdf_west=df_west.loc[:, (df_west > eps*eps2).any(axis=0)]\nelse:\n\tdf_west=df_west_tmp\ndf_west.time=df_west.time/timerescale\ndf_west.set_index('time',drop=True,inplace=True)\n\ndf_east=pd.read_csv(nm_igeast_file, sep='\\t', skipinitialspace=True, engine=\"python\")\ndf_east=df_east.loc[:, df_east.columns.difference(exclude)]\ndf_east_tmp=df_east.loc[:, (df_east > eps).any(axis=0)]\nscales['2']=eps\nif len(df_east_tmp.columns) < 2:\n\tprint(\"Tolerance too large for east, empty plot! Reducing tolerance!\")\n\tscales['2']=eps*eps2\n\tdf_east=df_east.loc[:, (df_east > eps*eps2).any(axis=0)]\nelse:\n\tdf_east=df_east_tmp\ndf_east.time=df_east.time/timerescale\ndf_east.set_index('time',drop=True,inplace=True)\n\nprint(df_geo)\nprint(df_west)\nprint(df_east)\n\n\n##########################################################\n# Plotting starts here\n##########################################################\n\nprint(\"*\"*80)\nprint(\"*\"*80)\nprint(\"*\"*80)\n\n\nfontsize=18\nfigsize=(10, 10)\n\nfig, axs = plt.subplots(3, figsize=(10,10), sharex=True)\n#plt.rc('text', usetex=True)\ntitle=\"Normal Mode Nonlinear Interaction\\n\"+params+\"\\n\"+waves\nfig.suptitle(title)\n\nfor i, ax in enumerate(axs):\n\tax.set_xscale(\"linear\", nonposx='clip')\n\tax.set_yscale(\"log\", nonposy='clip')\n\tylim=[scales[str(i)], 10]\n\tax.set_ylim(ylim)\n\n\n\nfor ax in axs.flat:\n ax.label_outer()\n\n\ncolors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n\nmarkers = []\nfor m in Line2D.markers:\n try:\n if len(m) == 1 and m != ' ' and m != '':\n markers.append(m)\n except TypeError:\n pass\n\nlinestyles = ['-', '--', ':', '-.']\n\n\nc = 0\n\n#ncol=2,handleheight=2.4, labelspacing=0.05\nncol=1\nif len(df_geo.columns):\n\tncol = 2\t\naxs[0].set(ylabel='Geostrophic Mode')\ndf_geo.plot(ax=axs[0])\naxs[0].legend(loc='center left', bbox_to_anchor= (1.01, 0.5), ncol=ncol)\n\nncol=1\nif len(df_west.columns):\n\tncol = 2\t\naxs[1].set(ylabel='IGWest Mode')\ndf_west.plot(ax=axs[1])\naxs[1].legend(loc='center left', bbox_to_anchor= (1.01, 0.5), ncol=ncol)\n\nncol=1\nif len(df_east.columns):\n\tncol = 2\t\n\n\ndf_east.plot(ax=axs[2])\naxs[2].set(xlabel=\"Time (hours)\", ylabel='IG East Mode')\naxs[2].legend(loc='center left', bbox_to_anchor= (1.01, 0.5), ncol=ncol)\n\nfig.subplots_adjust(right=0.7)\n\nplt.savefig(output_filename, transparent=True) #, bbox_inches='tight') #, pad_inches=0.02)\n\nplt.close()\n\n\n", "#! 
/usr/bin/env python3\n\nimport sys, os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mule.postprocessing.JobData import JobData\nfrom mule_local.postprocessing.SphereDataSpectral import SphereDataSpectral\nimport mule_local.postprocessing.shtnsfiledata as shtnsfiledata\n\n\ndebug_active = False\n\nclass postprocessing_swe:\n def __init__(self):\n self.job_data = None\n self.job_data_flattened = None\n self.rsphere = None\n self.grav = None\n\n self.phi_pert_spec = None\n self.vrt_spec = None\n self.div_spec = None\n\n self.sh = None\n self.sh_aa = None\n\n\n def setup(self,\n i_job_dir,\n i_phi_file,\n i_vrt_file,\n i_div_file\n ):\n self.job_dir = i_job_dir\n self.phi_pert_file = i_phi_file\n self.vrt_file = i_vrt_file\n self.div_file = i_div_file\n\n # Load job's data\n self.job_data = JobData(self.job_dir)\n self.job_data_flattened = self.job_data.get_flattened_data()\n\n # TODO: Determine this automagically from job data\n self.rsphere = 6371220\n self.grav = 9.80616\n self.coriolis = 7.292*1e-5\n\n # Load fields\n self.phi_pert_spec = SphereDataSpectral(self.phi_pert_file, setup_physical=False)\n self.vrt_spec = SphereDataSpectral(self.vrt_file, setup_physical=False)\n self.div_spec = SphereDataSpectral(self.div_file, setup_physical=False)\n\n # Setup transformations without anti-aliasing (lower physical resolution)\n self.sh = shtnsfiledata.shtnsfiledata(rsphere=self.rsphere)\n self.sh.setup(self.phi_pert_spec.file_info, anti_aliasing=False)\n\n # Setup transformations *with* anti-aliasing\n self.sh_aa = shtnsfiledata.shtnsfiledata(rsphere=self.rsphere)\n self.sh_aa.setup(self.phi_pert_spec.file_info, anti_aliasing=True)\n\n print(\"A\")\n print(self.vrt_spec.data_spectral)\n print(self.vrt_spec.data_spectral.shape)\n print(self.div_spec.data_spectral)\n print(self.div_spec.data_spectral.shape)\n u_phys_data, v_phys_data = self.sh_aa.vrtdiv2uv(self.vrt_spec.data_spectral, self.div_spec.data_spectral)\n\n def plot_physical_field_data_only(\n self,\n i_tag,\n i_data_phys,\n i_output_filename\n ):\n plt.imshow(i_data_phys)\n\n if i_tag == \"h_pert\":\n e = 50\n\n elif i_tag == \"phi_pert\":\n e = 50\n\n elif i_tag == \"vrt\":\n \"\"\"\n Contours from\n Galewsky, J., Scott, R. K., & Polvani, L. M. (2004). An initial-value problem for testing numerical models of the global shallow-water equations. Tellus, Series A: Dynamic Meteorology and Oceanography, 56(5), 429–440. 
https://doi.org/10.1111/j.1600-0870.2004.00071.x\n Figure 1, Page 3\n \n They use the potential vorticity, h*vrt and contours e=0.2*\\Omega/H\n Using just the vorticity, we get e=0.2*\\Omega \n \"\"\"\n e = 0.2*self.coriolis\n\n elif i_tag == \"div\":\n e = 1e-7\n\n else:\n raise Exception(\"Unknown tag \"+i_tag)\n\n num_contours = 30\n\n levels = np.arange(e, e * (num_contours+1), e)\n print(\"positive contour levels: \"+str(levels))\n plt.contour(i_data_phys, levels=levels, linestyles='solid', linewidths=0.2, colors='black')\n\n levels = np.arange(-e * num_contours, 0, e)\n print(\"negative contour levels: \"+str(levels))\n plt.contour(i_data_phys, levels=levels, linestyles='dashed', linewidths=0.2, colors='black')\n\n pass\n\n def plot_physical_field(\n self,\n i_tag,\n i_data,\n i_output_filename,\n i_title\n ):\n print(\"Plotting \", i_output_filename)\n plt.close()\n\n self.plot_physical_field_data_only(i_tag, i_data, i_output_filename)\n plt.title(i_title)\n\n plt.tight_layout()\n plt.savefig(i_output_filename)\n\n def plot_physical_fields(self):\n # Compute u and v without anti-aliasing\n u_phys_data, v_phys_data = self.sh.vrtdiv2uv(self.vrt_spec.data_spectral, self.div_spec.data_spectral)\n\n vrt_phys_data = self.sh.spec2phys(self.vrt_spec.data_spectral)\n div_phys_data = self.sh.spec2phys(self.div_spec.data_spectral)\n\n # Compute u and v without anti-aliasing\n gh_pert_phys_data = self.sh.spec2phys(self.phi_pert_spec.data_spectral)\n\n # Compute height\n h_pert_phys_data = gh_pert_phys_data/self.grav\n\n\n ##############################\n # Plot the height field\n ##############################\n\n # Doesn't really exist, but just call it somehow\n input_filename = self.phi_pert_file.replace(\"phi\", \"h\")\n\n output_filename = input_filename.replace('.sweet', '.pdf')\n output_filename = output_filename.replace('/output', '/plot_output')\n\n title = input_filename.replace('.pdf', '')\n _, title = os.path.split(title)\n\n self.plot_physical_field(\n \"h_pert\",\n h_pert_phys_data,\n output_filename,\n title\n )\n\n\n ##############################\n # Plot the vorticity field\n ##############################\n\n input_filename = self.vrt_file\n\n output_filename = input_filename.replace('.sweet', '.pdf')\n output_filename = output_filename.replace('/output', '/plot_output')\n\n title = input_filename.replace('.pdf', '')\n _, title = os.path.split(title)\n\n self.plot_physical_field(\n \"vrt\",\n vrt_phys_data,\n output_filename,\n title\n )\n\n\n ##############################\n # Plot the divergence field\n ##############################\n\n input_filename = self.div_file\n\n output_filename = input_filename.replace('.sweet', '.pdf')\n output_filename = output_filename.replace('/output', '/plot_output')\n\n title = input_filename.replace('.pdf', '')\n _, title = os.path.split(title)\n\n self.plot_physical_field(\n \"div\",\n div_phys_data,\n output_filename,\n title\n )\n\n def _ke_spectrum_dist_bucket(self, real_m, verbose=False):\n \"\"\"\n Compute the bucket and distribution of mode real_m which is not integer\n\n Now we need to split things up into buckets, 0th mode bucket\n\n m = 0 ... 0.5 ... 1.0 ... 1.5 ... 2.0 ... 2.5 ... 3.0 ...\n | | | |\n | bck0 | bucket1 | bucket2 | bucket3\n | | | |\n\n Change the real_m to:\n\n m = 0.5 .. 1.0 ... 1.5 ... 2.0 ... 2.5 ... 3.0 ... 
3.5 ...\n | | | |\n | bck0 | bucket1 | bucket2 | bucket3\n | | | |\n \"\"\"\n\n if verbose:\n print(\" +++ using real mode \", real_m)\n\n real_mh = real_m + 0.5\n\n # Now things are easy...\n\n bucket_a_num = int(real_mh - 0.5)\n bucket_b_num = int(real_mh + 0.5)\n\n bucket_a_weight = 1.0 - (real_mh - 0.5 - bucket_a_num)\n bucket_b_weight = real_mh + 0.5 - bucket_b_num\n\n if verbose:\n print(\" +++ bucket \", bucket_a_num, \" gets \", bucket_a_weight)\n print(\" +++ bucket \", bucket_b_num, \" gets \", bucket_b_weight)\n\n assert(bucket_a_num >= 0)\n assert(bucket_b_num == bucket_a_num + 1)\n assert(bucket_a_weight >= 0 and bucket_a_weight <= 1.0)\n assert np.allclose(bucket_a_weight + bucket_b_weight, 1.0)\n return bucket_a_num, bucket_a_weight, bucket_b_num, bucket_b_weight\n\n def _ke_spectrum_dist_bucket_array(self, real_m, verbose=False):\n \"\"\"\n Compute the bucket and distribution of mode real_m which is not integer\n\n Now we need to split things up into buckets, 0th mode bucket\n\n m = 0 ... 0.5 ... 1.0 ... 1.5 ... 2.0 ... 2.5 ... 3.0 ...\n | | | |\n | bck0 | bucket1 | bucket2 | bucket3\n | | | |\n\n Change the real_m to:\n\n m = 0.5 .. 1.0 ... 1.5 ... 2.0 ... 2.5 ... 3.0 ... 3.5 ...\n | | | |\n | bck0 | bucket1 | bucket2 | bucket3\n | | | |\n \"\"\"\n\n if verbose:\n print(\" +++ using real mode \", real_m)\n\n real_mh = real_m + 0.5\n\n # Now things are easy...\n\n bucket_a_num = np.array(real_mh - 0.5, dtype=int)\n bucket_b_num = np.array(real_mh + 0.5, dtype=int)\n\n bucket_a_weight = 1.0 - (real_mh - 0.5 - bucket_a_num)\n bucket_b_weight = real_mh + 0.5 - bucket_b_num\n\n if verbose:\n print(\" +++ bucket \", bucket_a_num, \" gets \", bucket_a_weight)\n print(\" +++ bucket \", bucket_b_num, \" gets \", bucket_b_weight)\n\n assert(np.greater_equal(bucket_a_num, 0).all())\n assert(np.equal(bucket_b_num, bucket_a_num+1).all())\n\n assert(np.greater_equal(bucket_a_weight, 0).all())\n assert(np.less_equal(bucket_a_weight, 1).all())\n\n assert np.allclose(bucket_a_weight + bucket_b_weight, 1.0)\n return bucket_a_num, bucket_a_weight, bucket_b_num, bucket_b_weight\n\n\n def plot_kinetic_energy_distribution(self):\n \"\"\"\n Compute\n Ke = 1/2 * m * V^2\n \"\"\"\n\n # Compute u and v, prepared for anti-aliasing\n u_phys_data, v_phys_data = self.sh_aa.vrtdiv2uv(self.vrt_spec.data_spectral, self.div_spec.data_spectral)\n\n # Compute\n # u*u + v*v\n # and apply anti-aliasing\n V2_phys_data = self.sh.spec2phys(self.sh_aa.phys2spec(u_phys_data * u_phys_data + v_phys_data * v_phys_data))\n\n # Get mass (which we relate to the height of the SWE)\n # m = geopot. 
/ g\n m_phys_data = self.sh.spec2phys(self.phi_pert_spec.data_spectral) / self.grav\n\n # Finish computation of\n # Ke = 1/2 * m * V^2\n # in physical space\n ke = 0.5 * m_phys_data * V2_phys_data\n\n\n print(\"Resolution in physical space: \", ke.shape)\n ke_spec = np.fft.rfft(ke, axis=1)\n print(\"Resolution after longitudinal FT transformation: \", ke_spec.shape)\n\n def _spec_to_buckets_iter(mode_numbers, modes_data, buckets):\n \"\"\"\n Iterate over all Fourier modes\n m here relates to the number of waves\n \"\"\"\n for m in range(len(mode_numbers)):\n # Compute real mode (including shortening by being closer to poles)\n # There would be additional number of waves, hence we need to divide by this\n bucket_a_num, bucket_a_weight, bucket_b_num, bucket_b_weight = self._ke_spectrum_dist_bucket(mode_numbers[m])\n\n ampl = np.abs(modes_data[m])\n\n if bucket_a_num < len(buckets):\n buckets[bucket_a_num] += ampl\n\n if bucket_b_num < len(buckets):\n buckets[bucket_b_num] += ampl\n\n\n\n def _spec_to_buckets_fast(mode_numbers, modes_data, buckets):\n\n bucket_a_num_, bucket_a_weight_, bucket_b_num_, bucket_b_weight_ = self._ke_spectrum_dist_bucket_array(mode_numbers)\n ampl_ = np.abs(modes_data)\n\n for m in range(len(buckets)):\n if bucket_a_num_[m] < len(buckets):\n buckets[bucket_a_num_[m]] += ampl_[m]\n\n if bucket_b_num_[m] < len(buckets):\n buckets[bucket_b_num_[m]] += ampl_[m]\n\n\n\n num_buckets = ke_spec.shape[1]-1\n print(\"Setting up \", num_buckets, \"spectral buckets\")\n buckets = np.zeros(num_buckets)\n\n # Iterate over all longitude stripes\n for i in range(ke_spec.shape[0]):\n print(\"Lat: \", self.sh.lats[i])\n\n # Compute scalar to multiply modal number with\n # scaling factor \\in [\n s = np.cos(self.sh.lats[i])\n\n m_ = range(num_buckets)\n real_m_ = np.array(m_) / s\n\n if debug_active:\n a = np.zeros_like(buckets)\n _spec_to_buckets_iter(real_m_, ke_spec[i], a)\n\n b = np.zeros_like(buckets)\n _spec_to_buckets_fast(real_m_, ke_spec[i], b)\n\n print(a-b)\n assert np.allclose(a, b)\n\n\n if 1:\n _spec_to_buckets_fast(real_m_, ke_spec[i], buckets)\n\n\n #\n # Plot results\n #\n\n import matplotlib.pyplot as plt\n\n def modes_to_wavelengths(modes):\n return np.pi * 2.0 * self.rsphere / modes\n\n\n def _plot_buckets(buckets, label):\n # Compute wavelengths\n _ = np.arange(len(buckets))\n _[0] = -1.0 # avoid div/0\n wavelengths = modes_to_wavelengths(_)\n\n # bin first and last mode\n plt.plot(wavelengths[1:-1], buckets[1:-1], label=label)\n\n plt.gca().invert_xaxis()\n plt.xlabel(\"Wavelength\")\n plt.ylabel(\"Amplitude\")\n\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n\n\n def _plot_k_lines(ax):\n\n k_ = np.array([100, 200])\n wl_ = modes_to_wavelengths(k_)\n\n #\n # k^-3\n #\n s = 1e15\n y_ = np.power(k_, -3.0)*s\n line = plt.plot(wl_, y_, linestyle=\"solid\", color=\"gray\")\n x = line[0].get_xdata()[1]\n y = line[0].get_ydata()[1]\n\n ax.annotate(\n \"k^-3\",\n xy=(x * 1.05, y * 0.35),\n color=line[0].get_color(),\n size=10,\n )\n\n if False:\n #\n # k^-(5/3)\n #\n s *= np.power(k_[0], -3.0)/np.power(k_[0], -5.0/3.0)\n y_ = np.power(k_, -5.0/3.0)*s\n line = plt.plot(wl_, y_, linestyle=\"solid\", color=\"gray\")\n x = line[0].get_xdata()[1]\n y = line[0].get_ydata()[1]\n\n ax.annotate(\n \"k^-5/3\",\n xy=(x * 1.05, y * 1.35),\n color=line[0].get_color(),\n size=10,\n )\n\n\n plt.close()\n fig, ax = plt.subplots(figsize=(6, 4))\n\n _plot_buckets(buckets, \"test\")\n _plot_k_lines(ax)\n plt.legend()\n\n # Pseudo input filename\n input_filename = 
self.vrt_file.replace(\"vrt\", \"kinetic_energy_spectrum\")\n\n title = input_filename[:]\n title = title.replace('.sweet', '')\n title = title.replace('output_prog_', '')\n _, title = os.path.split(title)\n\n plt.title(title)\n\n plt.tight_layout()\n\n output_filename = input_filename\n output_filename = output_filename.replace('.sweet', '.pdf')\n output_filename = output_filename.replace('/output', '/plot_output')\n output_filename = output_filename.replace('output_prog_', '')\n\n plt.savefig(output_filename)\n" ]
[ [ "matplotlib.ticker.FormatStrFormatter", "matplotlib.ticker.LogLocator" ], [ "numpy.diag", "numpy.dot", "numpy.linalg.inv", "numpy.linalg.eig", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.identity", "numpy.array", "matplotlib.pyplot.show" ], [ "pandas.read_csv", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.imshow", "matplotlib.pyplot.plot", "numpy.zeros_like", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.allclose", "numpy.arange", "numpy.greater_equal", "numpy.less_equal", "matplotlib.pyplot.close", "numpy.zeros", "matplotlib.pyplot.title", "numpy.power", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "numpy.equal", "numpy.array", "matplotlib.pyplot.ylabel", "numpy.abs", "numpy.fft.rfft", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "numpy.cos", "matplotlib.pyplot.contour", "matplotlib.pyplot.xscale" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NunoEdgarGFlowHub/cvxpy
[ "43270fcc8af8fc4742f1b3519800b0074f2e6693" ]
[ "cvxpy/atoms/max.py" ]
[ "\"\"\"\nCopyright 2013 Steven Diamond\n\nThis file is part of CVXPY.\n\nCVXPY is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nCVXPY is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with CVXPY. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nfrom cvxpy.atoms.atom import Atom\nfrom cvxpy.atoms.axis_atom import AxisAtom\nimport cvxpy.lin_ops.lin_utils as lu\nimport numpy as np\n\n\nclass max(AxisAtom):\n \"\"\":math:`\\max_{i,j}\\{X_{i,j}\\}`.\n \"\"\"\n\n def __init__(self, x, axis=None, keepdims=False):\n super(max, self).__init__(x, axis=axis, keepdims=keepdims)\n\n @Atom.numpy_numeric\n def numeric(self, values):\n \"\"\"Returns the largest entry in x.\n \"\"\"\n return values[0].max(axis=self.axis, keepdims=self.keepdims)\n\n def _grad(self, values):\n \"\"\"Gives the (sub/super)gradient of the atom w.r.t. each argument.\n\n Matrix expressions are vectorized, so the gradient is a matrix.\n\n Args:\n values: A list of numeric values for the arguments.\n\n Returns:\n A list of SciPy CSC sparse matrices or None.\n \"\"\"\n return self._axis_grad(values)\n\n def _column_grad(self, value):\n \"\"\"Gives the (sub/super)gradient of the atom w.r.t. a column argument.\n\n Matrix expressions are vectorized, so the gradient is a matrix.\n\n Args:\n value: A numeric value for a column.\n\n Returns:\n A NumPy ndarray or None.\n \"\"\"\n # Grad: 1 for a largest index.\n value = np.matrix(value).A.ravel(order='F')\n idx = np.argmax(value)\n D = np.zeros((value.size, 1))\n D[idx] = 1\n return D\n\n def sign_from_args(self):\n \"\"\"Returns sign (is positive, is negative) of the expression.\n \"\"\"\n # Same as argument.\n return (self.args[0].is_nonneg(), self.args[0].is_nonpos())\n\n def is_atom_convex(self):\n \"\"\"Is the atom convex?\n \"\"\"\n return True\n\n def is_atom_concave(self):\n \"\"\"Is the atom concave?\n \"\"\"\n return False\n\n def is_incr(self, idx):\n \"\"\"Is the composition non-decreasing in argument idx?\n \"\"\"\n return True\n\n def is_decr(self, idx):\n \"\"\"Is the composition non-increasing in argument idx?\n \"\"\"\n return False\n\n def is_pwl(self):\n \"\"\"Is the atom piecewise linear?\n \"\"\"\n return self.args[0].is_pwl()\n\n @staticmethod\n def graph_implementation(arg_objs, shape, data=None):\n \"\"\"Reduces the atom to an affine expression and list of constraints.\n\n Parameters\n ----------\n arg_objs : list\n LinExpr for each argument.\n shape : tuple\n The shape of the resulting expression.\n data :\n Additional data required by the atom.\n\n Returns\n -------\n tuple\n (LinOp for objective, list of constraints)\n \"\"\"\n axis = data[0]\n if axis is None:\n t = lu.create_var((1, 1))\n promoted_t = lu.promote(t, arg_objs[0].shape)\n elif axis == 0:\n t = lu.create_var((1, arg_objs[0].shape[1]))\n const_shape = (arg_objs[0].shape[0], 1)\n ones = lu.create_const(np.ones(const_shape), const_shape)\n promoted_t = lu.mul_expr(ones, t, arg_objs[0].shape)\n else: # axis == 1\n t = lu.create_var((arg_objs[0].shape[0], 1))\n const_shape = (1, arg_objs[0].shape[1])\n ones = lu.create_const(np.ones(const_shape), const_shape)\n 
promoted_t = lu.rmul_expr(t, ones, arg_objs[0].shape)\n\n constraints = [lu.create_leq(arg_objs[0], promoted_t)]\n return (t, constraints)\n" ]
[ [ "numpy.matrix", "numpy.argmax", "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
damien911224/augmentation-corruption
[ "4cf22bd3be1d100635fb6cd41e9b71a6949b5dd0", "4cf22bd3be1d100635fb6cd41e9b71a6949b5dd0" ]
[ "experiments/severity_scan_imagenet.py", "experiments/overlap/test_corrupt_net.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport hydra\nfrom hydra.utils import instantiate\nimport logging\nfrom overlap.train_net import train_net\nfrom overlap.test_net import test_net\nimport numpy as np\nimport torch\nimport pickle\nimport os\nimport omegaconf\nfrom overlap.extract_features import extract_features\nimport submitit\n\nlog = logging.getLogger(__name__)\n\[email protected](config_path=\"conf/severity_scan_imagenet.yaml\")\ndef run(cfg):\n if cfg.num_gpus > 1:\n job_env = submitit.JobEnvironment()\n rank = job_env.global_rank\n world_size = job_env.num_tasks\n if rank != 0:\n logging.root.handlers = []\n try:\n torch.cuda.set_device(rank)\n torch.distributed.init_process_group(\n backend='nccl',\n init_method=\"tcp://{}:{}\".format('localhost', 10001),\n world_size=world_size,\n rank=rank\n )\n train(cfg, is_leader=(rank==0))\n except KeyboardInterrupt:\n pass\n finally:\n torch.distributed.destroy_process_group()\n else:\n train(cfg, is_leader=True)\n\ndef train(cfg, is_leader=True):\n\n np.random.seed(cfg.rng_seed)\n torch.manual_seed(cfg.rng_seed)\n\n log.info(cfg.pretty())\n cur_device = torch.cuda.current_device()\n model = instantiate(cfg.model).cuda(device=cur_device)\n if cfg.num_gpus > 1:\n model = torch.nn.parallel.DistributedDataParallel(\n module=model,\n device_ids=[cur_device],\n output_device=cur_device\n )\n optimizer = instantiate(cfg.optim, model.parameters())\n if cfg.optim.max_epoch > 0:\n train_dataset = instantiate(cfg.train)\n else:\n train_dataset = None\n test_dataset = instantiate(cfg.test)\n lr_policy = instantiate(cfg.optim.lr_policy)\n with omegaconf.open_dict(cfg):\n feature_extractor = instantiate(cfg.ft, num_gpus=cfg.num_gpus, is_leader=is_leader)\n feature_extractor.train()\n \n train_net(model=model,\n optimizer=optimizer,\n train_dataset=train_dataset,\n batch_size=cfg.train.batch_size,\n max_epoch=cfg.optim.max_epoch,\n loader_params=cfg.data_loader,\n lr_policy=lr_policy,\n save_period=cfg.train.checkpoint_period,\n weights=cfg.train.weights,\n num_gpus=cfg.num_gpus,\n is_leader=is_leader\n )\n\n err = test_net(model=model,\n test_dataset=test_dataset,\n batch_size=cfg.test.batch_size,\n loader_params=cfg.data_loader,\n output_name='test_epoch',\n num_gpus=cfg.num_gpus)\n\n if os.path.exists(cfg.feature_file):\n feature_dict = {k : v for k, v in np.load(cfg.feature_file).items()}\n else:\n feature_dict = {}\n indices = np.load(cfg.ft_corrupt.indices_file)\n for aug in cfg.aug_string.split(\"--\"):\n if len(aug.split(\"-\")) > 1:\n #log.info(\"Severity provided in corrupt.aug_string will be weighted by given severity.\")\n sev = aug.split(\"-\")[1]\n if len(sev.split(\"_\")) > 1:\n low = float(sev.split(\"_\")[0])\n high = float(sev.split(\"_\")[1])\n else:\n low = 0.0\n high = float(sev)\n\n sev_factor = (high - low) * cfg.severity / 10 + low\n else:\n sev_factor = cfg.severity\n aug = aug.split(\"-\")[0]\n aug_string = \"{}-{}\".format(aug, sev_factor)\n if aug_string in feature_dict:\n continue\n with omegaconf.open_dict(cfg.corrupt):\n corrupt_dataset = instantiate(cfg.corrupt, aug_string=aug_string)\n err = test_net(model=model,\n test_dataset=corrupt_dataset,\n batch_size=cfg.corrupt.batch_size,\n loader_params=cfg.data_loader,\n output_name=aug_string,\n num_gpus=cfg.num_gpus)\n with omegaconf.open_dict(cfg.ft_corrupt):\n ft_corrupt_dataset = instantiate(cfg.ft_corrupt, 
aug_string=aug_string)\n if cfg.ft_corrupt.params.num_transforms is not None:\n ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)\n else:\n ft_corrupt_dataset = torch.utils.data.Subset(ft_corrupt_dataset, indices)\n \n feature = extract_features(feature_extractor=feature_extractor,\n dataset=ft_corrupt_dataset,\n batch_size=cfg.ft_corrupt.batch_size,\n loader_params=cfg.data_loader,\n average=True,\n num_gpus=cfg.num_gpus)\n feature_dict[aug_string] = feature\n if is_leader:\n np.savez(cfg.feature_file, **feature_dict)\n\nif __name__==\"__main__\":\n run()\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport logging\nfrom .utils import logging as lu\nfrom omegaconf import open_dict\nfrom .augmentations.utils import aug_finder\nfrom hydra.utils import instantiate\nimport numpy as np\nimport os\nimport pickle\n\n\nlog = logging.getLogger(__name__)\n\ndef test_corrupt_net(model, corrupt_cfg, batch_size, loader_params, aug_string=None, mCE_denom=None, clean_err=None, imagenetc_grouping=True, num_gpus=1, log_name=None):\n\n model.eval()\n if aug_string is None:\n augs = aug_finder.get_augs_by_tag(['imagenet_c'])\n severities = [1,2,3,4,5]\n augs = [\"{}-{}\".format(a.name, s) for a in augs for s in severities]\n else:\n augs = aug_string.split(\"--\")\n\n\n if log_name is not None and os.path.exists(log_name):\n prestats = lu.load_json_stats(log_name)\n else:\n prestats = None\n \n\n errs = []\n for aug in augs:\n if prestats is not None and len(lu.parse_json_stats(prestats, row_type=aug, key='top1_err')) > 0:\n continue\n with open_dict(corrupt_cfg):\n corrupt_dataset = instantiate(corrupt_cfg, aug_string=aug)\n sampler = torch.utils.data.distributed.DistributedSampler(corrupt_dataset)\\\n if num_gpus > 1 else None\n loader = torch.utils.data.DataLoader(\n corrupt_dataset,\n batch_size=batch_size,\n shuffle=False,\n sampler=sampler,\n num_workers=loader_params.num_workers,\n pin_memory=loader_params.pin_memory,\n drop_last=False\n )\n num_correct = 0\n for curr_iter, (inputs, labels) in enumerate(loader):\n inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)\n preds = model(inputs)\n correct = torch.sum(torch.argmax(preds, dim=1)==labels)\n if num_gpus > 1:\n torch.distributed.all_reduce(correct)\n num_correct += correct.item()\n\n err = 100 * (1 - num_correct / len(corrupt_dataset))\n stats = {'_type' : aug, 'top1_err' : err}\n lu.log_json_stats(stats)\n errs.append(err)\n\n\n # Calculating records\n if mCE_denom is not None:\n mCE_denom = pickle.load(open(os.path.join(os.path.dirname(__file__), '../baseline_data/', mCE_denom), 'rb'))\n\n errs = np.array(errs)\n aug_names = [a.split(\"-\")[0] for a in augs]\n unique_aug_names = list(set(aug_names))\n avg_errs = [np.mean(errs[[i for i, a in enumerate(aug_names) if a==u]]) for u in unique_aug_names]\n avg_errs = np.array(avg_errs)\n mCE = None\n rmCE = None\n if mCE_denom:\n mCE = [100 * avg_errs[i] / mCE_denom[a] for i, a in enumerate(unique_aug_names)]\n mCE = np.array(mCE)\n if clean_err:\n rmCE = [100 * (avg_errs[i] - clean_err) / (mCE_denom[a] - mCE_denom['clean'])\\\n for i, a in enumerate(unique_aug_names)]\n rmCE = np.array(rmCE)\n for i, a in enumerate(unique_aug_names):\n stats = {'_type' : a + '-avg', 'top1_err' : avg_errs[i]}\n if mCE is not None:\n stats['mCE'] = mCE[i]\n if rmCE is not None:\n stats['rmCE'] = rmCE[i]\n lu.log_json_stats(stats)\n if 
imagenetc_grouping:\n for aug_type in ['blur', 'digital', 'noise', 'weather', 'extra']:\n aug_indices = [i for i, a in enumerate(unique_aug_names)\\\n if aug_type in aug_finder.get_aug_by_name(a).tags]\n err_for_type = np.mean(avg_errs[aug_indices])\n stats = {'_type' : aug_type + '-avg', 'top1_err' : err_for_type}\n if mCE is not None:\n mCE_for_type = np.mean(mCE[aug_indices])\n stats['mCE'] = mCE_for_type\n if rmCE is not None:\n rmCE_for_type = np.mean(rmCE[aug_indices])\n stats['rmCE'] = rmCE_for_type\n lu.log_json_stats(stats)\n\n if imagenetc_grouping:\n indices = [i for i, a in enumerate(unique_aug_names)\\\n if 'extra' not in aug_finder.get_aug_by_name(a).tags]\n else:\n indices = [i for i, a in enumerate(unique_aug_names)]\n \n overall_avg = np.mean(avg_errs[indices])\n stats = {'_type' : 'overall-avg', 'top1_err' : overall_avg}\n if mCE is not None:\n overall_mCE = np.mean(mCE[indices])\n stats['mCE'] = overall_mCE\n if rmCE is not None:\n overall_rmCE = np.mean(rmCE[indices])\n stats['rmCE'] = overall_rmCE\n lu.log_json_stats(stats)\n" ]
[ [ "numpy.savez", "numpy.random.seed", "torch.cuda.current_device", "torch.cuda.set_device", "torch.manual_seed", "torch.distributed.destroy_process_group", "torch.utils.data.Subset", "numpy.load", "torch.nn.parallel.DistributedDataParallel" ], [ "torch.utils.data.distributed.DistributedSampler", "torch.utils.data.DataLoader", "numpy.mean", "numpy.array", "torch.distributed.all_reduce", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
francisengelmann/PyViz3D
[ "51e49788e2aafc522920cbde7c48e962aca8e7b5" ]
[ "examples/example_normals.py" ]
[ "import numpy as np\nimport pyviz3d.visualizer as viz\n\n\ndef create_color_palette():\n return np.array([\n (0, 0, 0),\n (174, 199, 232),\t\t# wall\n (152, 223, 138),\t\t# floor\n (31, 119, 180), \t\t# cabinet\n (255, 187, 120),\t\t# bed\n (188, 189, 34), \t\t# chair\n (140, 86, 75), \t\t# sofa\n (255, 152, 150),\t\t# table\n (214, 39, 40), \t\t# door\n (197, 176, 213),\t\t# window\n (148, 103, 189),\t\t# bookshelf\n (196, 156, 148),\t\t# picture\n (23, 190, 207), \t\t# counter\n (178, 76, 76),\n (247, 182, 210),\t\t# desk\n (66, 188, 102),\n (219, 219, 141),\t\t# curtain\n (140, 57, 197),\n (202, 185, 52),\n (51, 176, 203),\n (200, 54, 131),\n (92, 193, 61),\n (78, 71, 183),\n (172, 114, 82),\n (255, 127, 14), \t\t# refrigerator\n (91, 163, 138),\n (153, 98, 156),\n (140, 153, 101),\n (158, 218, 229),\t\t# shower curtain\n (100, 125, 154),\n (178, 127, 135),\n (120, 185, 128),\n (146, 111, 194),\n (44, 160, 44), \t\t# toilet\n (112, 128, 144),\t\t# sink\n (96, 207, 209),\n (227, 119, 194),\t\t# bathtub\n (213, 92, 176),\n (94, 106, 211),\n (82, 84, 163), \t\t# otherfurn\n (100, 85, 144)\n ], dtype=np.uint8)\n\n\ndef main():\n\n # First, we set up a visualizer\n v = viz.Visualizer()\n\n # Example with normals\n scene_name = 'scene0000_00_vh_clean_2'\n scene = np.load('examples/data/' + scene_name + '.npy')\n point_positions = scene[:, 0:3] - np.mean(scene[:, 0:3], axis=0)\n point_colors = scene[:, 3:6]\n point_labels = scene[:, -1].astype(int)\n point_normals = scene[:, 6:9]\n point_semantic_colors = create_color_palette()[point_labels]\n point_size = 35.0\n\n v.add_points('RGB Color', point_positions, point_colors, point_normals, point_size=point_size, visible=False)\n v.add_points('Semantics', point_positions, point_semantic_colors, point_normals, point_size=point_size)\n v.add_lines('Normals', point_positions, point_positions + point_normals/10, visible=True)\n\n # When we added everything we need to the visualizer, we save it.\n v.save('normals')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.load", "numpy.array", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lyuwen/phono3py
[ "9e2adffa6c07abb4bdbe4e4188c460cb8a0462fd", "9e2adffa6c07abb4bdbe4e4188c460cb8a0462fd" ]
[ "phono3py/cui/settings.py", "phono3py/phonon3/triplets.py" ]
[ "import numpy as np\nfrom phonopy.cui.settings import Settings, ConfParser, fracval\n\nclass Phono3pySettings(Settings):\n def __init__(self):\n Settings.__init__(self)\n\n self._boundary_mfp = 1.0e6 # In micrometre. The default value is\n # just set to avoid divergence.\n self._coarse_mesh_shifts = None\n self._const_ave_pp = None\n self._create_displacements = False\n self._cutoff_fc3_distance = None\n self._cutoff_pair_distance = None\n self._gamma_conversion_factor = None\n self._grid_addresses = None\n self._grid_points = None\n self._ion_clamped = False\n self._is_bterta = False\n self._is_compact_fc = False\n self._is_frequency_shift = False\n self._is_full_pp = False\n self._is_gruneisen = False\n self._is_imag_self_energy = False\n self._is_isotope = False\n self._is_joint_dos = False\n self._is_kappa_star = True\n self._is_lbte = False\n self._is_N_U = False\n self._is_reducible_collision_matrix = False\n self._is_symmetrize_fc2 = False\n self._is_symmetrize_fc3_q = False\n self._is_symmetrize_fc3_r = False\n self._mass_variances = None\n self._max_freepath = None\n self._mesh_divisors = None\n self._read_collision = None\n self._read_fc2 = False\n self._read_fc3 = False\n self._read_gamma = False\n self._read_phonon = False\n self._read_pp = False\n self._phonon_supercell_matrix = None\n self._pinv_cutoff = 1.0e-8\n self._pinv_solver = 0\n self._pp_conversion_factor = None\n self._scattering_event_class = None # scattering event class 1 or 2\n self._sigma_cutoff_width = None\n self._solve_collective_phonon = False\n self._use_ave_pp = False\n self._write_collision = False\n self._write_gamma_detail = False\n self._write_gamma = False\n self._write_phonon = False\n self._write_pp = False\n self._write_LBTE_solution = False\n\n def set_boundary_mfp(self, boundary_mfp):\n self._boundary_mfp = boundary_mfp\n\n def get_boundary_mfp(self):\n return self._boundary_mfp\n\n def set_coarse_mesh_shifts(self, coarse_mesh_shifts):\n self._coarse_mesh_shifts = coarse_mesh_shifts\n\n def get_coarse_mesh_shifts(self):\n return self._coarse_mesh_shifts\n\n def set_create_displacements(self, create_displacements):\n self._create_displacements = create_displacements\n\n def get_create_displacements(self):\n return self._create_displacements\n\n def set_constant_averaged_pp_interaction(self, ave_pp):\n self._const_ave_pp = ave_pp\n\n def get_constant_averaged_pp_interaction(self):\n return self._const_ave_pp\n\n def set_cutoff_fc3_distance(self, cutoff_fc3_distance):\n self._cutoff_fc3_distance = cutoff_fc3_distance\n\n def get_cutoff_fc3_distance(self):\n return self._cutoff_fc3_distance\n\n def set_cutoff_pair_distance(self, cutoff_pair_distance):\n self._cutoff_pair_distance = cutoff_pair_distance\n\n def get_cutoff_pair_distance(self):\n return self._cutoff_pair_distance\n\n def set_gamma_conversion_factor(self, gamma_conversion_factor):\n self._gamma_conversion_factor = gamma_conversion_factor\n\n def get_gamma_conversion_factor(self):\n return self._gamma_conversion_factor\n\n def set_grid_addresses(self, grid_addresses):\n self._grid_addresses = grid_addresses\n\n def get_grid_addresses(self):\n return self._grid_addresses\n\n def set_grid_points(self, grid_points):\n self._grid_points = grid_points\n\n def get_grid_points(self):\n return self._grid_points\n\n def set_ion_clamped(self, ion_clamped):\n self._ion_clamped = ion_clamped\n\n def get_ion_clamped(self):\n return self._ion_clamped\n\n def set_is_bterta(self, is_bterta):\n self._is_bterta = is_bterta\n\n def get_is_bterta(self):\n 
return self._is_bterta\n\n def set_is_compact_fc(self, is_compact_fc):\n self._is_compact_fc = is_compact_fc\n\n def get_is_compact_fc(self):\n return self._is_compact_fc\n\n def set_is_frequency_shift(self, is_frequency_shift):\n self._is_frequency_shift = is_frequency_shift\n\n def get_is_frequency_shift(self):\n return self._is_frequency_shift\n\n def set_is_full_pp(self, is_full_pp):\n self._is_full_pp = is_full_pp\n\n def get_is_full_pp(self):\n return self._is_full_pp\n\n def set_is_gruneisen(self, is_gruneisen):\n self._is_gruneisen = is_gruneisen\n\n def get_is_gruneisen(self):\n return self._is_gruneisen\n\n def set_is_imag_self_energy(self, is_imag_self_energy):\n self._is_imag_self_energy = is_imag_self_energy\n\n def get_is_imag_self_energy(self):\n return self._is_imag_self_energy\n\n def set_is_isotope(self, is_isotope):\n self._is_isotope = is_isotope\n\n def get_is_isotope(self):\n return self._is_isotope\n\n def set_is_joint_dos(self, is_joint_dos):\n self._is_joint_dos = is_joint_dos\n\n def get_is_joint_dos(self):\n return self._is_joint_dos\n\n def set_is_kappa_star(self, is_kappa_star):\n self._is_kappa_star = is_kappa_star\n\n def get_is_kappa_star(self):\n return self._is_kappa_star\n\n def set_is_lbte(self, is_lbte):\n self._is_lbte = is_lbte\n\n def get_is_lbte(self):\n return self._is_lbte\n\n def set_is_N_U(self, is_N_U):\n self._is_N_U = is_N_U\n\n def get_is_N_U(self):\n return self._is_N_U\n\n def set_is_reducible_collision_matrix(self, is_reducible_collision_matrix):\n self._is_reducible_collision_matrix = is_reducible_collision_matrix\n\n def get_is_reducible_collision_matrix(self):\n return self._is_reducible_collision_matrix\n\n def set_is_symmetrize_fc2(self, is_symmetrize_fc2):\n self._is_symmetrize_fc2 = is_symmetrize_fc2\n\n def get_is_symmetrize_fc2(self):\n return self._is_symmetrize_fc2\n\n def set_is_symmetrize_fc3_q(self, is_symmetrize_fc3_q):\n self._is_symmetrize_fc3_q = is_symmetrize_fc3_q\n\n def get_is_symmetrize_fc3_q(self):\n return self._is_symmetrize_fc3_q\n\n def set_is_symmetrize_fc3_r(self, is_symmetrize_fc3_r):\n self._is_symmetrize_fc3_r = is_symmetrize_fc3_r\n\n def get_is_symmetrize_fc3_r(self):\n return self._is_symmetrize_fc3_r\n\n def set_mass_variances(self, mass_variances):\n self._mass_variances = mass_variances\n\n def get_mass_variances(self):\n return self._mass_variances\n\n def set_max_freepath(self, max_freepath):\n self._max_freepath = max_freepath\n\n def get_max_freepath(self):\n return self._max_freepath\n\n def set_mesh_divisors(self, mesh_divisors):\n self._mesh_divisors = mesh_divisors\n\n def get_mesh_divisors(self):\n return self._mesh_divisors\n\n def set_phonon_supercell_matrix(self, matrix):\n self._phonon_supercell_matrix = matrix\n\n def get_phonon_supercell_matrix(self):\n return self._phonon_supercell_matrix\n\n def set_pinv_cutoff(self, pinv_cutoff):\n self._pinv_cutoff = pinv_cutoff\n\n def get_pinv_cutoff(self):\n return self._pinv_cutoff\n\n def set_pinv_solver(self, pinv_solver):\n self._pinv_solver = pinv_solver\n\n def get_pinv_solver(self):\n return self._pinv_solver\n\n def set_pp_conversion_factor(self, pp_conversion_factor):\n self._pp_conversion_factor = pp_conversion_factor\n\n def get_pp_conversion_factor(self):\n return self._pp_conversion_factor\n\n def set_read_collision(self, read_collision):\n self._read_collision = read_collision\n\n def get_read_collision(self):\n return self._read_collision\n\n def set_read_fc2(self, read_fc2):\n self._read_fc2 = read_fc2\n\n def 
get_read_fc2(self):\n return self._read_fc2\n\n def set_read_fc3(self, read_fc3):\n self._read_fc3 = read_fc3\n\n def get_read_fc3(self):\n return self._read_fc3\n\n def set_read_gamma(self, read_gamma):\n self._read_gamma = read_gamma\n\n def get_read_gamma(self):\n return self._read_gamma\n\n def set_read_phonon(self, read_phonon):\n self._read_phonon = read_phonon\n\n def get_read_phonon(self):\n return self._read_phonon\n\n def set_read_pp(self, read_pp):\n self._read_pp = read_pp\n\n def get_read_pp(self):\n return self._read_pp\n\n def set_scattering_event_class(self, scattering_event_class):\n self._scattering_event_class = scattering_event_class\n\n def get_scattering_event_class(self):\n return self._scattering_event_class\n\n def set_sigma_cutoff_width(self, sigma_cutoff_width):\n self._sigma_cutoff_width = sigma_cutoff_width\n\n def get_sigma_cutoff_width(self):\n return self._sigma_cutoff_width\n\n def set_solve_collective_phonon(self, solve_collective_phonon):\n self._solve_collective_phonon = solve_collective_phonon\n\n def get_solve_collective_phonon(self):\n return self._solve_collective_phonon\n\n def set_use_ave_pp(self, use_ave_pp):\n self._use_ave_pp = use_ave_pp\n\n def get_use_ave_pp(self):\n return self._use_ave_pp\n\n def set_write_collision(self, write_collision):\n self._write_collision = write_collision\n\n def get_write_collision(self):\n return self._write_collision\n\n def set_write_gamma_detail(self, write_gamma_detail):\n self._write_gamma_detail = write_gamma_detail\n\n def get_write_gamma_detail(self):\n return self._write_gamma_detail\n\n def set_write_gamma(self, write_gamma):\n self._write_gamma = write_gamma\n\n def get_write_gamma(self):\n return self._write_gamma\n\n def set_write_phonon(self, write_phonon):\n self._write_phonon = write_phonon\n\n def get_write_phonon(self):\n return self._write_phonon\n\n def set_write_pp(self, write_pp):\n self._write_pp = write_pp\n\n def get_write_pp(self):\n return self._write_pp\n\n def set_write_LBTE_solution(self, write_LBTE_solution):\n self._write_LBTE_solution = write_LBTE_solution\n\n def get_write_LBTE_solution(self):\n return self._write_LBTE_solution\n\n\nclass Phono3pyConfParser(ConfParser):\n def __init__(self, filename=None, args=None):\n self._settings = Phono3pySettings()\n confs = {}\n if filename is not None:\n ConfParser.__init__(self, filename=filename)\n self.read_file() # store .conf file setting in self._confs\n self._parse_conf()\n self._set_settings()\n confs.update(self._confs)\n if args is not None:\n ConfParser.__init__(self, args=args)\n self._read_options()\n self._parse_conf()\n self._set_settings()\n confs.update(self._confs)\n self._confs = confs\n\n def _read_options(self):\n self.read_options() # store data in self._confs\n if 'phonon_supercell_dimension' in self._args:\n dim_fc2 = self._args.phonon_supercell_dimension\n if dim_fc2 is not None:\n self._confs['dim_fc2'] = \" \".join(dim_fc2)\n\n if 'boundary_mfp' in self._args:\n if self._args.boundary_mfp is not None:\n self._confs['boundary_mfp'] = self._args.boundary_mfp\n\n if 'const_ave_pp' in self._args:\n const_ave_pp = self._args.const_ave_pp\n if const_ave_pp is not None:\n self._confs['const_ave_pp'] = const_ave_pp\n\n if 'cutoff_fc3_distance' in self._args:\n cutoff_fc3 = self._args.cutoff_fc3_distance\n if cutoff_fc3 is not None:\n self._confs['cutoff_fc3_distance'] = cutoff_fc3\n\n if 'cutoff_pair_distance' in self._args:\n cutoff_pair = self._args.cutoff_pair_distance\n if cutoff_pair is not None:\n 
self._confs['cutoff_pair_distance'] = cutoff_pair\n\n if 'gamma_conversion_factor' in self._args:\n g_conv_factor = self._args.gamma_conversion_factor\n if g_conv_factor is not None:\n self._confs['gamma_conversion_factor'] = g_conv_factor\n\n if 'grid_addresses' in self._args:\n grid_adrs = self._args.grid_addresses\n if grid_adrs is not None:\n self._confs['grid_addresses'] = \" \".join(grid_adrs)\n\n if 'grid_points' in self._args:\n if self._args.grid_points is not None:\n self._confs['grid_points'] = \" \".join(self._args.grid_points)\n\n if 'ion_clamped' in self._args:\n if self._args.ion_clamped:\n self._confs['ion_clamped'] = '.true.'\n\n if 'is_bterta' in self._args:\n if self._args.is_bterta:\n self._confs['bterta'] = '.true.'\n\n if 'is_compact_fc' in self._args:\n if self._args.is_compact_fc:\n self._confs['compact_fc'] = '.true.'\n\n if 'is_gruneisen' in self._args:\n if self._args.is_gruneisen:\n self._confs['gruneisen'] = '.true.'\n\n if 'is_displacement' in self._args:\n if self._args.is_displacement:\n self._confs['create_displacements'] = '.true.'\n\n if 'is_frequency_shift' in self._args:\n if self._args.is_frequency_shift:\n self._confs['frequency_shift'] = '.true.'\n\n if 'is_full_pp' in self._args:\n if self._args.is_full_pp:\n self._confs['full_pp'] = '.true.'\n\n if 'is_imag_self_energy' in self._args:\n if self._args.is_imag_self_energy:\n self._confs['imag_self_energy'] = '.true.'\n\n if 'is_isotope' in self._args:\n if self._args.is_isotope:\n self._confs['isotope'] = '.true.'\n\n if 'is_joint_dos' in self._args:\n if self._args.is_joint_dos:\n self._confs['joint_dos'] = '.true.'\n\n if 'no_kappa_stars' in self._args:\n if self._args.no_kappa_stars:\n self._confs['kappa_star'] = '.false.'\n\n if 'is_lbte' in self._args:\n if self._args.is_lbte:\n self._confs['lbte'] = '.true.'\n\n if 'is_N_U' in self._args:\n if self._args.is_N_U:\n self._confs['N_U'] = '.true.'\n\n if 'is_reducible_collision_matrix' in self._args:\n if self._args.is_reducible_collision_matrix:\n self._confs['reducible_collision_matrix'] = '.true.'\n\n if 'is_symmetrize_fc2' in self._args:\n if self._args.is_symmetrize_fc2:\n self._confs['symmetrize_fc2'] = '.true.'\n\n if 'is_symmetrize_fc3_q' in self._args:\n if self._args.is_symmetrize_fc3_q:\n self._confs['symmetrize_fc3_q'] = '.true.'\n\n if 'is_symmetrize_fc3_r' in self._args:\n if self._args.is_symmetrize_fc3_r:\n self._confs['symmetrize_fc3_r'] = '.true.'\n\n if 'mass_variances' in self._args:\n mass_variances = self._args.mass_variances\n if mass_variances is not None:\n self._confs['mass_variances'] = \" \".join(mass_variances)\n\n if 'max_freepath' in self._args:\n if self._args.max_freepath is not None:\n self._confs['max_freepath'] = self._args.max_freepath\n\n if 'mesh_divisors' in self._args:\n mesh_divisors = self._args.mesh_divisors\n if mesh_divisors is not None:\n self._confs['mesh_divisors'] = \" \".join(mesh_divisors)\n\n if 'pinv_cutoff' in self._args:\n if self._args.pinv_cutoff is not None:\n self._confs['pinv_cutoff'] = self._args.pinv_cutoff\n\n if 'pinv_solver' in self._args:\n if self._args.pinv_solver is not None:\n self._confs['pinv_solver'] = self._args.pinv_solver\n\n if 'pp_conversion_factor' in self._args:\n pp_conv_factor = self._args.pp_conversion_factor\n if pp_conv_factor is not None:\n self._confs['pp_conversion_factor'] = pp_conv_factor\n\n if 'read_fc2' in self._args:\n if self._args.read_fc2:\n self._confs['read_fc2'] = '.true.'\n\n if 'read_fc3' in self._args:\n if self._args.read_fc3:\n 
self._confs['read_fc3'] = '.true.'\n\n if 'read_gamma' in self._args:\n if self._args.read_gamma:\n self._confs['read_gamma'] = '.true.'\n\n if 'read_phonon' in self._args:\n if self._args.read_phonon:\n self._confs['read_phonon'] = '.true.'\n\n if 'read_pp' in self._args:\n if self._args.read_pp:\n self._confs['read_pp'] = '.true.'\n\n if 'read_collision' in self._args:\n if self._args.read_collision is not None:\n self._confs['read_collision'] = self._args.read_collision\n\n if 'scattering_event_class' in self._args:\n scatt_class = self._args.scattering_event_class\n if scatt_class is not None:\n self._confs['scattering_event_class'] = scatt_class\n\n if 'sigma_cutoff_width' in self._args:\n sigma_cutoff = self._args.sigma_cutoff_width\n if sigma_cutoff is not None:\n self._confs['sigma_cutoff_width'] = sigma_cutoff\n\n if 'solve_collective_phonon' in self._args:\n if self._args.solve_collective_phonon:\n self._confs['collective_phonon'] = '.true.'\n\n if 'use_ave_pp' in self._args:\n if self._args.use_ave_pp:\n self._confs['use_ave_pp'] = '.true.'\n\n if 'write_gamma_detail' in self._args:\n if self._args.write_gamma_detail:\n self._confs['write_gamma_detail'] = '.true.'\n\n if 'write_gamma' in self._args:\n if self._args.write_gamma:\n self._confs['write_gamma'] = '.true.'\n\n if 'write_collision' in self._args:\n if self._args.write_collision:\n self._confs['write_collision'] = '.true.'\n\n if 'write_phonon' in self._args:\n if self._args.write_phonon:\n self._confs['write_phonon'] = '.true.'\n\n if 'write_pp' in self._args:\n if self._args.write_pp:\n self._confs['write_pp'] = '.true.'\n\n if 'write_LBTE_solution' in self._args:\n if self._args.write_LBTE_solution:\n self._confs['write_LBTE_solution'] = '.true.'\n\n def _parse_conf(self):\n self.parse_conf()\n confs = self._confs\n\n for conf_key in confs.keys():\n if conf_key == 'create_displacements':\n if confs['create_displacements'].lower() == '.false.':\n self.set_parameter('create_displacements', False)\n elif confs['create_displacements'].lower() == '.true.':\n self.set_parameter('create_displacements', True)\n\n if conf_key == 'dim_fc2':\n matrix = [ int(x) for x in confs['dim_fc2'].split() ]\n if len(matrix) == 9:\n matrix = np.array(matrix).reshape(3, 3)\n elif len(matrix) == 3:\n matrix = np.diag(matrix)\n else:\n self.setting_error(\n \"Number of elements of dim2 has to be 3 or 9.\")\n\n if matrix.shape == (3, 3):\n if np.linalg.det(matrix) < 1:\n self.setting_error(\n \"Determinant of supercell matrix has \" +\n \"to be positive.\")\n else:\n self.set_parameter('dim_fc2', matrix)\n\n if conf_key == 'boundary_mfp':\n self.set_parameter('boundary_mfp',\n float(confs['boundary_mfp']))\n\n if conf_key in ('constant_averaged_pp_interaction'\n 'const_ave_pp'):\n self.set_parameter('const_ave_pp', float(confs['const_ave_pp']))\n\n if conf_key == 'cutoff_fc3_distance':\n self.set_parameter('cutoff_fc3_distance',\n float(confs['cutoff_fc3_distance']))\n\n if conf_key == 'cutoff_pair_distance':\n self.set_parameter('cutoff_pair_distance',\n float(confs['cutoff_pair_distance']))\n\n if conf_key == 'full_pp':\n if confs['full_pp'].lower() == '.false.':\n self.set_parameter('is_full_pp', False)\n elif confs['full_pp'].lower() == '.true.':\n self.set_parameter('is_full_pp', True)\n\n if conf_key == 'gamma_conversion_factor':\n self.set_parameter('gamma_conversion_factor',\n float(confs['gamma_conversion_factor']))\n\n if conf_key == 'grid_addresses':\n vals = [int(x) for x in\n confs['grid_addresses'].replace(',', ' ').split()]\n if 
len(vals) % 3 == 0 and len(vals) > 0:\n self.set_parameter('grid_addresses',\n np.reshape(vals, (-1, 3)))\n else:\n self.setting_error(\"Grid addresses are incorrectly set.\")\n\n if conf_key == 'grid_points':\n vals = [int(x) for x in\n confs['grid_points'].replace(',', ' ').split()]\n self.set_parameter('grid_points', vals)\n\n if conf_key == 'ion_clamped':\n if confs['ion_clamped'].lower() == '.false.':\n self.set_parameter('ion_clamped', False)\n elif confs['ion_clamped'].lower() == '.true.':\n self.set_parameter('ion_clamped', True)\n\n if conf_key == 'bterta':\n if confs['bterta'].lower() == '.false.':\n self.set_parameter('is_bterta', False)\n elif confs['bterta'].lower() == '.true.':\n self.set_parameter('is_bterta', True)\n\n if conf_key == 'compact_fc':\n if confs['compact_fc'].lower() == '.false.':\n self.set_parameter('is_compact_fc', False)\n elif confs['compact_fc'].lower() == '.true.':\n self.set_parameter('is_compact_fc', True)\n\n if conf_key == 'frequency_shift':\n if confs['frequency_shift'].lower() == '.false.':\n self.set_parameter('is_frequency_shift', False)\n elif confs['frequency_shift'].lower() == '.true.':\n self.set_parameter('is_frequency_shift', True)\n\n if conf_key == 'gruneisen':\n if confs['gruneisen'].lower() == '.false.':\n self.set_parameter('is_gruneisen', False)\n elif confs['gruneisen'].lower() == '.true.':\n self.set_parameter('is_gruneisen', True)\n\n if conf_key == 'imag_self_energy':\n if confs['imag_self_energy'].lower() == '.false.':\n self.set_parameter('is_imag_self_energy', False)\n elif confs['imag_self_energy'].lower() == '.true.':\n self.set_parameter('is_imag_self_energy', True)\n\n if conf_key == 'isotope':\n if confs['isotope'].lower() == '.false.':\n self.set_parameter('is_isotope', False)\n elif confs['isotope'].lower() == '.true.':\n self.set_parameter('is_isotope', True)\n\n if conf_key == 'joint_dos':\n if confs['joint_dos'].lower() == '.false.':\n self.set_parameter('is_joint_dos', False)\n elif confs['joint_dos'].lower() == '.true.':\n self.set_parameter('is_joint_dos', True)\n\n if conf_key == 'lbte':\n if confs['lbte'].lower() == '.false.':\n self.set_parameter('is_lbte', False)\n elif confs['lbte'].lower() == '.true.':\n self.set_parameter('is_lbte', True)\n\n if conf_key == 'N_U':\n if confs['N_U'].lower() == '.false.':\n self.set_parameter('is_N_U', False)\n elif confs['N_U'].lower() == '.true.':\n self.set_parameter('is_N_U', True)\n\n if conf_key == 'reducible_collision_matrix':\n if confs['reducible_collision_matrix'].lower() == '.false.':\n self.set_parameter('is_reducible_collision_matrix', False)\n elif confs['reducible_collision_matrix'].lower() == '.true.':\n self.set_parameter('is_reducible_collision_matrix', True)\n\n if conf_key == 'symmetrize_fc2':\n if confs['symmetrize_fc2'].lower() == '.false.':\n self.set_parameter('is_symmetrize_fc2', False)\n elif confs['symmetrize_fc2'].lower() == '.true.':\n self.set_parameter('is_symmetrize_fc2', True)\n\n if conf_key == 'symmetrize_fc3_q':\n if confs['symmetrize_fc3_q'].lower() == '.false.':\n self.set_parameter('is_symmetrize_fc3_q', False)\n elif confs['symmetrize_fc3_q'].lower() == '.true.':\n self.set_parameter('is_symmetrize_fc3_q', True)\n\n if conf_key == 'symmetrize_fc3_r':\n if confs['symmetrize_fc3_r'].lower() == '.false.':\n self.set_parameter('is_symmetrize_fc3_r', False)\n elif confs['symmetrize_fc3_r'].lower() == '.true.':\n self.set_parameter('is_symmetrize_fc3_r', True)\n\n if conf_key == 'mass_variances':\n vals = [fracval(x) for x in 
confs['mass_variances'].split()]\n if len(vals) < 1:\n self.setting_error(\"Mass variance parameters are incorrectly set.\")\n else:\n self.set_parameter('mass_variances', vals)\n\n if conf_key == 'max_freepath':\n self.set_parameter('max_freepath', float(confs['max_freepath']))\n\n if conf_key == 'mesh_divisors':\n vals = [x for x in confs['mesh_divisors'].split()]\n if len(vals) == 3:\n self.set_parameter('mesh_divisors', [int(x) for x in vals])\n elif len(vals) == 6:\n divs = [int(x) for x in vals[:3]]\n is_shift = [x.lower() == 't' for x in vals[3:]]\n for i in range(3):\n if is_shift[i] and (divs[i] % 2 != 0):\n is_shift[i] = False\n self.setting_error(\"Coarse grid shift along the \" +\n [\"first\", \"second\", \"third\"][i] +\n \" axis is not allowed.\")\n self.set_parameter('mesh_divisors', divs + is_shift)\n else:\n self.setting_error(\"Mesh divisors are incorrectly set.\")\n\n if conf_key == 'kappa_star':\n if confs['kappa_star'].lower() == '.false.':\n self.set_parameter('is_kappa_star', False)\n elif confs['kappa_star'].lower() == '.true.':\n self.set_parameter('is_kappa_star', True)\n\n if conf_key == 'pinv_cutoff':\n self.set_parameter('pinv_cutoff', float(confs['pinv_cutoff']))\n\n if conf_key == 'pinv_solver':\n self.set_parameter('pinv_solver', int(confs['pinv_solver']))\n\n if conf_key == 'pp_conversion_factor':\n self.set_parameter('pp_conversion_factor',\n float(confs['pp_conversion_factor']))\n\n if conf_key == 'read_collision':\n if confs['read_collision'] == 'all':\n self.set_parameter('read_collision', 'all')\n else:\n vals = [int(x) for x in confs['read_collision'].split()]\n self.set_parameter('read_collision', vals)\n\n if conf_key == 'read_fc2':\n if confs['read_fc2'].lower() == '.false.':\n self.set_parameter('read_fc2', False)\n elif confs['read_fc2'].lower() == '.true.':\n self.set_parameter('read_fc2', True)\n\n if conf_key == 'read_fc3':\n if confs['read_fc3'].lower() == '.false.':\n self.set_parameter('read_fc3', False)\n elif confs['read_fc3'].lower() == '.true.':\n self.set_parameter('read_fc3', True)\n\n if conf_key == 'read_gamma':\n if confs['read_gamma'].lower() == '.false.':\n self.set_parameter('read_gamma', False)\n elif confs['read_gamma'].lower() == '.true.':\n self.set_parameter('read_gamma', True)\n\n if conf_key == 'read_phonon':\n if confs['read_phonon'].lower() == '.false.':\n self.set_parameter('read_phonon', False)\n elif confs['read_phonon'].lower() == '.true.':\n self.set_parameter('read_phonon', True)\n\n if conf_key == 'read_pp':\n if confs['read_pp'].lower() == '.false.':\n self.set_parameter('read_pp', False)\n elif confs['read_pp'].lower() == '.true.':\n self.set_parameter('read_pp', True)\n\n if conf_key == 'scattering_event_class':\n self.set_parameter('scattering_event_class',\n confs['scattering_event_class'])\n\n if conf_key == 'sigma_cutoff_width':\n self.set_parameter('sigma_cutoff_width',\n float(confs['sigma_cutoff_width']))\n\n if conf_key == 'collective_phonon':\n if confs['collective_phonon'].lower() == '.false.':\n self.set_parameter('collective_phonon', False)\n elif confs['collective_phonon'].lower() == '.true.':\n self.set_parameter('collective_phonon', True)\n\n if conf_key == 'use_ave_pp':\n if confs['use_ave_pp'].lower() == '.false.':\n self.set_parameter('use_ave_pp', False)\n elif confs['use_ave_pp'].lower() == '.true.':\n self.set_parameter('use_ave_pp', True)\n\n if conf_key == 'write_gamma_detail':\n if confs['write_gamma_detail'].lower() == '.false.':\n self.set_parameter('write_gamma_detail', False)\n elif 
confs['write_gamma_detail'].lower() == '.true.':\n self.set_parameter('write_gamma_detail', True)\n\n if conf_key == 'write_gamma':\n if confs['write_gamma'].lower() == '.false.':\n self.set_parameter('write_gamma', False)\n elif confs['write_gamma'].lower() == '.true.':\n self.set_parameter('write_gamma', True)\n\n if conf_key == 'write_collision':\n if confs['write_collision'].lower() == '.false.':\n self.set_parameter('write_collision', False)\n elif confs['write_collision'].lower() == '.true.':\n self.set_parameter('write_collision', True)\n\n if conf_key == 'write_phonon':\n if confs['write_phonon'].lower() == '.false.':\n self.set_parameter('write_phonon', False)\n elif confs['write_phonon'].lower() == '.true.':\n self.set_parameter('write_phonon', True)\n\n if conf_key == 'write_pp':\n if confs['write_pp'].lower() == '.false.':\n self.set_parameter('write_pp', False)\n elif confs['write_pp'].lower() == '.true.':\n self.set_parameter('write_pp', True)\n\n if conf_key == 'write_LBTE_solution':\n if confs['write_LBTE_solution'].lower() == '.false.':\n self.set_parameter('write_LBTE_solution', False)\n elif confs['write_LBTE_solution'].lower() == '.true.':\n self.set_parameter('write_LBTE_solution', True)\n\n def _set_settings(self):\n self.set_settings()\n params = self._parameters\n\n # Is getting least displacements?\n if 'create_displacements' in params:\n if params['create_displacements']:\n self._settings.set_create_displacements('displacements')\n\n # Supercell dimension for fc2\n if 'dim_fc2' in params:\n self._settings.set_phonon_supercell_matrix(params['dim_fc2'])\n\n # Boundary mean free path for thermal conductivity calculation\n if 'boundary_mfp' in params:\n self._settings.set_boundary_mfp(params['boundary_mfp'])\n\n # Peierls type approximation for squared ph-ph interaction strength\n if 'const_ave_pp' in params:\n self._settings.set_constant_averaged_pp_interaction(\n params['const_ave_pp'])\n\n # Cutoff distance of third-order force constants. 
Elements where any\n # pair of atoms has larger distance than cut-off distance are set zero.\n if 'cutoff_fc3_distance' in params:\n self._settings.set_cutoff_fc3_distance(params['cutoff_fc3_distance'])\n\n # Cutoff distance between pairs of displaced atoms used for supercell\n # creation with displacements and making third-order force constants\n if 'cutoff_pair_distance' in params:\n self._settings.set_cutoff_pair_distance(\n params['cutoff_pair_distance'])\n\n # Gamma unit conversion factor\n if 'gamma_conversion_factor' in params:\n self._settings.set_gamma_conversion_factor(\n params['gamma_conversion_factor'])\n\n # Grid addresses (sets of three integer values)\n if 'grid_addresses' in params:\n self._settings.set_grid_addresses(params['grid_addresses'])\n\n # Grid points\n if 'grid_points' in params:\n self._settings.set_grid_points(params['grid_points'])\n\n # Atoms are clamped under applied strain in Gruneisen parameter calculation\n if 'ion_clamped' in params:\n self._settings.set_ion_clamped(params['ion_clamped'])\n\n # Calculate thermal conductivity in BTE-RTA\n if 'is_bterta' in params:\n self._settings.set_is_bterta(params['is_bterta'])\n\n # Compact force constants or full force constants\n if 'is_compact_fc' in params:\n self._settings.set_is_compact_fc(params['is_compact_fc'])\n\n # Calculate frequency_shifts\n if 'is_frequency_shift' in params:\n self._settings.set_is_frequency_shift(params['is_frequency_shift'])\n\n # Calculate full ph-ph interaction strength for RTA conductivity\n if 'is_full_pp' in params:\n self._settings.set_is_full_pp(params['is_full_pp'])\n\n # Calculate phonon-Gruneisen parameters\n if 'is_gruneisen' in params:\n self._settings.set_is_gruneisen(params['is_gruneisen'])\n\n # Calculate imaginary part of self energy\n if 'is_imag_self_energy' in params:\n self._settings.set_is_imag_self_energy(params['is_imag_self_energy'])\n\n # Calculate lifetime due to isotope scattering\n if 'is_isotope' in params:\n self._settings.set_is_isotope(params['is_isotope'])\n\n # Calculate joint-DOS\n if 'is_joint_dos' in params:\n self._settings.set_is_joint_dos(params['is_joint_dos'])\n\n # Calculate thermal conductivity in LBTE with Chaput's method\n if 'is_lbte' in params:\n self._settings.set_is_lbte(params['is_lbte'])\n\n # Calculate Normal and Umklapp processes\n if 'is_N_U' in params:\n self._settings.set_is_N_U(params['is_N_U'])\n\n # Solve reducible collision matrix but not reduced matrix\n if 'is_reducible_collision_matrix' in params:\n self._settings.set_is_reducible_collision_matrix(\n params['is_reducible_collision_matrix'])\n\n # Symmetrize fc2 by index exchange\n if 'is_symmetrize_fc2' in params:\n self._settings.set_is_symmetrize_fc2(params['is_symmetrize_fc2'])\n\n # Symmetrize phonon fc3 by index exchange\n if 'is_symmetrize_fc3_q' in params:\n self._settings.set_is_symmetrize_fc3_q(params['is_symmetrize_fc3_q'])\n\n # Symmetrize fc3 by index exchange\n if 'is_symmetrize_fc3_r' in params:\n self._settings.set_is_symmetrize_fc3_r(params['is_symmetrize_fc3_r'])\n\n # Mass variance parameters\n if 'mass_variances' in params:\n self._settings.set_mass_variances(params['mass_variances'])\n\n # Maximum mean free path\n if 'max_freepath' in params:\n self._settings.set_max_freepath(params['max_freepath'])\n\n # Divisors for mesh numbers\n if 'mesh_divisors' in params:\n self._settings.set_mesh_divisors(params['mesh_divisors'][:3])\n if len(params['mesh_divisors']) > 3:\n self._settings.set_coarse_mesh_shifts(\n params['mesh_divisors'][3:])\n\n # Cutoff 
frequency for pseudo inversion of collision matrix\n if 'pinv_cutoff' in params:\n self._settings.set_pinv_cutoff(params['pinv_cutoff'])\n\n # Switch for pseudo-inverse solver\n if 'pinv_solver' in params:\n self._settings.set_pinv_solver(params['pinv_solver'])\n\n # Ph-ph interaction unit conversion factor\n if 'pp_conversion_factor' in params:\n self._settings.set_pp_conversion_factor(params['pp_conversion_factor'])\n\n # Read phonon-phonon interaction amplitudes from hdf5\n if 'read_amplitude' in params:\n self._settings.set_read_amplitude(params['read_amplitude'])\n\n # Read collision matrix and gammas from hdf5\n if 'read_collision' in params:\n self._settings.set_read_collision(params['read_collision'])\n\n # Read fc2 from hdf5\n if 'read_fc2' in params:\n self._settings.set_read_fc2(params['read_fc2'])\n\n # Read fc3 from hdf5\n if 'read_fc3' in params:\n self._settings.set_read_fc3(params['read_fc3'])\n\n # Read gammas from hdf5\n if 'read_gamma' in params:\n self._settings.set_read_gamma(params['read_gamma'])\n\n # Read phonons from hdf5\n if 'read_phonon' in params:\n self._settings.set_read_phonon(params['read_phonon'])\n\n # Read ph-ph interaction strength from hdf5\n if 'read_pp' in params:\n self._settings.set_read_pp(params['read_pp'])\n\n # Sum partial kappa at q-stars\n if 'is_kappa_star' in params:\n self._settings.set_is_kappa_star(params['is_kappa_star'])\n\n # Scattering event class 1 or 2\n if 'scattering_event_class' in params:\n self._settings.set_scattering_event_class(\n params['scattering_event_class'])\n\n # Cutoff width of smearing function (ratio to sigma value)\n if 'sigma_cutoff_width' in params:\n self._settings.set_sigma_cutoff_width(params['sigma_cutoff_width'])\n\n # Solve collective phonons\n if 'collective_phonon' in params:\n self._settings.set_solve_collective_phonon(\n params['collective_phonon'])\n\n # Use averaged ph-ph interaction\n if 'use_ave_pp' in params:\n self._settings.set_use_ave_pp(params['use_ave_pp'])\n\n # Write detailed imag-part of self energy to hdf5\n if 'write_gamma_detail' in params:\n self._settings.set_write_gamma_detail(\n params['write_gamma_detail'])\n\n # Write imag-part of self energy to hdf5\n if 'write_gamma' in params:\n self._settings.set_write_gamma(params['write_gamma'])\n\n # Write collision matrix and gammas to hdf5\n if 'write_collision' in params:\n self._settings.set_write_collision(params['write_collision'])\n\n # Write all phonons on grid points to hdf5\n if 'write_phonon' in params:\n self._settings.set_write_phonon(params['write_phonon'])\n\n # Write phonon-phonon interaction amplitudes to hdf5\n if 'write_pp' in params:\n self._settings.set_write_pp(params['write_pp'])\n\n # Write direct solution of LBTE to hdf5 files\n if 'write_LBTE_solution' in params:\n self._settings.set_write_LBTE_solution(\n params['write_LBTE_solution'])\n", "import numpy as np\nfrom phonopy.units import THzToEv, Kb\nimport phonopy.structure.spglib as spg\nfrom phonopy.structure.symmetry import Symmetry\nfrom phonopy.structure.tetrahedron_method import TetrahedronMethod\nfrom phonopy.structure.grid_points import extract_ir_grid_points\n\n\ndef gaussian(x, sigma):\n return 1.0 / np.sqrt(2 * np.pi) / sigma * np.exp(-x**2 / 2 / sigma**2)\n\n\ndef occupation(x, t):\n return 1.0 / (np.exp(THzToEv * x / (Kb * t)) - 1)\n\n\ndef get_triplets_at_q(grid_point,\n mesh,\n point_group, # real space point group of space group\n reciprocal_lattice, # column vectors\n is_time_reversal=True,\n swappable=True,\n stores_triplets_map=False):\n 
\"\"\"Parameters\n ----------\n grid_point : int\n A grid point\n mesh : array_like\n Mesh numbers\n dtype='intc'\n shape=(3,)\n point_group : array_like\n Rotation matrices in real space. Note that those in reciprocal space\n mean these matrices transposed (local terminology).\n dtype='intc'\n shape=(n_rot, 3, 3)\n reciprocal_lattice : array_like\n Reciprocal primitive basis vectors given as column vectors\n dtype='double'\n shape=(3, 3)\n is_time_reversal : bool, optional\n Inversion symemtry is added if it doesn't exist. Default is True.\n swappable : bool, optional\n q1 and q2 among (q0, q1, q2) can be swapped. Deafult is True.\n\n Returns\n -------\n triplets_at_q : ndarray\n Symmetry reduced number of triplets are stored as grid point\n integer numbers.\n dtype='uintp'\n shape=(n_triplets, 3)\n weights : ndarray\n Weights of triplets in Brillouin zone\n dtype='intc'\n shape=(n_triplets,)\n bz_grid_address : ndarray\n Integer grid address of the points in Brillouin zone including\n surface. The first prod(mesh) numbers of points are\n independent. But the rest of points are\n translational-symmetrically equivalent to some other points.\n dtype='intc'\n shape=(n_grid_points, 3)\n bz_map : ndarray\n Grid point mapping table containing BZ surface. See more\n detail in spglib docstring.\n dtype='uintp'\n shape=(prod(mesh*2),)\n map_tripelts : ndarray or None\n Returns when stores_triplets_map=True, otherwise None is\n returned. Mapping table of all triplets to symmetrically\n independent tripelts. More precisely, this gives a list of\n index mapping from all q-points to independent q' of\n q+q'+q''=G. Considering q' is enough because q is fixed and\n q''=G-q-q' where G is automatically determined to choose\n smallest |G|.\n dtype='uintp'\n shape=(prod(mesh),)\n map_q : ndarray or None\n Returns when stores_triplets_map=True, otherwise None is\n returned. 
Irreducible q-points stabilized by q-point of\n specified grid_point.\n dtype='uintp'\n shape=(prod(mesh),)\n\n \"\"\"\n\n map_triplets, map_q, grid_address = _get_triplets_reciprocal_mesh_at_q(\n grid_point,\n mesh,\n point_group,\n is_time_reversal=is_time_reversal,\n swappable=swappable)\n bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,\n mesh,\n reciprocal_lattice,\n is_dense=True)\n triplets_at_q, weights = _get_BZ_triplets_at_q(\n grid_point,\n bz_grid_address,\n bz_map,\n map_triplets,\n mesh)\n\n assert np.prod(mesh) == weights.sum(), \\\n \"Num grid points %d, sum of weight %d\" % (\n np.prod(mesh), weights.sum())\n\n # These maps are required for collision matrix calculation.\n if not stores_triplets_map:\n map_triplets = None\n map_q = None\n\n return triplets_at_q, weights, bz_grid_address, bz_map, map_triplets, map_q\n\n\ndef get_all_triplets(grid_point,\n bz_grid_address,\n bz_map,\n mesh):\n triplets_at_q, _ = _get_BZ_triplets_at_q(\n grid_point,\n bz_grid_address,\n bz_map,\n np.arange(np.prod(mesh), dtype=bz_map.dtype),\n mesh)\n\n return triplets_at_q\n\n\ndef get_nosym_triplets_at_q(grid_point,\n mesh,\n reciprocal_lattice,\n stores_triplets_map=False):\n grid_address = get_grid_address(mesh)\n bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,\n mesh,\n reciprocal_lattice,\n is_dense=True)\n map_triplets = np.arange(len(grid_address), dtype=bz_map.dtype)\n triplets_at_q, weights = _get_BZ_triplets_at_q(\n grid_point,\n bz_grid_address,\n bz_map,\n map_triplets,\n mesh)\n\n if not stores_triplets_map:\n map_triplets = None\n map_q = None\n else:\n map_q = map_triplets.copy()\n\n return triplets_at_q, weights, bz_grid_address, bz_map, map_triplets, map_q\n\n\ndef get_grid_address(mesh):\n grid_mapping_table, grid_address = spg.get_stabilized_reciprocal_mesh(\n mesh,\n [[[1, 0, 0], [0, 1, 0], [0, 0, 1]]],\n is_time_reversal=False,\n is_dense=True)\n\n return grid_address\n\n\ndef get_bz_grid_address(mesh, reciprocal_lattice, with_boundary=False):\n grid_address = get_grid_address(mesh)\n bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,\n mesh,\n reciprocal_lattice,\n is_dense=True)\n if with_boundary:\n return bz_grid_address, bz_map\n else:\n return bz_grid_address[:np.prod(mesh)]\n\n\ndef get_grid_point_from_address_py(address, mesh):\n # X runs first in XYZ\n # (*In spglib, Z first is possible with MACRO setting.)\n m = mesh\n return (address[0] % m[0] +\n (address[1] % m[1]) * m[0] +\n (address[2] % m[2]) * m[0] * m[1])\n\n\ndef get_grid_point_from_address(address, mesh):\n \"\"\"Grid point number is given by grid address.\n\n Parameters\n ----------\n address : array_like\n Grid address.\n dtype='intc'\n shape=(3,)\n mesh : array_like\n Mesh numbers.\n dtype='intc'\n shape=(3,)\n\n Returns\n -------\n int\n Grid point number.\n\n \"\"\"\n\n return spg.get_grid_point_from_address(address, mesh)\n\n\ndef get_bz_grid_point_from_address(address, mesh, bz_map):\n # X runs first in XYZ\n # (*In spglib, Z first is possible with MACRO setting.)\n # 2m is defined in kpoint.c of spglib.\n m = 2 * np.array(mesh, dtype='intc')\n return bz_map[get_grid_point_from_address(address, m)]\n\n\ndef invert_grid_point(grid_point, mesh, grid_address, bz_map):\n # gp --> [address] --> [-address] --> inv_gp\n address = grid_address[grid_point]\n return get_bz_grid_point_from_address(-address, mesh, bz_map)\n\n\ndef get_ir_grid_points(mesh, rotations, mesh_shifts=None):\n if mesh_shifts is None:\n mesh_shifts = [False, False, False]\n 
grid_mapping_table, grid_address = spg.get_stabilized_reciprocal_mesh(\n mesh,\n rotations,\n is_shift=np.where(mesh_shifts, 1, 0),\n is_dense=True)\n (ir_grid_points,\n ir_grid_weights) = extract_ir_grid_points(grid_mapping_table)\n\n return ir_grid_points, ir_grid_weights, grid_address, grid_mapping_table\n\n\ndef get_grid_points_by_rotations(grid_point,\n reciprocal_rotations,\n mesh,\n mesh_shifts=None):\n if mesh_shifts is None:\n mesh_shifts = [False, False, False]\n return spg.get_grid_points_by_rotations(\n grid_point,\n reciprocal_rotations,\n mesh,\n is_shift=np.where(mesh_shifts, 1, 0),\n is_dense=True)\n\n\ndef get_BZ_grid_points_by_rotations(grid_point,\n reciprocal_rotations,\n mesh,\n bz_map,\n mesh_shifts=None):\n if mesh_shifts is None:\n mesh_shifts = [False, False, False]\n return spg.get_BZ_grid_points_by_rotations(\n grid_point,\n reciprocal_rotations,\n mesh,\n bz_map,\n is_shift=np.where(mesh_shifts, 1, 0),\n is_dense=True)\n\n\ndef reduce_grid_points(mesh_divisors,\n grid_address,\n dense_grid_points,\n dense_grid_weights=None,\n coarse_mesh_shifts=None):\n divisors = np.array(mesh_divisors, dtype='intc')\n if (divisors == 1).all():\n coarse_grid_points = np.array(dense_grid_points, dtype='uintp')\n if dense_grid_weights is not None:\n coarse_grid_weights = np.array(dense_grid_weights, dtype='intc')\n else:\n if coarse_mesh_shifts is None:\n shift = [0, 0, 0]\n else:\n shift = np.where(coarse_mesh_shifts, divisors // 2, [0, 0, 0])\n modulo = grid_address[dense_grid_points] % divisors\n condition = (modulo == shift).all(axis=1)\n coarse_grid_points = np.extract(condition, dense_grid_points)\n if dense_grid_weights is not None:\n coarse_grid_weights = np.extract(condition, dense_grid_weights)\n\n if dense_grid_weights is None:\n return coarse_grid_points\n else:\n return coarse_grid_points, coarse_grid_weights\n\n\ndef from_coarse_to_dense_grid_points(dense_mesh,\n mesh_divisors,\n coarse_grid_points,\n coarse_grid_address,\n coarse_mesh_shifts=None):\n if coarse_mesh_shifts is None:\n coarse_mesh_shifts = [False, False, False]\n shifts = np.where(coarse_mesh_shifts, 1, 0)\n dense_grid_points = []\n for cga in coarse_grid_address[coarse_grid_points]:\n dense_address = cga * mesh_divisors + shifts * (mesh_divisors // 2)\n dense_grid_points.append(get_grid_point_from_address(dense_address,\n dense_mesh))\n return np.array(dense_grid_points, dtype='uintp')\n\n\ndef get_coarse_ir_grid_points(primitive,\n mesh,\n mesh_divisors,\n coarse_mesh_shifts,\n is_kappa_star=True,\n symprec=1e-5):\n mesh = np.array(mesh, dtype='intc')\n\n symmetry = Symmetry(primitive, symprec)\n point_group = symmetry.get_pointgroup_operations()\n\n if mesh_divisors is None:\n (ir_grid_points,\n ir_grid_weights,\n grid_address,\n grid_mapping_table) = get_ir_grid_points(mesh, point_group)\n else:\n mesh_divs = np.array(mesh_divisors, dtype='intc')\n coarse_mesh = mesh // mesh_divs\n if coarse_mesh_shifts is None:\n coarse_mesh_shifts = [False, False, False]\n\n if not is_kappa_star:\n coarse_grid_address = get_grid_address(coarse_mesh)\n coarse_grid_points = np.arange(np.prod(coarse_mesh), dtype='uintp')\n else:\n (coarse_ir_grid_points,\n coarse_ir_grid_weights,\n coarse_grid_address,\n coarse_grid_mapping_table) = get_ir_grid_points(\n coarse_mesh,\n point_group,\n mesh_shifts=coarse_mesh_shifts)\n ir_grid_points = from_coarse_to_dense_grid_points(\n mesh,\n mesh_divs,\n coarse_grid_points,\n coarse_grid_address,\n coarse_mesh_shifts=coarse_mesh_shifts)\n grid_address = get_grid_address(mesh)\n 
ir_grid_weights = ir_grid_weights\n\n reciprocal_lattice = np.linalg.inv(primitive.get_cell())\n bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,\n mesh,\n reciprocal_lattice,\n is_dense=True)\n\n return (ir_grid_points,\n ir_grid_weights,\n bz_grid_address,\n grid_mapping_table)\n\n\ndef get_number_of_triplets(primitive,\n mesh,\n grid_point,\n swappable=True,\n symprec=1e-5):\n mesh = np.array(mesh, dtype='intc')\n symmetry = Symmetry(primitive, symprec)\n point_group = symmetry.get_pointgroup_operations()\n reciprocal_lattice = np.linalg.inv(primitive.get_cell())\n triplets_at_q, _, _, _, _, _ = get_triplets_at_q(\n grid_point,\n mesh,\n point_group,\n reciprocal_lattice,\n swappable=swappable)\n\n return len(triplets_at_q)\n\n\ndef get_triplets_integration_weights(interaction,\n frequency_points,\n sigma,\n sigma_cutoff=None,\n is_collision_matrix=False,\n neighboring_phonons=False,\n lang='C'):\n triplets = interaction.get_triplets_at_q()[0]\n frequencies = interaction.get_phonons()[0]\n num_band = frequencies.shape[1]\n g_zero = None\n\n if is_collision_matrix:\n g = np.empty(\n (3, len(triplets), len(frequency_points), num_band, num_band),\n dtype='double', order='C')\n else:\n g = np.empty(\n (2, len(triplets), len(frequency_points), num_band, num_band),\n dtype='double', order='C')\n g[:] = 0\n\n if sigma:\n if lang == 'C':\n import phono3py._phono3py as phono3c\n g_zero = np.zeros(g.shape[1:], dtype='byte', order='C')\n if sigma_cutoff is None:\n cutoff = -1\n else:\n cutoff = float(sigma_cutoff)\n # cutoff < 0 disables g_zero feature.\n phono3c.triplets_integration_weights_with_sigma(\n g,\n g_zero,\n frequency_points,\n triplets,\n frequencies,\n sigma,\n cutoff)\n else:\n for i, tp in enumerate(triplets):\n f1s = frequencies[tp[1]]\n f2s = frequencies[tp[2]]\n for j, k in list(np.ndindex((num_band, num_band))):\n f1 = f1s[j]\n f2 = f2s[k]\n g0 = gaussian(frequency_points - f1 - f2, sigma)\n g[0, i, :, j, k] = g0\n g1 = gaussian(frequency_points + f1 - f2, sigma)\n g2 = gaussian(frequency_points - f1 + f2, sigma)\n g[1, i, :, j, k] = g1 - g2\n if len(g) == 3:\n g[2, i, :, j, k] = g0 + g1 + g2\n else:\n if lang == 'C':\n g_zero = np.zeros(g.shape[1:], dtype='byte', order='C')\n _set_triplets_integration_weights_c(\n g,\n g_zero,\n interaction,\n frequency_points,\n neighboring_phonons=neighboring_phonons)\n else:\n _set_triplets_integration_weights_py(\n g, interaction, frequency_points)\n\n return g, g_zero\n\n\ndef get_tetrahedra_vertices(relative_address,\n mesh,\n triplets_at_q,\n bz_grid_address,\n bz_map):\n bzmesh = mesh * 2\n grid_order = [1, mesh[0], mesh[0] * mesh[1]]\n bz_grid_order = [1, bzmesh[0], bzmesh[0] * bzmesh[1]]\n num_triplets = len(triplets_at_q)\n vertices = np.zeros((num_triplets, 2, 24, 4), dtype='uintp')\n for i, tp in enumerate(triplets_at_q):\n for j, adrs_shift in enumerate(\n (relative_address, -relative_address)):\n adrs = bz_grid_address[tp[j + 1]] + adrs_shift\n bz_gp = np.dot(adrs % bzmesh, bz_grid_order)\n gp = np.dot(adrs % mesh, grid_order)\n vgp = bz_map[bz_gp]\n vertices[i, j] = vgp + (vgp == -1) * (gp + 1)\n return vertices\n\n\ndef _get_triplets_reciprocal_mesh_at_q(fixed_grid_number,\n mesh,\n rotations,\n is_time_reversal=True,\n swappable=True):\n \"\"\"Search symmetry reduced triplets fixing one q-point\n\n Triplets of (q0, q1, q2) are searched.\n\n Parameters\n ----------\n fixed_grid_number : int\n Grid point of q0\n mesh : array_like\n Mesh numbers\n dtype='intc'\n shape=(3,)\n rotations : array_like\n Rotation 
matrices in real space. Note that those in reciprocal space\n mean these matrices transposed (local terminology).\n dtype='intc'\n shape=(n_rot, 3, 3)\n is_time_reversal : bool\n Inversion symemtry is added if it doesn't exist.\n swappable : bool\n q1 and q2 can be swapped. By this number of triplets decreases.\n\n \"\"\"\n\n import phono3py._phono3py as phono3c\n\n map_triplets = np.zeros(np.prod(mesh), dtype='uintp')\n map_q = np.zeros(np.prod(mesh), dtype='uintp')\n grid_address = np.zeros((np.prod(mesh), 3), dtype='intc')\n\n phono3c.triplets_reciprocal_mesh_at_q(\n map_triplets,\n map_q,\n grid_address,\n fixed_grid_number,\n np.array(mesh, dtype='intc'),\n is_time_reversal * 1,\n np.array(rotations, dtype='intc', order='C'),\n swappable * 1)\n\n return map_triplets, map_q, grid_address\n\n\ndef _get_BZ_triplets_at_q(grid_point,\n bz_grid_address,\n bz_map,\n map_triplets,\n mesh):\n import phono3py._phono3py as phono3c\n\n weights = np.zeros(len(map_triplets), dtype='intc')\n for g in map_triplets:\n weights[g] += 1\n ir_weights = np.extract(weights > 0, weights)\n triplets = np.zeros((len(ir_weights), 3), dtype=bz_map.dtype)\n # triplets are overwritten.\n num_ir_ret = phono3c.BZ_triplets_at_q(triplets,\n grid_point,\n bz_grid_address,\n bz_map,\n map_triplets,\n np.array(mesh, dtype='intc'))\n assert num_ir_ret == len(ir_weights)\n\n return triplets, np.array(ir_weights, dtype='intc')\n\n\ndef _set_triplets_integration_weights_c(g,\n g_zero,\n interaction,\n frequency_points,\n neighboring_phonons=False):\n import phono3py._phono3py as phono3c\n\n reciprocal_lattice = np.linalg.inv(interaction.get_primitive().get_cell())\n mesh = interaction.get_mesh_numbers()\n thm = TetrahedronMethod(reciprocal_lattice, mesh=mesh)\n grid_address = interaction.get_grid_address()\n bz_map = interaction.get_bz_map()\n triplets_at_q = interaction.get_triplets_at_q()[0]\n\n if neighboring_phonons:\n unique_vertices = thm.get_unique_tetrahedra_vertices()\n for i, j in zip((1, 2), (1, -1)):\n neighboring_grid_points = np.zeros(\n len(unique_vertices) * len(triplets_at_q), dtype=bz_map.dtype)\n phono3c.neighboring_grid_points(\n neighboring_grid_points,\n np.array(triplets_at_q[:, i], dtype='uintp').ravel(),\n j * unique_vertices,\n mesh,\n grid_address,\n bz_map)\n interaction.set_phonons(np.unique(neighboring_grid_points))\n\n frequencies = interaction.get_phonons()[0]\n phono3c.triplets_integration_weights(\n g,\n g_zero,\n frequency_points, # f0\n thm.get_tetrahedra(),\n mesh,\n triplets_at_q,\n frequencies, # f1\n frequencies, # f2\n grid_address,\n bz_map,\n g.shape[0])\n\n\ndef _set_triplets_integration_weights_py(g, interaction, frequency_points):\n reciprocal_lattice = np.linalg.inv(interaction.get_primitive().get_cell())\n mesh = interaction.get_mesh_numbers()\n thm = TetrahedronMethod(reciprocal_lattice, mesh=mesh)\n grid_address = interaction.get_grid_address()\n bz_map = interaction.get_bz_map()\n triplets_at_q = interaction.get_triplets_at_q()[0]\n tetrahedra_vertices = get_tetrahedra_vertices(\n thm.get_tetrahedra(),\n mesh,\n triplets_at_q,\n grid_address,\n bz_map)\n interaction.set_phonons(np.unique(tetrahedra_vertices))\n frequencies = interaction.get_phonons()[0]\n num_band = frequencies.shape[1]\n for i, vertices in enumerate(tetrahedra_vertices):\n for j, k in list(np.ndindex((num_band, num_band))):\n f1_v = frequencies[vertices[0], j]\n f2_v = frequencies[vertices[1], k]\n thm.set_tetrahedra_omegas(f1_v + f2_v)\n thm.run(frequency_points)\n g0 = thm.get_integration_weight()\n g[0, 
i, :, j, k] = g0\n thm.set_tetrahedra_omegas(-f1_v + f2_v)\n thm.run(frequency_points)\n g1 = thm.get_integration_weight()\n thm.set_tetrahedra_omegas(f1_v - f2_v)\n thm.run(frequency_points)\n g2 = thm.get_integration_weight()\n g[1, i, :, j, k] = g1 - g2\n if len(g) == 3:\n g[2, i, :, j, k] = g0 + g1 + g2\n" ]
[ [ "numpy.diag", "numpy.linalg.det", "numpy.array", "numpy.reshape" ], [ "numpy.dot", "numpy.sqrt", "numpy.unique", "numpy.extract", "numpy.prod", "numpy.ndindex", "numpy.array", "numpy.exp", "numpy.where", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bputman/schrutepy
[ "68f18a4e47a77bcfc92e0f76c3ba6bb135add65c" ]
[ "schrutepy/schrutepy.py" ]
[ "import pandas\n\n\ndef load_schrute():\n \"\"\"\n The entire script transcriptions from The Office in pandas dataframe format.\n \"\"\"\n\n full_path = \"https://github.com/bradlindblad/schrutepy/raw/master/data/schrute.csv\"\n\n df = pandas.read_csv(full_path)\n df = df.drop(\"Unnamed: 0\", axis=1)\n\n return df\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
joshrose/Horizon
[ "a2eb407b31a16560ae78aa6751eb83672a122a7e", "a2eb407b31a16560ae78aa6751eb83672a122a7e", "a2eb407b31a16560ae78aa6751eb83672a122a7e", "a2eb407b31a16560ae78aa6751eb83672a122a7e" ]
[ "ml/rl/test/gym/world_model/mdnrnn_gym.py", "ml/rl/test/gym/run_gym.py", "ml/rl/test/preprocessing/test_normalization.py", "ml/rl/test/environment/linear_dynamics.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\"\"\"\nLearn a world model on gym environments\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport sys\nfrom typing import Dict, Optional\n\nimport ml.rl.types as rlt\nimport numpy as np\nimport torch\nfrom ml.rl.evaluation.world_model_evaluator import (\n FeatureImportanceEvaluator,\n FeatureSensitivityEvaluator,\n)\nfrom ml.rl.json_serialize import json_to_object\nfrom ml.rl.models.mdn_rnn import MDNRNNMemoryPool\nfrom ml.rl.models.world_model import MemoryNetwork\nfrom ml.rl.parameters import MDNRNNParameters, OpenAiGymParameters, OpenAiRunDetails\nfrom ml.rl.test.gym.open_ai_gym_environment import (\n EnvType,\n ModelType,\n OpenAIGymEnvironment,\n)\nfrom ml.rl.test.gym.run_gym import dict_to_np, get_possible_actions\nfrom ml.rl.training.rl_dataset import RLDataset\nfrom ml.rl.training.world_model.mdnrnn_trainer import MDNRNNTrainer\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef loss_to_num(losses):\n return {k: v.item() for k, v in losses.items()}\n\n\ndef multi_step_sample_generator(\n gym_env: OpenAIGymEnvironment,\n num_transitions: int,\n max_steps: Optional[int],\n multi_steps: int,\n include_shorter_samples_at_start: bool,\n include_shorter_samples_at_end: bool,\n):\n \"\"\"\n Convert gym env multi-step sample format to mdn-rnn multi-step sample format\n\n :param gym_env: The environment used to generate multi-step samples\n :param num_transitions: # of samples to return\n :param max_steps: An episode terminates when the horizon is beyond max_steps\n :param multi_steps: # of steps of states and actions per sample\n :param include_shorter_samples_at_start: Whether to keep samples of shorter steps\n which are generated at the beginning of an episode\n :param include_shorter_samples_at_end: Whether to keep samples of shorter steps\n which are generated at the end of an episode\n \"\"\"\n samples = gym_env.generate_random_samples(\n num_transitions=num_transitions,\n use_continuous_action=True,\n max_step=max_steps,\n multi_steps=multi_steps,\n include_shorter_samples_at_start=include_shorter_samples_at_start,\n include_shorter_samples_at_end=include_shorter_samples_at_end,\n )\n\n for j in range(num_transitions):\n sample_steps = len(samples.terminals[j]) # type: ignore\n state = dict_to_np(samples.states[j], np_size=gym_env.state_dim, key_offset=0)\n action = dict_to_np(\n samples.actions[j], np_size=gym_env.action_dim, key_offset=gym_env.state_dim\n )\n next_actions = np.float32( # type: ignore\n [\n dict_to_np(\n samples.next_actions[j][k],\n np_size=gym_env.action_dim,\n key_offset=gym_env.state_dim,\n )\n for k in range(sample_steps)\n ]\n )\n next_states = np.float32( # type: ignore\n [\n dict_to_np(\n samples.next_states[j][k], np_size=gym_env.state_dim, key_offset=0\n )\n for k in range(sample_steps)\n ]\n )\n rewards = np.float32(samples.rewards[j]) # type: ignore\n terminals = np.float32(samples.terminals[j]) # type: ignore\n not_terminals = np.logical_not(terminals)\n ordered_states = np.vstack((state, next_states))\n ordered_actions = np.vstack((action, next_actions))\n mdnrnn_states = ordered_states[:-1]\n mdnrnn_actions = ordered_actions[:-1]\n mdnrnn_next_states = ordered_states[-multi_steps:]\n mdnrnn_next_actions = ordered_actions[-multi_steps:]\n\n # Padding zeros so that all samples have equal steps\n # The general rule is to pad zeros at the end of sequences.\n # In addition, if the sequence only has one step (i.e., the\n # first state of an 
episode), pad one zero row ahead of the\n # sequence, which enables embedding generated properly for\n # one-step samples\n num_padded_top_rows = 1 if multi_steps > 1 and sample_steps == 1 else 0\n num_padded_bottom_rows = multi_steps - sample_steps - num_padded_top_rows\n sample_steps_next = len(mdnrnn_next_states)\n num_padded_top_rows_next = 0\n num_padded_bottom_rows_next = multi_steps - sample_steps_next\n yield (\n np.pad(\n mdnrnn_states,\n ((num_padded_top_rows, num_padded_bottom_rows), (0, 0)),\n \"constant\",\n constant_values=0.0,\n ),\n np.pad(\n mdnrnn_actions,\n ((num_padded_top_rows, num_padded_bottom_rows), (0, 0)),\n \"constant\",\n constant_values=0.0,\n ),\n np.pad(\n rewards,\n ((num_padded_top_rows, num_padded_bottom_rows)),\n \"constant\",\n constant_values=0.0,\n ),\n np.pad(\n mdnrnn_next_states,\n ((num_padded_top_rows_next, num_padded_bottom_rows_next), (0, 0)),\n \"constant\",\n constant_values=0.0,\n ),\n np.pad(\n mdnrnn_next_actions,\n ((num_padded_top_rows_next, num_padded_bottom_rows_next), (0, 0)),\n \"constant\",\n constant_values=0.0,\n ),\n np.pad(\n not_terminals,\n ((num_padded_top_rows, num_padded_bottom_rows)),\n \"constant\",\n constant_values=0.0,\n ),\n sample_steps,\n sample_steps_next,\n )\n\n\ndef get_replay_buffer(\n num_episodes: int, seq_len: int, max_step: int, gym_env: OpenAIGymEnvironment\n) -> MDNRNNMemoryPool:\n num_transitions = num_episodes * max_step\n replay_buffer = MDNRNNMemoryPool(max_replay_memory_size=num_transitions)\n for (\n mdnrnn_state,\n mdnrnn_action,\n rewards,\n next_states,\n _,\n not_terminals,\n _,\n _,\n ) in multi_step_sample_generator(\n gym_env,\n num_transitions=num_transitions,\n max_steps=max_step,\n multi_steps=seq_len,\n include_shorter_samples_at_start=False,\n include_shorter_samples_at_end=False,\n ):\n mdnrnn_state, mdnrnn_action, next_states, rewards, not_terminals = (\n torch.tensor(mdnrnn_state),\n torch.tensor(mdnrnn_action),\n torch.tensor(next_states),\n torch.tensor(rewards),\n torch.tensor(not_terminals),\n )\n replay_buffer.insert_into_memory(\n mdnrnn_state, mdnrnn_action, next_states, rewards, not_terminals\n )\n\n return replay_buffer\n\n\ndef main(args):\n parser = argparse.ArgumentParser(\n description=\"Train a Mixture-Density-Network RNN net to learn an OpenAI\"\n \" Gym environment, i.e., predict next state, reward, and\"\n \" terminal signal using current state and action\"\n )\n parser.add_argument(\"-p\", \"--parameters\", help=\"Path to JSON parameters file.\")\n parser.add_argument(\n \"-g\",\n \"--gpu_id\",\n help=\"If set, will use GPU with specified ID. Otherwise will use CPU.\",\n default=-1,\n )\n parser.add_argument(\n \"-l\",\n \"--log_level\",\n choices=[\"debug\", \"info\", \"warning\", \"error\", \"critical\"],\n help=\"If set, use logging level specified (debug, info, warning, error, \"\n \"critical). 
Else defaults to info.\",\n default=\"info\",\n )\n parser.add_argument(\n \"-f\",\n \"--feature_importance\",\n action=\"store_true\",\n help=\"If set, feature importance will be calculated after the training\",\n )\n parser.add_argument(\n \"-s\",\n \"--feature_sensitivity\",\n action=\"store_true\",\n help=\"If set, state feature sensitivity by varying actions will be\"\n \" calculated after the training\",\n )\n parser.add_argument(\n \"-e\",\n \"--save_embedding_to_path\",\n help=\"If a file path is provided, save a RLDataset with states embedded\"\n \" by the trained world model\",\n )\n args = parser.parse_args(args)\n\n logger.setLevel(getattr(logging, args.log_level.upper()))\n\n with open(args.parameters, \"r\") as f:\n params = json_to_object(f.read(), OpenAiGymParameters)\n if args.gpu_id != -1:\n params = params._replace(use_gpu=True)\n\n mdnrnn_gym(\n params,\n args.feature_importance,\n args.feature_sensitivity,\n args.save_embedding_to_path,\n )\n\n\ndef mdnrnn_gym(\n params: OpenAiGymParameters,\n feature_importance: bool = False,\n feature_sensitivity: bool = False,\n save_embedding_to_path: Optional[str] = None,\n seed: Optional[int] = None,\n):\n assert params.mdnrnn is not None\n use_gpu = params.use_gpu\n logger.info(\"Running gym with params\")\n logger.info(params)\n\n env_type = params.env\n env = OpenAIGymEnvironment(\n env_type, epsilon=1.0, softmax_policy=True, gamma=0.99, random_seed=seed\n )\n\n # create test data once\n assert params.run_details.max_steps is not None\n test_replay_buffer = get_replay_buffer(\n params.run_details.num_test_episodes,\n params.run_details.seq_len,\n params.run_details.max_steps,\n env,\n )\n test_batch = test_replay_buffer.sample_memories(\n test_replay_buffer.memory_size, use_gpu=use_gpu, batch_first=True\n )\n\n trainer = create_trainer(params, env, use_gpu)\n _, _, trainer = train_sgd(\n env,\n trainer,\n use_gpu,\n \"{} test run\".format(env_type),\n params.mdnrnn.minibatch_size,\n params.run_details,\n test_batch=test_batch,\n )\n feature_importance_map, feature_sensitivity_map, dataset = None, None, None\n if feature_importance:\n feature_importance_map = calculate_feature_importance(\n env, trainer, use_gpu, params.run_details, test_batch=test_batch\n )\n if feature_sensitivity:\n feature_sensitivity_map = calculate_feature_sensitivity_by_actions(\n env, trainer, use_gpu, params.run_details, test_batch=test_batch\n )\n if save_embedding_to_path:\n dataset = RLDataset(save_embedding_to_path)\n create_embed_rl_dataset(env, trainer, dataset, use_gpu, params.run_details)\n dataset.save()\n return env, trainer, feature_importance_map, feature_sensitivity_map, dataset\n\n\ndef calculate_feature_importance(\n gym_env: OpenAIGymEnvironment,\n trainer: MDNRNNTrainer,\n use_gpu: bool,\n run_details: OpenAiRunDetails,\n test_batch: rlt.PreprocessedTrainingBatch,\n):\n assert run_details.max_steps is not None\n assert run_details.num_test_episodes is not None\n assert run_details.seq_len is not None\n feature_importance_evaluator = FeatureImportanceEvaluator(\n trainer,\n discrete_action=gym_env.action_type == EnvType.DISCRETE_ACTION,\n state_feature_num=gym_env.state_dim,\n action_feature_num=gym_env.action_dim,\n sorted_action_feature_start_indices=list(range(gym_env.action_dim)),\n sorted_state_feature_start_indices=list(range(gym_env.state_dim)),\n )\n feature_loss_vector = feature_importance_evaluator.evaluate(test_batch)[\n \"feature_loss_increase\"\n ]\n feature_importance_map = {}\n for i in range(gym_env.action_dim):\n 
print(\n \"action {}, feature importance: {}\".format(i, feature_loss_vector[i].item())\n )\n feature_importance_map[f\"action{i}\"] = feature_loss_vector[i].item()\n for i in range(gym_env.state_dim):\n print(\n \"state {}, feature importance: {}\".format(\n i, feature_loss_vector[i + gym_env.action_dim].item()\n )\n )\n feature_importance_map[f\"state{i}\"] = feature_loss_vector[\n i + gym_env.action_dim\n ].item()\n return feature_importance_map\n\n\ndef create_embed_rl_dataset(\n gym_env: OpenAIGymEnvironment,\n trainer: MDNRNNTrainer,\n dataset: RLDataset,\n use_gpu: bool,\n run_details: OpenAiRunDetails,\n):\n assert run_details.max_steps is not None\n old_mdnrnn_mode = trainer.mdnrnn.mdnrnn.training\n trainer.mdnrnn.mdnrnn.eval()\n num_transitions = run_details.num_state_embed_episodes * run_details.max_steps\n device = torch.device(\"cuda\") if use_gpu else torch.device(\"cpu\") # type: ignore\n\n (\n state_batch,\n action_batch,\n reward_batch,\n next_state_batch,\n next_action_batch,\n not_terminal_batch,\n step_batch,\n next_step_batch,\n ) = map(\n list,\n zip(\n *multi_step_sample_generator(\n gym_env=gym_env,\n num_transitions=num_transitions,\n max_steps=run_details.max_steps,\n # +1 because MDNRNN embeds the first seq_len steps and then\n # the embedded state will be concatenated with the last step\n multi_steps=run_details.seq_len + 1,\n include_shorter_samples_at_start=True,\n include_shorter_samples_at_end=False,\n )\n ),\n )\n\n def concat_batch(batch):\n return torch.cat(\n [\n torch.tensor(\n np.expand_dims(x, axis=1), dtype=torch.float, device=device\n )\n for x in batch\n ],\n dim=1,\n )\n\n # shape: seq_len x batch_size x feature_dim\n mdnrnn_state = concat_batch(state_batch)\n next_mdnrnn_state = concat_batch(next_state_batch)\n mdnrnn_action = concat_batch(action_batch)\n next_mdnrnn_action = concat_batch(next_action_batch)\n\n mdnrnn_input = rlt.PreprocessedStateAction.from_tensors(\n state=mdnrnn_state, action=mdnrnn_action\n )\n next_mdnrnn_input = rlt.PreprocessedStateAction.from_tensors(\n state=next_mdnrnn_state, action=next_mdnrnn_action\n )\n # batch-compute state embedding\n mdnrnn_output = trainer.mdnrnn(mdnrnn_input)\n next_mdnrnn_output = trainer.mdnrnn(next_mdnrnn_input)\n\n for i in range(len(state_batch)):\n # Embed the state as the hidden layer's output\n # until the previous step + current state\n hidden_idx = 0 if step_batch[i] == 1 else step_batch[i] - 2 # type: ignore\n next_hidden_idx = next_step_batch[i] - 2 # type: ignore\n hidden_embed = (\n mdnrnn_output.all_steps_lstm_hidden[hidden_idx, i, :]\n .squeeze()\n .detach()\n .cpu()\n )\n state_embed = torch.cat(\n (hidden_embed, torch.tensor(state_batch[i][hidden_idx + 1])) # type: ignore\n )\n next_hidden_embed = (\n next_mdnrnn_output.all_steps_lstm_hidden[next_hidden_idx, i, :]\n .squeeze()\n .detach()\n .cpu()\n )\n next_state_embed = torch.cat(\n (\n next_hidden_embed,\n torch.tensor(next_state_batch[i][next_hidden_idx + 1]), # type: ignore\n )\n )\n\n logger.debug(\n \"create_embed_rl_dataset:\\nstate batch\\n{}\\naction batch\\n{}\\nlast \"\n \"action: {},reward: {}\\nstate embed {}\\nnext state embed {}\\n\".format(\n state_batch[i][: hidden_idx + 1], # type: ignore\n action_batch[i][: hidden_idx + 1], # type: ignore\n action_batch[i][hidden_idx + 1], # type: ignore\n reward_batch[i][hidden_idx + 1], # type: ignore\n state_embed,\n next_state_embed,\n )\n )\n\n terminal = 1 - not_terminal_batch[i][hidden_idx + 1] # type: ignore\n possible_actions, possible_actions_mask = 
get_possible_actions(\n gym_env, ModelType.PYTORCH_PARAMETRIC_DQN.value, False\n )\n possible_next_actions, possible_next_actions_mask = get_possible_actions(\n gym_env, ModelType.PYTORCH_PARAMETRIC_DQN.value, terminal\n )\n dataset.insert(\n state=state_embed,\n action=torch.tensor(action_batch[i][hidden_idx + 1]), # type: ignore\n reward=float(reward_batch[i][hidden_idx + 1]), # type: ignore\n next_state=next_state_embed,\n next_action=torch.tensor(\n next_action_batch[i][next_hidden_idx + 1] # type: ignore\n ),\n terminal=torch.tensor(terminal),\n possible_next_actions=possible_next_actions,\n possible_next_actions_mask=possible_next_actions_mask,\n time_diff=torch.tensor(1),\n possible_actions=possible_actions,\n possible_actions_mask=possible_actions_mask,\n policy_id=0,\n )\n logger.info(\n \"Insert {} transitions into a state embed dataset\".format(len(state_batch))\n )\n trainer.mdnrnn.mdnrnn.train(old_mdnrnn_mode)\n return dataset\n\n\ndef calculate_feature_sensitivity_by_actions(\n gym_env: OpenAIGymEnvironment,\n trainer: MDNRNNTrainer,\n use_gpu: bool,\n run_details: OpenAiRunDetails,\n test_batch: rlt.PreprocessedTrainingBatch,\n seq_len: int = 5,\n num_test_episodes: int = 100,\n):\n assert run_details.max_steps is not None\n feature_sensitivity_evaluator = FeatureSensitivityEvaluator(\n trainer,\n state_feature_num=gym_env.state_dim,\n sorted_state_feature_start_indices=list(range(gym_env.state_dim)),\n )\n feature_sensitivity_vector = feature_sensitivity_evaluator.evaluate(test_batch)[\n \"feature_sensitivity\"\n ]\n feature_sensitivity_map = {}\n for i in range(gym_env.state_dim):\n feature_sensitivity_map[\"state\" + str(i)] = feature_sensitivity_vector[i].item()\n print(\n \"state {}, feature sensitivity: {}\".format(\n i, feature_sensitivity_vector[i].item()\n )\n )\n return feature_sensitivity_map\n\n\ndef train_sgd(\n gym_env: OpenAIGymEnvironment,\n trainer: MDNRNNTrainer,\n use_gpu: bool,\n test_run_name: str,\n minibatch_size: int,\n run_details: OpenAiRunDetails,\n test_batch: rlt.PreprocessedTrainingBatch,\n):\n assert run_details.max_steps is not None\n train_replay_buffer = get_replay_buffer(\n run_details.num_train_episodes,\n run_details.seq_len,\n run_details.max_steps,\n gym_env,\n )\n valid_replay_buffer = get_replay_buffer(\n run_details.num_test_episodes,\n run_details.seq_len,\n run_details.max_steps,\n gym_env,\n )\n valid_batch = valid_replay_buffer.sample_memories(\n valid_replay_buffer.memory_size, use_gpu=use_gpu, batch_first=True\n )\n valid_loss_history = []\n\n num_batch_per_epoch = train_replay_buffer.memory_size // minibatch_size\n logger.info(\n \"Collected data {} transitions.\\n\"\n \"Training will take {} epochs, with each epoch having {} mini-batches\"\n \" and each mini-batch having {} samples\".format(\n train_replay_buffer.memory_size,\n run_details.train_epochs,\n num_batch_per_epoch,\n minibatch_size,\n )\n )\n\n for i_epoch in range(run_details.train_epochs):\n for i_batch in range(num_batch_per_epoch):\n training_batch = train_replay_buffer.sample_memories(\n minibatch_size, use_gpu=use_gpu, batch_first=True\n )\n losses = trainer.train(training_batch, batch_first=True)\n logger.info(\n \"{}-th epoch, {}-th minibatch: \\n\"\n \"loss={}, bce={}, gmm={}, mse={} \\n\"\n \"cum loss={}, cum bce={}, cum gmm={}, cum mse={}\\n\".format(\n i_epoch,\n i_batch,\n losses[\"loss\"],\n losses[\"bce\"],\n losses[\"gmm\"],\n losses[\"mse\"],\n np.mean(trainer.cum_loss),\n np.mean(trainer.cum_bce),\n np.mean(trainer.cum_gmm),\n 
np.mean(trainer.cum_mse),\n )\n )\n\n trainer.mdnrnn.mdnrnn.eval()\n valid_losses = trainer.get_loss(\n valid_batch, state_dim=gym_env.state_dim, batch_first=True\n )\n valid_losses = loss_to_num(valid_losses)\n valid_loss_history.append(valid_losses)\n trainer.mdnrnn.mdnrnn.train()\n logger.info(\n \"{}-th epoch, validate loss={}, bce={}, gmm={}, mse={}\".format(\n i_epoch,\n valid_losses[\"loss\"],\n valid_losses[\"bce\"],\n valid_losses[\"gmm\"],\n valid_losses[\"mse\"],\n )\n )\n latest_loss = valid_loss_history[-1][\"loss\"]\n recent_valid_loss_hist = valid_loss_history[\n -1 - run_details.early_stopping_patience : -1\n ]\n # earlystopping\n if len(valid_loss_history) > run_details.early_stopping_patience and all(\n (latest_loss >= v[\"loss\"] for v in recent_valid_loss_hist)\n ):\n break\n\n trainer.mdnrnn.mdnrnn.eval()\n test_losses = trainer.get_loss(\n test_batch, state_dim=gym_env.state_dim, batch_first=True\n )\n test_losses = loss_to_num(test_losses)\n logger.info(\n \"Test loss: {}, bce={}, gmm={}, mse={}\".format(\n test_losses[\"loss\"],\n test_losses[\"bce\"],\n test_losses[\"gmm\"],\n test_losses[\"mse\"],\n )\n )\n logger.info(\"Valid loss history: {}\".format(valid_loss_history))\n return test_losses, valid_loss_history, trainer\n\n\ndef create_trainer(\n params: OpenAiGymParameters, env: OpenAIGymEnvironment, use_gpu: bool\n):\n assert params.mdnrnn is not None\n assert params.run_details.max_steps is not None\n mdnrnn_params = params.mdnrnn\n mdnrnn_net = MemoryNetwork(\n state_dim=env.state_dim,\n action_dim=env.action_dim,\n num_hiddens=mdnrnn_params.hidden_size,\n num_hidden_layers=mdnrnn_params.num_hidden_layers,\n num_gaussians=mdnrnn_params.num_gaussians,\n )\n if use_gpu:\n mdnrnn_net = mdnrnn_net.cuda()\n\n cum_loss_hist_len = (\n params.run_details.num_train_episodes\n * params.run_details.max_steps\n // mdnrnn_params.minibatch_size\n )\n trainer = MDNRNNTrainer(\n mdnrnn_network=mdnrnn_net, params=mdnrnn_params, cum_loss_hist=cum_loss_hist_len\n )\n return trainer\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n logging.getLogger().setLevel(logging.INFO)\n args = sys.argv\n main(args[1:])\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n\nimport argparse\nimport json\nimport logging\nimport pickle\nimport random\nimport sys\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\nimport torch\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import core\nfrom ml.rl.json_serialize import json_to_object\nfrom ml.rl.parameters import (\n CEMParameters,\n CNNParameters,\n ContinuousActionModelParameters,\n DiscreteActionModelParameters,\n FeedForwardParameters,\n MDNRNNParameters,\n OpenAiGymParameters,\n OpenAiRunDetails,\n OptimizerParameters,\n RainbowDQNParameters,\n RLParameters,\n SACModelParameters,\n SACTrainingParameters,\n TD3ModelParameters,\n TD3TrainingParameters,\n TrainingParameters,\n)\nfrom ml.rl.test.base.utils import write_lists_to_csv\nfrom ml.rl.test.gym.open_ai_gym_environment import (\n EnvType,\n ModelType,\n OpenAIGymEnvironment,\n)\nfrom ml.rl.test.gym.open_ai_gym_memory_pool import OpenAIGymMemoryPool\nfrom ml.rl.training.on_policy_predictor import (\n CEMPlanningPredictor,\n ContinuousActionOnPolicyPredictor,\n DiscreteDQNOnPolicyPredictor,\n OnPolicyPredictor,\n ParametricDQNOnPolicyPredictor,\n)\nfrom ml.rl.training.rl_dataset import RLDataset\nfrom ml.rl.training.rl_trainer_pytorch import RLTrainer\nfrom ml.rl.workflow.transitional import (\n create_dqn_trainer_from_params,\n create_parametric_dqn_trainer_from_params,\n get_cem_trainer,\n get_sac_trainer,\n get_td3_trainer,\n)\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef dict_to_np(d, np_size, key_offset):\n x = np.zeros(np_size, dtype=np.float32)\n for key in d:\n x[key - key_offset] = d[key]\n return x\n\n\ndef dict_to_torch(d, np_size, key_offset):\n x = torch.zeros(np_size)\n for key in d:\n x[key - key_offset] = d[key]\n return x\n\n\ndef get_possible_actions(gym_env, model_type, terminal):\n if model_type == ModelType.PYTORCH_DISCRETE_DQN.value:\n possible_next_actions = None\n possible_next_actions_mask = torch.tensor(\n [0 if terminal else 1 for __ in range(gym_env.action_dim)]\n )\n elif model_type == ModelType.PYTORCH_PARAMETRIC_DQN.value:\n possible_next_actions = torch.eye(gym_env.action_dim)\n possible_next_actions_mask = torch.tensor(\n [0 if terminal else 1 for __ in range(gym_env.action_dim)]\n )\n elif model_type in (\n ModelType.CONTINUOUS_ACTION.value,\n ModelType.SOFT_ACTOR_CRITIC.value,\n ModelType.TD3.value,\n ModelType.CEM.value,\n ):\n possible_next_actions = None\n possible_next_actions_mask = None\n else:\n raise NotImplementedError()\n return possible_next_actions, possible_next_actions_mask\n\n\ndef create_epsilon(offline_train, rl_parameters, params):\n if offline_train:\n # take random actions during data collection\n epsilon = 1.0\n else:\n epsilon = rl_parameters.epsilon\n epsilon_decay, minimum_epsilon = 1.0, None\n if params.run_details.epsilon_decay is not None:\n epsilon_decay = params.run_details.epsilon_decay\n minimum_epsilon = params.run_details.minimum_epsilon\n return epsilon, epsilon_decay, minimum_epsilon\n\n\ndef create_replay_buffer(\n env, params, model_type, offline_train, path_to_pickled_transitions\n):\n \"\"\"\n Train on transitions generated from a random policy live or\n read transitions from a pickle file and load into replay buffer.\n \"\"\"\n replay_buffer = OpenAIGymMemoryPool(params.max_replay_memory_size)\n if path_to_pickled_transitions:\n create_stored_policy_offline_dataset(replay_buffer, path_to_pickled_transitions)\n replay_state_dim 
= replay_buffer.state_dim\n replay_action_dim = replay_buffer.action_dim\n assert replay_state_dim == env.state_dim\n assert replay_action_dim == env.action_dim\n elif offline_train:\n create_random_policy_offline_dataset(\n env, replay_buffer, params.run_details.max_steps, model_type\n )\n return replay_buffer\n\n\ndef train(\n c2_device,\n gym_env,\n offline_train,\n replay_buffer,\n model_type,\n trainer,\n predictor,\n test_run_name,\n score_bar,\n run_details: OpenAiRunDetails,\n save_timesteps_to_dataset=None,\n start_saving_from_score=None,\n bcq_imitator_hyperparams=None,\n reward_shape_func=None,\n):\n if offline_train:\n assert (\n run_details.max_steps is not None\n and run_details.offline_train_epochs is not None\n ), \"Missing fields required for offline training: {}\".format(str(run_details))\n return train_gym_offline_rl(\n gym_env,\n replay_buffer,\n model_type,\n trainer,\n predictor,\n test_run_name,\n score_bar,\n run_details.max_steps,\n run_details.avg_over_num_episodes,\n run_details.offline_train_epochs,\n run_details.offline_num_batches_per_epoch,\n bcq_imitator_hyperparams,\n )\n else:\n return train_gym_online_rl(\n c2_device,\n gym_env,\n replay_buffer,\n model_type,\n trainer,\n predictor,\n test_run_name,\n score_bar,\n run_details.num_episodes,\n run_details.max_steps,\n run_details.train_every_ts,\n run_details.train_after_ts,\n run_details.test_every_ts,\n run_details.test_after_ts,\n run_details.num_train_batches,\n run_details.avg_over_num_episodes,\n run_details.render,\n save_timesteps_to_dataset,\n start_saving_from_score,\n run_details.solved_reward_threshold,\n run_details.max_episodes_to_run_after_solved,\n run_details.stop_training_after_solved,\n reward_shape_func,\n )\n\n\ndef create_random_policy_offline_dataset(gym_env, replay_buffer, max_steps, model_type):\n \"\"\"Generate random transitions and and load into replay buffer.\"\"\"\n\n samples = gym_env.generate_random_samples(\n num_transitions=replay_buffer.max_replay_memory_size,\n use_continuous_action=True,\n max_step=max_steps,\n )\n policy_id = 0\n for i in range(len(samples.mdp_ids)):\n state = dict_to_torch(samples.states[i], gym_env.state_dim, 0)\n action = dict_to_torch(\n samples.actions[i], gym_env.action_dim, gym_env.state_dim\n )\n reward = float(samples.rewards[i])\n next_state = dict_to_torch(samples.next_states[i], gym_env.state_dim, 0)\n next_action = dict_to_torch(\n samples.next_actions[i], gym_env.action_dim, gym_env.state_dim\n )\n terminal = samples.terminals[i]\n (possible_actions, possible_actions_mask) = get_possible_actions(\n gym_env, model_type, False\n )\n (possible_next_actions, possible_next_actions_mask) = get_possible_actions(\n gym_env, model_type, samples.terminals[i]\n )\n replay_buffer.insert_into_memory(\n state,\n action,\n reward,\n next_state,\n next_action,\n terminal,\n possible_next_actions,\n possible_next_actions_mask,\n 1,\n possible_actions,\n possible_actions_mask,\n policy_id,\n )\n logger.info(\n \"Generating {} transitions under random policy.\".format(\n replay_buffer.max_replay_memory_size\n )\n )\n\n\ndef create_stored_policy_offline_dataset(replay_buffer, path):\n \"\"\"Read transitions from pickle file and load into replay buffer.\"\"\"\n with open(path, \"rb\") as f:\n rows = pickle.load(f)\n unique_policies = set()\n for row in rows:\n unique_policies.add(row[\"policy_id\"])\n replay_buffer.insert_into_memory(**row)\n logger.info(\n \"Transitions generated from {} different policies\".format(len(unique_policies))\n )\n 
logger.info(\"Loading {} transitions from {}\".format(replay_buffer.size, path))\n\n\ndef train_gym_offline_rl(\n gym_env: OpenAIGymEnvironment,\n replay_buffer: OpenAIGymMemoryPool,\n model_type: str,\n trainer: RLTrainer,\n predictor: OnPolicyPredictor,\n test_run_name: str,\n score_bar: Optional[float],\n max_steps: int,\n avg_over_num_episodes: int,\n offline_train_epochs: int,\n num_batch_per_epoch: Optional[int],\n bcq_imitator_hyper_params: Optional[Dict[str, Any]] = None,\n):\n if num_batch_per_epoch is None:\n num_batch_per_epoch = replay_buffer.size // trainer.minibatch_size\n assert num_batch_per_epoch > 0, \"The size of replay buffer is not sufficient\"\n\n logger.info(\n \"{} offline transitions in replay buffer.\\n\"\n \"Training will take {} epochs, with each epoch having {} mini-batches\"\n \" and each mini-batch having {} samples\".format(\n replay_buffer.size,\n offline_train_epochs,\n num_batch_per_epoch,\n trainer.minibatch_size,\n )\n )\n\n avg_reward_history, epoch_history = [], []\n\n # Pre-train a GBDT imitator if doing batch constrained q-learning in Gym\n if getattr(trainer, \"bcq\", None):\n assert bcq_imitator_hyper_params is not None\n gbdt = GradientBoostingClassifier(\n n_estimators=bcq_imitator_hyper_params[\"gbdt_trees\"],\n max_depth=bcq_imitator_hyper_params[\"max_depth\"],\n )\n samples = replay_buffer.sample_memories(replay_buffer.size, model_type)\n X, y = samples.states.numpy(), torch.max(samples.actions, dim=1)[1].numpy()\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n logger.info(\"Fitting GBDT...\")\n gbdt.fit(X_train, y_train)\n train_score = round(gbdt.score(X_train, y_train) * 100, 1)\n test_score = round(gbdt.score(X_test, y_test) * 100, 1)\n logger.info(\n \"GBDT train accuracy {}% || test accuracy {}%\".format(\n train_score, test_score\n )\n )\n trainer.bcq_imitator = gbdt.predict_proba # type: ignore\n\n # Offline training\n for i_epoch in range(offline_train_epochs):\n for _ in range(num_batch_per_epoch):\n samples = replay_buffer.sample_memories(trainer.minibatch_size, model_type)\n samples.set_device(trainer.device)\n trainer.train(samples)\n\n batch_td_loss = float(\n torch.mean(\n torch.tensor(\n [stat.td_loss for stat in trainer.loss_reporter.incoming_stats]\n )\n )\n )\n trainer.loss_reporter.flush()\n logger.info(\n \"Average TD loss: {} in epoch {}\".format(batch_td_loss, i_epoch + 1)\n )\n\n # test model performance for this epoch\n avg_rewards, avg_discounted_rewards = gym_env.run_ep_n_times(\n avg_over_num_episodes, predictor, test=True, max_steps=max_steps\n )\n avg_reward_history.append(avg_rewards)\n\n # For offline training, use epoch number as timestep history since\n # we have a fixed batch of data to count epochs over.\n epoch_history.append(i_epoch)\n logger.info(\n \"Achieved an average reward score of {} over {} evaluations\"\n \" after epoch {}.\".format(avg_rewards, avg_over_num_episodes, i_epoch)\n )\n if score_bar is not None and avg_rewards > score_bar:\n logger.info(\n \"Avg. reward history for {}: {}\".format(\n test_run_name, avg_reward_history\n )\n )\n return avg_reward_history, epoch_history, trainer, predictor, gym_env\n\n logger.info(\n \"Avg. 
reward history for {}: {}\".format(test_run_name, avg_reward_history)\n )\n return avg_reward_history, epoch_history, trainer, predictor, gym_env\n\n\ndef train_gym_online_rl(\n c2_device,\n gym_env,\n replay_buffer,\n model_type,\n trainer,\n predictor,\n test_run_name,\n score_bar,\n num_episodes,\n max_steps,\n train_every_ts,\n train_after_ts,\n test_every_ts,\n test_after_ts,\n num_train_batches,\n avg_over_num_episodes,\n render,\n save_timesteps_to_dataset,\n start_saving_from_score,\n solved_reward_threshold,\n max_episodes_to_run_after_solved,\n stop_training_after_solved,\n reward_shape_func,\n):\n \"\"\"Train off of dynamic set of transitions generated on-policy.\"\"\"\n total_timesteps = 0\n avg_reward_history, timestep_history = [], []\n best_episode_score_seen = -1e20\n episodes_since_solved = 0\n solved = False\n policy_id = 0\n\n for i in range(num_episodes):\n if (\n max_episodes_to_run_after_solved is not None\n and episodes_since_solved > max_episodes_to_run_after_solved\n ):\n break\n\n if solved:\n episodes_since_solved += 1\n\n terminal = False\n next_state = gym_env.transform_state(gym_env.reset())\n next_action, next_action_probability = gym_env.policy(\n predictor, next_state, False\n )\n\n reward_sum = 0\n ep_timesteps = 0\n\n if model_type == ModelType.CONTINUOUS_ACTION.value:\n trainer.noise.clear()\n\n while not terminal:\n state = next_state\n action = next_action\n action_probability = next_action_probability\n\n # Get possible actions\n possible_actions, _ = get_possible_actions(gym_env, model_type, terminal)\n\n if render:\n gym_env.env.render()\n\n timeline_format_action, gym_action = _format_action_for_log_and_gym(\n action, gym_env.action_type, model_type\n )\n next_state, reward, terminal, _ = gym_env.step(gym_action)\n\n next_state = gym_env.transform_state(next_state)\n\n if reward_shape_func:\n reward = reward_shape_func(next_state, ep_timesteps)\n\n ep_timesteps += 1\n total_timesteps += 1\n next_action, next_action_probability = gym_env.policy(\n predictor, next_state, False\n )\n reward_sum += reward\n\n (possible_actions, possible_actions_mask) = get_possible_actions(\n gym_env, model_type, False\n )\n\n # Get possible next actions\n (possible_next_actions, possible_next_actions_mask) = get_possible_actions(\n gym_env, model_type, terminal\n )\n\n replay_buffer.insert_into_memory(\n state,\n action,\n reward,\n next_state,\n next_action,\n terminal,\n possible_next_actions,\n possible_next_actions_mask,\n 1,\n possible_actions,\n possible_actions_mask,\n policy_id,\n )\n\n if save_timesteps_to_dataset and (\n start_saving_from_score is None\n or best_episode_score_seen >= start_saving_from_score\n ):\n save_timesteps_to_dataset.insert(\n mdp_id=i,\n sequence_number=ep_timesteps - 1,\n state=state,\n action=action,\n timeline_format_action=timeline_format_action,\n action_probability=action_probability,\n reward=reward,\n next_state=next_state,\n next_action=next_action,\n terminal=terminal,\n possible_next_actions=possible_next_actions,\n possible_next_actions_mask=possible_next_actions_mask,\n time_diff=1,\n possible_actions=possible_actions,\n possible_actions_mask=possible_actions_mask,\n policy_id=policy_id,\n )\n\n # Training loop\n if (\n total_timesteps % train_every_ts == 0\n and total_timesteps > train_after_ts\n and replay_buffer.size >= trainer.minibatch_size\n and not (stop_training_after_solved and solved)\n ):\n for _ in range(num_train_batches):\n samples = replay_buffer.sample_memories(\n trainer.minibatch_size, model_type\n )\n 
samples.set_device(trainer.device)\n trainer.train(samples)\n # Every time we train, the policy changes\n policy_id += 1\n\n # Evaluation loop\n if total_timesteps % test_every_ts == 0 and total_timesteps > test_after_ts:\n avg_rewards, avg_discounted_rewards = gym_env.run_ep_n_times(\n avg_over_num_episodes, predictor, test=True, max_steps=max_steps\n )\n if avg_rewards > best_episode_score_seen:\n best_episode_score_seen = avg_rewards\n\n if (\n solved_reward_threshold is not None\n and best_episode_score_seen >= solved_reward_threshold\n ):\n solved = True\n\n avg_reward_history.append(avg_rewards)\n timestep_history.append(total_timesteps)\n logger.info(\n \"Achieved an average reward score of {} over {} evaluations.\"\n \" Total episodes: {}, total timesteps: {}.\".format(\n avg_rewards, avg_over_num_episodes, i + 1, total_timesteps\n )\n )\n if score_bar is not None and avg_rewards > score_bar:\n logger.info(\n \"Avg. reward history for {}: {}\".format(\n test_run_name, avg_reward_history\n )\n )\n return (\n avg_reward_history,\n timestep_history,\n trainer,\n predictor,\n gym_env,\n )\n\n if max_steps and ep_timesteps >= max_steps:\n break\n\n # Always eval on last episode\n if i == num_episodes - 1:\n avg_rewards, avg_discounted_rewards = gym_env.run_ep_n_times(\n avg_over_num_episodes, predictor, test=True, max_steps=max_steps\n )\n avg_reward_history.append(avg_rewards)\n timestep_history.append(total_timesteps)\n logger.info(\n \"Achieved an average reward score of {} over {} evaluations.\"\n \" Total episodes: {}, total timesteps: {}.\".format(\n avg_rewards, avg_over_num_episodes, i + 1, total_timesteps\n )\n )\n\n if solved:\n gym_env.epsilon = gym_env.minimum_epsilon\n else:\n gym_env.decay_epsilon()\n\n if i % 10 == 0:\n logger.info(\n \"Online RL episode {}, total_timesteps {}\".format(i, total_timesteps)\n )\n\n logger.info(\n \"Avg. reward history for {}: {}\".format(test_run_name, avg_reward_history)\n )\n return avg_reward_history, timestep_history, trainer, predictor, gym_env\n\n\ndef main(args):\n parser = argparse.ArgumentParser(\n description=\"Train a RL net to play in an OpenAI Gym environment.\"\n )\n parser.add_argument(\"-p\", \"--parameters\", help=\"Path to JSON parameters file.\")\n parser.add_argument(\n \"-s\",\n \"--score-bar\",\n help=\"Bar for averaged tests scores.\",\n type=float,\n default=None,\n )\n parser.add_argument(\n \"-l\",\n \"--log_level\",\n help=\"If set, use logging level specified (debug, info, warning, error, \"\n \"critical). 
Else defaults to info.\",\n default=\"info\",\n )\n parser.add_argument(\n \"-f\",\n \"--file_path\",\n help=\"If set, save all collected samples as an RLDataset to this file.\",\n default=None,\n )\n parser.add_argument(\n \"-e\",\n \"--start_saving_from_score\",\n type=int,\n help=\"If file_path is set, start saving episodes after this score is hit.\",\n default=None,\n )\n parser.add_argument(\n \"-r\",\n \"--results_file_path\",\n help=\"If set, save evaluation results to file.\",\n type=str,\n default=None,\n )\n parser.add_argument(\n \"--offline_train\",\n action=\"store_true\",\n help=\"If set, collect data using a random policy then train RL offline.\",\n )\n parser.add_argument(\n \"--path_to_pickled_transitions\",\n help=\"Path to saved transitions to load into replay buffer.\",\n type=str,\n default=None,\n )\n parser.add_argument(\n \"--seed\",\n help=\"Seed for the test (numpy, torch, and gym).\",\n type=int,\n default=None,\n )\n parser.add_argument(\n \"--use_gpu\",\n help=\"Use GPU, if available; set the device with CUDA_VISIBLE_DEVICES\",\n action=\"store_true\",\n )\n\n args = parser.parse_args(args)\n\n if args.log_level not in (\"debug\", \"info\", \"warning\", \"error\", \"critical\"):\n raise Exception(\"Logging level {} not valid level.\".format(args.log_level))\n else:\n logging.getLogger().setLevel(getattr(logging, args.log_level.upper()))\n\n if args.seed is not None:\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n\n assert (\n not args.path_to_pickled_transitions or args.offline_train\n ), \"path_to_pickled_transitions is provided so you must run offline training\"\n\n with open(args.parameters, \"r\") as f:\n params = json_to_object(f.read(), OpenAiGymParameters)\n\n if args.use_gpu:\n assert torch.cuda.is_available(), \"CUDA requested but not available\"\n params = params._replace(use_gpu=True)\n\n dataset = RLDataset(args.file_path) if args.file_path else None\n\n reward_history, iteration_history, trainer, predictor, env = run_gym(\n params,\n args.offline_train,\n args.score_bar,\n args.seed,\n dataset,\n args.start_saving_from_score,\n args.path_to_pickled_transitions,\n )\n\n if dataset:\n dataset.save()\n logger.info(\"Saving dataset to {}\".format(args.file_path))\n final_score_exploit, _ = env.run_ep_n_times(\n params.run_details.avg_over_num_episodes, predictor, test=True\n )\n final_score_explore, _ = env.run_ep_n_times(\n params.run_details.avg_over_num_episodes, predictor, test=False\n )\n logger.info(\n \"Final policy scores {} with epsilon={} and {} with epsilon=0 over {} eps.\".format(\n final_score_explore,\n env.epsilon,\n final_score_exploit,\n params.run_details.avg_over_num_episodes,\n )\n )\n\n if args.results_file_path:\n write_lists_to_csv(args.results_file_path, reward_history, iteration_history)\n return reward_history\n\n\ndef run_gym(\n params: OpenAiGymParameters,\n offline_train,\n score_bar,\n seed=None,\n save_timesteps_to_dataset=None,\n start_saving_from_score=None,\n path_to_pickled_transitions=None,\n warm_trainer=None,\n reward_shape_func=None,\n):\n use_gpu = params.use_gpu\n logger.info(\"Running gym with params\")\n logger.info(params)\n assert params.rl is not None\n rl_parameters = params.rl\n\n env_type = params.env\n model_type = params.model_type\n\n epsilon, epsilon_decay, minimum_epsilon = create_epsilon(\n offline_train, rl_parameters, params\n )\n env = OpenAIGymEnvironment(\n env_type,\n epsilon,\n rl_parameters.softmax_policy,\n rl_parameters.gamma,\n epsilon_decay,\n 
minimum_epsilon,\n seed,\n )\n replay_buffer = create_replay_buffer(\n env, params, model_type, offline_train, path_to_pickled_transitions\n )\n\n trainer = warm_trainer if warm_trainer else create_trainer(params, env)\n predictor = create_predictor(trainer, model_type, use_gpu, env.action_dim)\n\n c2_device = core.DeviceOption(caffe2_pb2.CUDA if use_gpu else caffe2_pb2.CPU)\n return train(\n c2_device,\n env,\n offline_train,\n replay_buffer,\n model_type,\n trainer,\n predictor,\n \"{} test run\".format(env_type),\n score_bar,\n params.run_details,\n save_timesteps_to_dataset=save_timesteps_to_dataset,\n start_saving_from_score=start_saving_from_score,\n reward_shape_func=reward_shape_func,\n )\n\n\ndef create_trainer(params: OpenAiGymParameters, env: OpenAIGymEnvironment):\n use_gpu = params.use_gpu\n model_type = params.model_type\n assert params.rl is not None\n rl_parameters = params.rl\n\n if model_type == ModelType.PYTORCH_DISCRETE_DQN.value:\n assert params.training is not None\n training_parameters = params.training\n assert params.rainbow is not None\n if env.img:\n assert (\n training_parameters.cnn_parameters is not None\n ), \"Missing CNN parameters for image input\"\n training_parameters.cnn_parameters.conv_dims[0] = env.num_input_channels\n training_parameters._replace(\n cnn_parameters=training_parameters.cnn_parameters._replace(\n input_height=env.height,\n input_width=env.width,\n num_input_channels=env.num_input_channels,\n )\n )\n else:\n assert (\n training_parameters.cnn_parameters is None\n ), \"Extra CNN parameters for non-image input\"\n discrete_trainer_params = DiscreteActionModelParameters(\n actions=env.actions,\n rl=rl_parameters,\n training=training_parameters,\n rainbow=params.rainbow,\n evaluation=params.evaluation,\n )\n trainer = create_dqn_trainer_from_params(\n discrete_trainer_params, env.normalization, use_gpu\n )\n\n elif model_type == ModelType.PYTORCH_PARAMETRIC_DQN.value:\n assert params.training is not None\n training_parameters = params.training\n assert params.rainbow is not None\n if env.img:\n assert (\n training_parameters.cnn_parameters is not None\n ), \"Missing CNN parameters for image input\"\n training_parameters.cnn_parameters.conv_dims[0] = env.num_input_channels\n else:\n assert (\n training_parameters.cnn_parameters is None\n ), \"Extra CNN parameters for non-image input\"\n continuous_trainer_params = ContinuousActionModelParameters(\n rl=rl_parameters, training=training_parameters, rainbow=params.rainbow\n )\n trainer = create_parametric_dqn_trainer_from_params(\n continuous_trainer_params,\n env.normalization,\n env.normalization_action,\n use_gpu,\n )\n\n elif model_type == ModelType.TD3.value:\n assert params.td3_training is not None\n assert params.critic_training is not None\n assert params.actor_training is not None\n td3_trainer_params = TD3ModelParameters(\n rl=rl_parameters,\n training=params.td3_training,\n q_network=params.critic_training,\n actor_network=params.actor_training,\n )\n trainer = get_td3_trainer(env, td3_trainer_params, use_gpu)\n\n elif model_type == ModelType.SOFT_ACTOR_CRITIC.value:\n assert params.sac_training is not None\n assert params.critic_training is not None\n assert params.actor_training is not None\n value_network = None\n if params.sac_training.use_value_network:\n value_network = params.sac_value_training\n\n sac_trainer_params = SACModelParameters(\n rl=rl_parameters,\n training=params.sac_training,\n q_network=params.critic_training,\n value_network=value_network,\n 
actor_network=params.actor_training,\n )\n trainer = get_sac_trainer(env, sac_trainer_params, use_gpu)\n elif model_type == ModelType.CEM.value:\n assert params.cem is not None\n cem_trainer_params = params.cem._replace(rl=params.rl)\n trainer = get_cem_trainer(env, cem_trainer_params, use_gpu)\n else:\n raise NotImplementedError(\"Model of type {} not supported\".format(model_type))\n\n return trainer\n\n\ndef _format_action_for_log_and_gym(action, env_type, model_type):\n if env_type == EnvType.DISCRETE_ACTION:\n action_index = torch.argmax(action).item()\n if model_type == ModelType.PYTORCH_DISCRETE_DQN.value:\n return str(action_index), int(action_index)\n else:\n return action.tolist(), int(action_index)\n return action.tolist(), action.tolist()\n\n\ndef create_predictor(trainer, model_type, use_gpu, action_dim=None):\n if model_type in (ModelType.TD3.value, ModelType.SOFT_ACTOR_CRITIC.value):\n predictor = ContinuousActionOnPolicyPredictor(trainer, action_dim, use_gpu)\n elif model_type == ModelType.PYTORCH_DISCRETE_DQN.value:\n predictor = DiscreteDQNOnPolicyPredictor(trainer, action_dim, use_gpu)\n elif model_type == ModelType.PYTORCH_PARAMETRIC_DQN.value:\n predictor = ParametricDQNOnPolicyPredictor(trainer, action_dim, use_gpu)\n elif model_type == ModelType.CEM.value:\n predictor = CEMPlanningPredictor(trainer, action_dim, use_gpu)\n return predictor\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n logging.getLogger().setLevel(logging.INFO)\n from ml.rl import debug_on_error\n\n debug_on_error.start()\n args = sys.argv\n main(args[1:])\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport unittest\n\nimport numpy as np\nimport numpy.testing as npt\nimport six\nfrom caffe2.python import core, workspace\nfrom ml.rl.caffe_utils import C2\nfrom ml.rl.preprocessing import identify_types, normalization\nfrom ml.rl.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM\nfrom ml.rl.preprocessing.normalization import (\n NormalizationParameters,\n sort_features_by_normalization,\n)\nfrom ml.rl.preprocessing.preprocessor_net import PreprocessorNet\nfrom ml.rl.test.base.utils import NumpyFeatureProcessor\nfrom ml.rl.test.preprocessing.preprocessing_util import (\n BOXCOX_FEATURE_ID,\n ENUM_FEATURE_ID,\n PROBABILITY_FEATURE_ID,\n id_to_type,\n read_data,\n)\nfrom scipy import special\n\n\nclass TestNormalization(unittest.TestCase):\n def _feature_type_override(self, feature_id):\n \"\"\"\n This should only be used to test CONTINUOUS_ACTION\n \"\"\"\n if id_to_type(feature_id) == identify_types.CONTINUOUS_ACTION:\n return identify_types.CONTINUOUS_ACTION\n return None\n\n def test_prepare_normalization_and_normalize(self):\n feature_value_map = read_data()\n\n normalization_parameters = {}\n for name, values in feature_value_map.items():\n normalization_parameters[name] = normalization.identify_parameter(\n name, values, 10, feature_type=self._feature_type_override(name)\n )\n for k, v in normalization_parameters.items():\n if id_to_type(k) == CONTINUOUS:\n self.assertEqual(v.feature_type, CONTINUOUS)\n self.assertIs(v.boxcox_lambda, None)\n self.assertIs(v.boxcox_shift, None)\n elif id_to_type(k) == BOXCOX:\n self.assertEqual(v.feature_type, BOXCOX)\n self.assertIsNot(v.boxcox_lambda, None)\n self.assertIsNot(v.boxcox_shift, None)\n else:\n assert v.feature_type == id_to_type(k)\n sorted_features, _ = sort_features_by_normalization(normalization_parameters)\n\n norm_net = 
core.Net(\"net\")\n C2.set_net(norm_net)\n preprocessor = PreprocessorNet()\n input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32)\n for i, feature in enumerate(sorted_features):\n input_matrix[:, i] = feature_value_map[feature]\n input_matrix_blob = \"input_matrix_blob\"\n workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32))\n output_blob, _ = preprocessor.normalize_dense_matrix(\n input_matrix_blob, sorted_features, normalization_parameters, \"\", False\n )\n workspace.FeedBlob(input_matrix_blob, input_matrix)\n workspace.RunNetOnce(norm_net)\n normalized_feature_matrix = workspace.FetchBlob(output_blob)\n\n normalized_features = {}\n on_column = 0\n for feature in sorted_features:\n norm = normalization_parameters[feature]\n if norm.feature_type == ENUM:\n column_size = len(norm.possible_values)\n else:\n column_size = 1\n normalized_features[feature] = normalized_feature_matrix[\n :, on_column : (on_column + column_size)\n ]\n on_column += column_size\n\n self.assertTrue(\n all(\n [\n np.isfinite(parameter.stddev) and np.isfinite(parameter.mean)\n for parameter in normalization_parameters.values()\n ]\n )\n )\n for k, v in six.iteritems(normalized_features):\n self.assertTrue(np.all(np.isfinite(v)))\n feature_type = normalization_parameters[k].feature_type\n if feature_type == identify_types.PROBABILITY:\n sigmoidv = special.expit(v)\n self.assertTrue(\n np.all(\n np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1))\n )\n )\n elif feature_type == identify_types.ENUM:\n possible_values = normalization_parameters[k].possible_values\n self.assertEqual(v.shape[0], len(feature_value_map[k]))\n self.assertEqual(v.shape[1], len(possible_values))\n\n possible_value_map = {}\n for i, possible_value in enumerate(possible_values):\n possible_value_map[possible_value] = i\n\n for i, row in enumerate(v):\n original_feature = feature_value_map[k][i]\n self.assertEqual(\n possible_value_map[original_feature], np.where(row == 1)[0][0]\n )\n elif feature_type == identify_types.QUANTILE:\n for i, feature in enumerate(v[0]):\n original_feature = feature_value_map[k][i]\n expected = NumpyFeatureProcessor.value_to_quantile(\n original_feature, normalization_parameters[k].quantiles\n )\n self.assertAlmostEqual(feature, expected, 2)\n elif feature_type == identify_types.BINARY:\n pass\n elif (\n feature_type == identify_types.CONTINUOUS\n or feature_type == identify_types.BOXCOX\n ):\n one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01)\n zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01)\n zero_mean = np.isclose(np.mean(v), 0, atol=0.01)\n self.assertTrue(\n np.all(zero_mean),\n \"mean of feature {} is {}, not 0\".format(k, np.mean(v)),\n )\n self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev)))\n elif feature_type == identify_types.CONTINUOUS_ACTION:\n less_than_max = v < 1\n more_than_min = v > -1\n self.assertTrue(\n np.all(less_than_max),\n \"values are not less than 1: {}\".format(v[less_than_max == False]),\n )\n self.assertTrue(\n np.all(more_than_min),\n \"values are not more than -1: {}\".format(v[more_than_min == False]),\n )\n else:\n raise NotImplementedError()\n\n def test_normalize_dense_matrix_enum(self):\n normalization_parameters = {\n 1: NormalizationParameters(\n identify_types.ENUM,\n None,\n None,\n None,\n None,\n [12, 4, 2],\n None,\n None,\n None,\n ),\n 2: NormalizationParameters(\n identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None\n ),\n 3: NormalizationParameters(\n identify_types.ENUM, None, None, 
None, None, [15, 3], None, None, None\n ),\n }\n norm_net = core.Net(\"net\")\n C2.set_net(norm_net)\n preprocessor = PreprocessorNet()\n\n inputs = np.zeros([4, 3], dtype=np.float32)\n feature_ids = [2, 1, 3] # Sorted according to feature type\n inputs[:, feature_ids.index(1)] = [12, 4, 2, 2]\n inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0]\n inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE]\n input_blob = C2.NextBlob(\"input_blob\")\n workspace.FeedBlob(input_blob, np.array([0], dtype=np.float32))\n normalized_output_blob, _ = preprocessor.normalize_dense_matrix(\n input_blob, feature_ids, normalization_parameters, \"\", False\n )\n workspace.FeedBlob(input_blob, inputs)\n workspace.RunNetOnce(norm_net)\n normalized_feature_matrix = workspace.FetchBlob(normalized_output_blob)\n\n np.testing.assert_allclose(\n np.array(\n [\n [1.0, 1, 0, 0, 1, 0],\n [2.0, 0, 1, 0, 0, 1],\n [3.0, 0, 0, 1, 1, 0],\n [3.0, 0, 0, 1, 0, 0], # Missing values should go to all 0\n ]\n ),\n normalized_feature_matrix,\n )\n\n def test_persistency(self):\n feature_value_map = read_data()\n normalization_parameters = {}\n for name, values in feature_value_map.items():\n normalization_parameters[name] = normalization.identify_parameter(\n name, values, feature_type=self._feature_type_override(name)\n )\n\n s = normalization.serialize(normalization_parameters)\n read_parameters = normalization.deserialize(s)\n # Unfortunately, Thrift serializatin seems to lose a bit of precision.\n # Using `==` will be false.\n self.assertEqual(read_parameters.keys(), normalization_parameters.keys())\n for k in normalization_parameters:\n self.assertEqual(\n read_parameters[k].feature_type,\n normalization_parameters[k].feature_type,\n )\n self.assertEqual(\n read_parameters[k].possible_values,\n normalization_parameters[k].possible_values,\n )\n for field in [\n \"boxcox_lambda\",\n \"boxcox_shift\",\n \"mean\",\n \"stddev\",\n \"quantiles\",\n \"min_value\",\n \"max_value\",\n ]:\n if getattr(normalization_parameters[k], field) is None:\n self.assertEqual(\n getattr(read_parameters[k], field),\n getattr(normalization_parameters[k], field),\n )\n else:\n npt.assert_allclose(\n getattr(read_parameters[k], field),\n getattr(normalization_parameters[k], field),\n )\n\n def test_preprocessing_network(self):\n feature_value_map = read_data()\n\n normalization_parameters = {}\n for name, values in feature_value_map.items():\n normalization_parameters[name] = normalization.identify_parameter(\n name, values, feature_type=self._feature_type_override(name)\n )\n test_features = NumpyFeatureProcessor.preprocess(\n feature_value_map, normalization_parameters\n )\n\n net = core.Net(\"PreprocessingTestNet\")\n C2.set_net(net)\n preprocessor = PreprocessorNet()\n name_preprocessed_blob_map = {}\n for feature_name in feature_value_map:\n workspace.FeedBlob(str(feature_name), np.array([0], dtype=np.int32))\n preprocessed_blob, _ = preprocessor.preprocess_blob(\n str(feature_name), [normalization_parameters[feature_name]]\n )\n name_preprocessed_blob_map[feature_name] = preprocessed_blob\n\n workspace.CreateNet(net)\n\n for feature_name, feature_value in six.iteritems(feature_value_map):\n feature_value = np.expand_dims(feature_value, -1)\n workspace.FeedBlob(str(feature_name), feature_value)\n workspace.RunNetOnce(net)\n\n for feature_name in feature_value_map:\n normalized_features = workspace.FetchBlob(\n name_preprocessed_blob_map[feature_name]\n )\n if feature_name != ENUM_FEATURE_ID:\n normalized_features = 
np.squeeze(normalized_features, -1)\n\n tolerance = 0.01\n if feature_name == BOXCOX_FEATURE_ID:\n # At the limit, boxcox has some numerical instability\n tolerance = 0.5\n non_matching = np.where(\n np.logical_not(\n np.isclose(\n normalized_features,\n test_features[feature_name],\n rtol=tolerance,\n atol=tolerance,\n )\n )\n )\n self.assertTrue(\n np.all(\n np.isclose(\n normalized_features,\n test_features[feature_name],\n rtol=tolerance,\n atol=tolerance,\n )\n ),\n \"{} does not match: {} {}\".format(\n feature_name,\n normalized_features[non_matching].tolist(),\n test_features[feature_name][non_matching].tolist(),\n ),\n )\n\n def test_type_override(self):\n # Take a feature that should be identified as probability\n feature_value_map = read_data()\n probability_values = feature_value_map[PROBABILITY_FEATURE_ID]\n\n # And ask for a binary anyways\n parameter = normalization.identify_parameter(\n \"_\", probability_values, feature_type=identify_types.BINARY\n )\n self.assertEqual(parameter.feature_type, \"BINARY\")\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\"\"\"\nA simple linear dynamic system\nhttps://www.argmin.net/2018/02/08/lqr/\n\"\"\"\nimport logging\n\nimport numpy as np\nfrom gym import Env\nfrom gym.spaces import Box\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LinDynaEnv(Env):\n \"\"\"\n A linear dynamical system characterized by A, B, Q, and R.\n\n Suppose x_t is current state, u_t is current action, then:\n\n x_t+1 = A x_t + B u_t\n Reward_t = x_t' Q x_t + u_t' R u_t\n \"\"\"\n\n def __init__(self):\n self.max_steps = 4\n self.state_dim = 3\n self.action_dim = 2\n self.action_space = Box(low=-3, high=3, shape=(self.action_dim,))\n self.observation_space = Box(\n low=-1.7976931348623157e308,\n high=1.7976931348623157e308,\n shape=(self.state_dim,),\n )\n self.A = 0.2 * np.array([[-1.0, -1.0, 1.0], [2.0, 0.0, 2.0], [0.0, -1.0, 2.0]])\n self.B = 0.2 * np.array([[2.0, 2.0], [2.0, 2.0], [0.0, 1.0]])\n self.Q = 0.2 * np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.5], [0.0, 0.5, 1.0]])\n self.R = 0.2 * np.array([[1.0, -1.0], [-1.0, 2.0]])\n S = np.zeros((self.state_dim, self.action_dim))\n # this matrix should be positive definite\n check_mat = np.vstack((np.hstack((self.Q, S)), np.hstack((S.T, self.R))))\n assert self.is_pos_def(check_mat)\n logger.info(\n f\"Initialized Linear Dynamics Environment:\\n\"\n f\"A:\\n{self.A}\\nB:\\n{self.B}\\nQ:\\n{self.Q}\\nR:\\n{self.R}\\n\"\n )\n\n @staticmethod\n def is_pos_def(x):\n return np.all(np.linalg.eigvals(x) > 0)\n\n def reset(self):\n self.state = np.random.randint(low=-1, high=2, size=(self.state_dim,)).astype(\n float\n )\n self.step_cnt = 0\n return self.state\n\n def step(self, action):\n assert len(action) == self.action_dim\n action = np.clip(action, self.action_space.low, self.action_space.high)\n action = action.reshape((self.action_dim, 1))\n state = self.state.reshape((self.state_dim, 1))\n next_state = (self.A.dot(state) + self.B.dot(action)).squeeze()\n # add the negative sign because we actually want to maximize the rewards, while an LRQ solution minimizes\n # rewards by convention\n reward = -(\n state.T.dot(self.Q).dot(state) + action.T.dot(self.R).dot(action)\n ).squeeze()\n self.step_cnt += 1\n terminal = False\n if self.step_cnt >= self.max_steps:\n terminal = True\n self.state = next_state\n return next_state, reward, terminal, None\n" ]
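The last file in the code listing above, ml/rl/test/environment/linear_dynamics.py, steps a linear dynamical system x_{t+1} = A x_t + B u_t and returns the negated quadratic cost x_t' Q x_t + u_t' R u_t as the reward (negated because the agent maximizes reward while the usual LQR convention minimizes cost). A minimal NumPy-only sketch of that update follows; the function name and the 2-state/1-action matrices are illustrative placeholders, not the A, B, Q, R defined in the environment.

import numpy as np

def lin_dyna_step(x, u, A, B, Q, R):
    """One step of a linear-quadratic system in the style of LinDynaEnv.

    Returns (next_state, reward) with reward = -(x' Q x + u' R u).
    """
    x = x.reshape(-1, 1)   # state as a column vector
    u = u.reshape(-1, 1)   # action as a column vector
    next_state = (A @ x + B @ u).squeeze()
    reward = -float(x.T @ Q @ x + u.T @ R @ u)
    return next_state, reward

# Hand-picked 2-state / 1-action example (not the environment's matrices).
A = np.array([[0.9, 0.1], [0.0, 0.8]])
B = np.array([[0.0], [1.0]])
Q = np.eye(2)
R = np.eye(1)
next_x, r = lin_dyna_step(np.array([1.0, 0.0]), np.array([0.5]), A, B, Q, R)
print(next_x, r)  # [0.9 0.5] -1.25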
[ [ "numpy.logical_not", "numpy.expand_dims", "numpy.pad", "torch.tensor", "numpy.mean", "numpy.float32", "torch.device", "numpy.vstack" ], [ "torch.max", "numpy.random.seed", "torch.zeros", "torch.manual_seed", "torch.eye", "sklearn.model_selection.train_test_split", "torch.tensor", "torch.cuda.is_available", "sklearn.ensemble.GradientBoostingClassifier", "numpy.zeros", "torch.argmax" ], [ "numpy.expand_dims", "numpy.greater", "scipy.special.expit", "numpy.isfinite", "numpy.less", "numpy.squeeze", "numpy.all", "numpy.logical_or", "numpy.std", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.where", "numpy.isclose" ], [ "numpy.hstack", "numpy.linalg.eigvals", "numpy.clip", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Derikka/simulacrum
[ "99c4db9b15913c3c838532a86569645b3345fb83", "99c4db9b15913c3c838532a86569645b3345fb83" ]
[ "model_service/model_service.py", "klystron_service/klystron_service.py" ]
[ "#!/usr/bin/env python3\nimport os\nimport argparse\nimport sys\nimport pickle\nimport asyncio\nimport time\nimport numpy as np\nimport zmq\nimport pytao\nfrom p4p.nt import NTTable\nfrom p4p.server import Server as PVAServer\nfrom p4p.server.asyncio import SharedPV\nfrom zmq.asyncio import Context\nimport simulacrum\n\n\nmodel_service_dir = os.path.dirname(os.path.realpath(__file__))\n#set up python logger\nL = simulacrum.util.SimulacrumLog(os.path.splitext(os.path.basename(__file__))[0], level='INFO')\n\nclass ModelService:\n def __init__(self, init_file, name, enable_jitter=False, plot=False):\n self.name = name\n tao_lib = os.environ.get('TAO_LIB', '')\n self.tao = pytao.Tao(so_lib=tao_lib)\n L.debug(\"Initializing Tao...\")\n if plot: \n self.tao.init(\"-init {init_file}\".format(init_file=init_file))\n else:\n self.tao.init(\"-noplot -init {init_file}\".format(init_file=init_file))\n L.debug(\"Tao initialization complete!\")\n self.tao.cmd(\"set global lattice_calc_on = F\")\n self.tao.cmd('set global var_out_file = \" \"')\n self.ctx = Context.instance()\n self.model_broadcast_socket = zmq.Context().socket(zmq.PUB)\n self.model_broadcast_socket.bind(\"tcp://*:{}\".format(os.environ.get('MODEL_BROADCAST_PORT', 66666)))\n self.loop = asyncio.get_event_loop()\n self.jitter_enabled = enable_jitter\n self.twiss_table = NTTable([(\"element\", \"s\"), (\"device_name\", \"s\"),\n (\"s\", \"d\"), (\"length\", \"d\"), (\"p0c\", \"d\"),\n (\"alpha_x\", \"d\"), (\"beta_x\", \"d\"), (\"eta_x\", \"d\"), (\"etap_x\", \"d\"), (\"psi_x\", \"d\"),\n (\"alpha_y\", \"d\"), (\"beta_y\", \"d\"), (\"eta_y\", \"d\"), (\"etap_y\", \"d\"), (\"psi_y\", \"d\")])\n self.rmat_table = NTTable([(\"element\", \"s\"), (\"device_name\", \"s\"), (\"s\", \"d\"), (\"length\", \"d\"),\n (\"r11\", \"d\"), (\"r12\", \"d\"), (\"r13\", \"d\"), (\"r14\", \"d\"), (\"r15\", \"d\"), (\"r16\", \"d\"),\n (\"r21\", \"d\"), (\"r22\", \"d\"), (\"r23\", \"d\"), (\"r24\", \"d\"), (\"r25\", \"d\"), (\"r26\", \"d\"),\n (\"r31\", \"d\"), (\"r32\", \"d\"), (\"r33\", \"d\"), (\"r34\", \"d\"), (\"r35\", \"d\"), (\"r36\", \"d\"),\n (\"r41\", \"d\"), (\"r42\", \"d\"), (\"r43\", \"d\"), (\"r44\", \"d\"), (\"r45\", \"d\"), (\"r46\", \"d\"),\n (\"r51\", \"d\"), (\"r52\", \"d\"), (\"r53\", \"d\"), (\"r54\", \"d\"), (\"r55\", \"d\"), (\"r56\", \"d\"),\n (\"r61\", \"d\"), (\"r62\", \"d\"), (\"r63\", \"d\"), (\"r64\", \"d\"), (\"r65\", \"d\"), (\"r66\", \"d\")])\n initial_twiss_table, initial_rmat_table = self.get_twiss_table()\n sec, nanosec = divmod(float(time.time()), 1.0)\n initial_twiss_table = self.twiss_table.wrap(initial_twiss_table)\n initial_twiss_table['timeStamp']['secondsPastEpoch'] = sec\n initial_twiss_table['timeStamp']['nanoseconds'] = nanosec\n initial_rmat_table = self.rmat_table.wrap(initial_rmat_table)\n initial_rmat_table['timeStamp']['secondsPastEpoch'] = sec\n initial_rmat_table['timeStamp']['nanoseconds'] = nanosec\n self.live_twiss_pv = SharedPV(nt=self.twiss_table, \n initial=initial_twiss_table,\n loop=self.loop)\n self.design_twiss_pv = SharedPV(nt=self.twiss_table, \n initial=initial_twiss_table,\n loop=self.loop)\n self.live_rmat_pv = SharedPV(nt=self.rmat_table, \n initial=initial_rmat_table,\n loop=self.loop)\n self.design_rmat_pv = SharedPV(nt=self.rmat_table, \n initial=initial_rmat_table,\n loop=self.loop)\n self.recalc_needed = False\n self.pva_needs_refresh = False\n self.need_zmq_broadcast = False\n \n def start(self):\n L.info(\"Starting %s Model Service.\", self.name)\n pva_server = 
PVAServer(providers=[{f\"SIMULACRUM:SYS0:1:{self.name}:LIVE:TWISS\": self.live_twiss_pv,\n f\"SIMULACRUM:SYS0:1:{self.name}:DESIGN:TWISS\": self.design_twiss_pv,\n f\"SIMULACRUM:SYS0:1:{self.name}:LIVE:RMAT\": self.live_rmat_pv,\n f\"SIMULACRUM:SYS0:1:{self.name}:DESIGN:RMAT\": self.design_rmat_pv,}])\n try:\n zmq_task = self.loop.create_task(self.recv())\n pva_refresh_task = self.loop.create_task(self.refresh_pva_table())\n broadcast_task = self.loop.create_task(self.broadcast_model_changes())\n jitter_task = self.loop.create_task(self.add_jitter())\n self.loop.run_forever()\n except KeyboardInterrupt:\n L.info(\"Shutting down Model Service.\")\n zmq_task.cancel()\n pva_refresh_task.cancel()\n broadcast_task.cancel()\n pva_server.stop()\n finally:\n self.loop.close()\n L.info(\"Model Service shutdown complete.\")\n \n def get_twiss_table(self):\n \"\"\"\n Queries Tao for model and RMAT info.\n Returns: A (twiss_table, rmat_table) tuple.\n \"\"\"\n start_time = time.time()\n #First we get a list of all the elements.\n #NOTE: the \"-no_slaves\" option for python lat_list only works in Tao 2019_1112 or above.\n element_name_list = self.tao.cmd(\"python lat_list -track_only 1@0>>*|model ele.name\")\n L.debug(element_name_list)\n for row in element_name_list:\n assert \"ERROR\" not in element_name_list, \"Fetching element names failed. This is probably because a version of Tao older than 2019_1112 is being used.\"\n last_element_index = 0\n for i, row in enumerate(reversed(element_name_list)):\n if row == \"END\":\n last_element_index = len(element_name_list)-1-i\n break\n element_data = {}\n attrs = (\"ele.s\", \"ele.l\", \"orbit.energy\", \"ele.a.alpha\", \"ele.a.beta\", \"ele.x.eta\", \"ele.x.etap\", \"ele.a.phi\", \"ele.b.alpha\", \"ele.b.beta\", \"ele.y.eta\", \"ele.y.etap\", \"ele.b.phi\", \"ele.mat6\")\n for attr in attrs:\n element_data[attr] = self.tao.cmd_real(\"python lat_list -track_only 1@0>>*|model real:{}\".format(attr))\n if attr == 'ele.mat6':\n element_data[attr] = element_data[attr].reshape((-1, 6, 6))\n assert len(element_data[attr]) == len(element_name_list), \"Number of elements in model data for {} doesn't match number of element names.\".format(attr)\n \n combined_rmat = np.identity(6)\n twiss_table_rows = []\n rmat_table_rows = []\n for i in range(0,last_element_index+1):\n element_name = element_name_list[i]\n try:\n device_name = simulacrum.util.convert_element_to_device(element_name.split(\"#\")[0])\n except KeyError:\n device_name = \"\"\n element_rmat = element_data['ele.mat6'][i]\n rmat = np.matmul(element_rmat, combined_rmat)\n combined_rmat = rmat\n twiss_table_rows.append({\"element\": element_name, \"device_name\": device_name, \"s\": element_data['ele.s'][i], \"length\": element_data['ele.l'][i], \"p0c\": element_data['orbit.energy'][i],\n \"alpha_x\": element_data['ele.a.alpha'][i], \"beta_x\": element_data['ele.a.beta'][i], \"eta_x\": element_data['ele.x.eta'][i], \"etap_x\": element_data['ele.x.etap'][i], \"psi_x\": element_data['ele.a.phi'][i],\n \"alpha_y\": element_data['ele.b.alpha'][i], \"beta_y\": element_data['ele.b.beta'][i], \"eta_y\": element_data['ele.y.eta'][i], \"etap_y\": element_data['ele.y.etap'][i], \"psi_y\": element_data['ele.b.phi'][i]})\n rmat_table_rows.append({\n \"element\": element_name, \"device_name\": device_name, \"s\": element_data['ele.s'][i], \"length\": element_data['ele.l'][i],\n \"r11\": rmat[0,0], \"r12\": rmat[0,1], \"r13\": rmat[0,2], \"r14\": rmat[0,3], \"r15\": rmat[0,4], \"r16\": rmat[0,5],\n \"r21\": rmat[1,0], 
\"r22\": rmat[1,1], \"r23\": rmat[1,2], \"r24\": rmat[1,3], \"r25\": rmat[1,4], \"r26\": rmat[1,5],\n \"r31\": rmat[2,0], \"r32\": rmat[2,1], \"r33\": rmat[2,2], \"r34\": rmat[2,3], \"r35\": rmat[2,4], \"r36\": rmat[2,5],\n \"r41\": rmat[3,0], \"r42\": rmat[3,1], \"r43\": rmat[3,2], \"r44\": rmat[3,3], \"r45\": rmat[3,4], \"r46\": rmat[3,5],\n \"r51\": rmat[4,0], \"r52\": rmat[4,1], \"r53\": rmat[4,2], \"r54\": rmat[4,3], \"r55\": rmat[4,4], \"r56\": rmat[4,5],\n \"r61\": rmat[5,0], \"r62\": rmat[5,1], \"r63\": rmat[5,2], \"r64\": rmat[5,3], \"r65\": rmat[5,4], \"r66\": rmat[5,5]})\n end_time = time.time()\n L.debug(\"get_twiss_table took %f seconds\", end_time - start_time)\n return twiss_table_rows, rmat_table_rows\n \n async def refresh_pva_table(self):\n \"\"\"\n This loop continuously checks if the PVAccess table needs to be refreshed,\n and publishes a new table if it does. The pva_needs_refresh flag is\n usually set when a tao command beginning with 'set' occurs.\n \"\"\"\n while True:\n if self.pva_needs_refresh:\n sec, nanosec = divmod(float(time.time()), 1.0)\n new_twiss_table, new_rmat_table = self.get_twiss_table()\n new_twiss_table = self.twiss_table.wrap(new_twiss_table)\n new_twiss_table['timeStamp']['secondsPastEpoch'] = sec\n new_twiss_table['timeStamp']['nanoseconds'] = nanosec\n new_rmat_table = self.rmat_table.wrap(new_rmat_table)\n new_rmat_table['timeStamp']['secondsPastEpoch'] = sec\n new_rmat_table['timeStamp']['nanoseconds'] = nanosec\n self.live_twiss_pv.post(new_twiss_table)\n self.live_rmat_pv.post(new_rmat_table)\n self.pva_needs_refresh = False\n await asyncio.sleep(1.0)\n \n async def add_jitter(self):\n while True:\n if self.jitter_enabled:\n x0 = np.random.normal(0.0, 0.12*0.001)\n y0 = np.random.normal(0.0, 0.12*0.001)\n self.tao.cmd(f\"set particle_start x = {x0}\")\n self.tao.cmd(f\"set particle_start y = {y0}\")\n self.recalc_needed = True\n self.need_zmq_broadcast = True\n await asyncio.sleep(1.0)\n \n async def broadcast_model_changes(self):\n \"\"\"\n This loop broadcasts new orbits, twiss parameters, etc. 
over ZMQ.\n \"\"\"\n while True:\n if self.recalc_needed:\n self.tao.cmd(\"set global lattice_calc_on = T\")\n self.tao.cmd(\"set global lattice_calc_on = F\")\n self.recalc_needed = False\n if self.need_zmq_broadcast:\n try:\n self.send_orbit()\n except Exception as e:\n L.warning(\"SEND ORBIT FAILED: %s\", e)\n try:\n self.send_profiles_data()\n except Exception as e:\n L.warning(\"SEND PROF DATA FAILED: %s\", e)\n try:\n self.send_und_twiss()\n except Exception as e:\n L.warning(\"SEND UND TWISS FAILED: %s\", e)\n\n self.need_zmq_broadcast = False\n await asyncio.sleep(0.1)\n \n def model_changed(self):\n self.recalc_needed = True\n self.pva_needs_refresh = True\n self.need_zmq_broadcast = True\n \n def get_orbit(self):\n start_time = time.time()\n #Get X Orbit\n x_orb_text = self.tao_cmd(\"show data orbit.x\")[3:-2]\n x_orb = _orbit_array_from_text(x_orb_text)\n #Get Y Orbit\n y_orb_text = self.tao_cmd(\"show data orbit.y\")[3:-2]\n y_orb = _orbit_array_from_text(y_orb_text)\n #Get e_tot, which we use to see if the single particle beam is dead\n e_text = self.tao_cmd(\"show data orbit.e\")[3:-2]\n e = _orbit_array_from_text(e_text)\n end_time = time.time()\n L.debug(\"get_orbit took %f seconds\", end_time-start_time)\n return np.stack((x_orb, y_orb, e))\n\n def get_prof_orbit(self):\n #Get X Orbit\n x_orb_text = self.tao_cmd(\"show data orbit.profx\")[3:-2]\n x_orb = _orbit_array_from_text(x_orb_text)\n #Get Y Orbit\n y_orb_text = self.tao_cmd(\"show data orbit.profy\")[3:-2]\n y_orb = _orbit_array_from_text(y_orb_text)\n return np.stack((x_orb, y_orb))\n \n def get_twiss(self):\n twiss_text = self.tao_cmd(\"show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b UNDSTART\")\n if \"ERROR\" in twiss_text[0]:\n twiss_text = self.tao_cmd(\"show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b BEGUNDH\")\n if \"ERROR\" in twiss_text[0]:\n twiss_text = self.tao_cmd(\"show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b BEGUNDS\")\n #format to list of comma separated values\n #msg='twiss from get_twiss: {}'.format(twiss_text)\n #L.info(msg)\n twiss = twiss_text[0].split()\n return twiss\n\n def old_get_orbit(self):\n #Get X Orbit\n x_orb_text = self.tao_cmd(\"python lat_list 1@0>>BPM*|model orbit.vec.1\")\n x_orb = _orbit_array_from_text(x_orb_text)\n #Get Y Orbit\n y_orb_text = self.tao_cmd(\"python lat_list 1@0>>BPM*|model orbit.vec.3\")\n y_orb = _orbit_array_from_text(y_orb_text)\n return np.stack((x_orb, y_orb))\n \n #information broadcast by the model is sent as two separate messages:\n #metadata message: sent first with 1) tag describing data for services to filter on, 2) type -optional, 3) size -optional\n #data message: sent either as a python object or a series of bits\n \n def send_orbit(self):\n orb = self.get_orbit()\n metadata = {\"tag\" : \"orbit\", \"dtype\": str(orb.dtype), \"shape\": orb.shape}\n self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)\n self.model_broadcast_socket.send(orb)\n\n def send_profiles_data(self):\n twiss_text = self.tao_cmd(\"show lat -no_label_lines -at beta_a -at beta_b -at e_tot Monitor::OTR*,Monitor::YAG*\")\n prof_beta_x = [float(l.split()[5]) for l in twiss_text]\n prof_beta_y = [float(l.split()[6]) for l in twiss_text]\n prof_e = [float(l.split()[7]) for l in twiss_text]\n prof_names = [l.split()[1] for l in twiss_text]\n prof_orbit = self.get_prof_orbit()\n prof_data = np.concatenate((prof_orbit, np.array([prof_beta_x, prof_beta_y, prof_e, prof_names])))\n\n metadata = {\"tag\" : 
\"prof_data\", \"dtype\": str(prof_data.dtype), \"shape\": prof_data.shape}\n self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)\n self.model_broadcast_socket.send(prof_data);\n\n def send_particle_positions(self):\n twiss_text = self.tao_cmd(\"show lat -no_label_lines -at beta_a -at beta_b -at e_tot Monitor::OTR*,Monitor::YAG*\")\n prof_names = [l.split()[1] for l in twiss_text]\n positions_all = {}\n for screen in prof_names:\n positions = self.get_particle_positions(screen);\n if not positions:\n continue\n positions_all[screen] = [[float(position.split()[1]), float(position.split()[3])] for position in positions]\n metadata = {\"tag\": \"part_positions\"}\n self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)\n self.model_broadcast_socket.send_pyobj(positions_all)\n\n def get_particle_positions(self, screen):\n L.debug(\"Getting particle positions\")\n cmd = \"show particle -all -ele {screen}\".format(screen=screen)\n results = self.tao_cmd(cmd);\n if(len(results) < 3):\n return False\n return results[2:]\n\n def send_und_twiss(self):\n twiss = self.get_twiss()\n metadata = {\"tag\": \"und_twiss\"}\n self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)\n self.model_broadcast_socket.send_pyobj(twiss)\n \n def tao_cmd(self, cmd):\n if cmd.startswith(\"exit\"):\n return \"Please stop trying to exit the model service's Tao, you jerk!\"\n result = self.tao.cmd(cmd)\n if cmd.startswith(\"set\"):\n self.model_changed()\n return result\n \n def tao_batch(self, cmds):\n L.info(\"Starting command batch.\")\n results = [self.tao_cmd(cmd) for cmd in cmds]\n L.info(\"Batch complete.\")\n return results\n \n async def recv(self):\n s = self.ctx.socket(zmq.REP)\n s.bind(\"tcp://*:{}\".format(os.environ.get('MODEL_PORT', \"12312\")))\n while True:\n p = await s.recv_pyobj()\n msg = \"Got a message: {}\".format(p)\n L.debug(msg)\n if p['cmd'] == 'tao':\n try:\n retval = self.tao_cmd(p['val'])\n await s.send_pyobj({'status': 'ok', 'result': retval})\n except Exception as e:\n await s.send_pyobj({'status': 'fail', 'err': e})\n elif p['cmd'] == 'send_orbit':\n self.model_changed() #Sets the flag that will cause an orbit broadcast\n await s.send_pyobj({'status': 'ok'})\n elif p['cmd'] == 'echo':\n await s.send_pyobj({'status': 'ok', 'result': p['val']})\n elif p['cmd'] == 'send_profiles_twiss':\n self.model_changed() #Sets the flag that will cause a prof broadcast\n #self.send_profiles_twiss()\n #self.send_prof_orbit()\n await s.send_pyobj({'status': 'ok'})\n elif p['cmd'] == 'send_und_twiss':\n self.model_changed() #Sets the flag that will cause an und twiss broadcast\n #self.send_und_twiss()\n await s.send_pyobj({'status': 'ok'})\n elif p['cmd'] == 'tao_batch':\n try:\n results = self.tao_batch(p['val'])\n await s.send_pyobj({'status': 'ok', 'result': results})\n except Exception as e:\n await s.send_pyobj({'status': 'fail', 'err': e})\n\ndef _orbit_array_from_text(text):\n return np.array([float(l.split()[5]) for l in text])*1000.0\n\ndef find_model(model_name):\n \"\"\"\n Helper routine to find models using standard environmental variables:\n $LCLS_CLASSIC_LATTICE should point to a checkout of https://github.com/slaclab/lcls-classic-lattice \n $LCLS_LATTICE should point to a checkout of https://github.com/slaclab/lcls-lattice\n \n Availble models:\n lcls_classic\n cu_hxr\n cu_spec\n cu_sxr\n sc_hxr\n sc_sxr\n \n \"\"\"\n if model_name == 'lcls_classic':\n tao_initfile = os.path.join(os.environ['LCLS_CLASSIC_LATTICE'], 'bmad/model/tao.init')\n elif model_name in ['cu_hxr', 
'cu_sxr', 'cu_spec', 'sc_sxr', 'sc_hxr']:\n root = os.environ['LCLS_LATTICE']\n tao_initfile = os.path.join(root, 'bmad/models/', model_name, 'tao.init') \n else:\n raise ValueError('Not a valid model: {}'.format(model_name))\n assert os.path.exists(tao_initfile), 'Error: file does not exist: ' + tao_initfile\n return tao_initfile\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(description=\"Simulacrum Model Service\")\n parser.add_argument(\n 'model_name',\n help='Name of a Tao model from either lcls-lattice or lcls-classic-lattice. Must be one of: ' + \n 'lcls_classic, cu_hxr, cu_spec, cu_sxr, sc_sxr, or sc_hxr'\n )\n parser.add_argument(\n '--enable-jitter',\n action='store_true',\n help='Apply jitter on every model update tick (10 Hz). This will significantly increase CPU usage.'\n )\n parser.add_argument(\n '--plot',\n action='store_true',\n help='Show tao plot'\n )\n model_service_args = parser.parse_args()\n tao_init_file = find_model(model_service_args.model_name)\n serv = ModelService(init_file=tao_init_file, name=model_service_args.model_name.upper(), enable_jitter=model_service_args.enable_jitter, \n plot=model_service_args.plot)\n serv.start()\n\n", "import numpy as np\nimport sys\nimport os\nimport asyncio\nfrom collections import OrderedDict\nfrom caproto.server import ioc_arg_parser, run, pvproperty, PVGroup\nfrom caproto import ChannelType\nimport simulacrum\nimport zmq\nfrom zmq.asyncio import Context\n\n#set up python logger\nL = simulacrum.util.SimulacrumLog(os.path.splitext(os.path.basename(__file__))[0], level='INFO')\n\nclass CudKlys(PVGroup):\n \"\"\"\n Represents the PVs used by the Klystron CUD.\n Every PV in here is just a static value, driven by\n the Klystron CUD MATLAB process. \n \"\"\"\n onbeam1 = pvproperty(value=0.0, name=':ONBEAM1')\n status = pvproperty(value=0.0, name=':STATUS')\n statusdesc = pvproperty(value='None', name=':STATUS.DESC', dtype=ChannelType.STRING)\n def __init__(self, device_name, element_name, initial_value, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.device_name = device_name\n self.element_name = element_name\n self.onbeam1._data['value'] = initial_value\n self.status._data['value'] = initial_value\n\nclass SubboosterPV(PVGroup):\n \"\"\"\n Represents the PVs for a subbooster. 
Currently these\n don't actually do anything, but some displays use them.\n \"\"\"\n pdes = pvproperty(value=0.0, name=':PDES') \n phas = pvproperty(value=0.0, name=':PHAS', read_only=True)\n poly = pvproperty(value=np.zeros(6), name=':POLY', dtype=ChannelType.DOUBLE)\n def __init__(self, device_name, element_name, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.device_name = device_name\n self.element_name = element_name\n\nclass KlystronPV(PVGroup):\n pdes = pvproperty(value=0.0, name=':PDES', precision=1) \n phas = pvproperty(value=0.0, name=':PHAS', read_only=True, precision=1)\n enld = pvproperty(value=0.0, name=':ENLD')\n ades = pvproperty(value=100.0, name=':ADES', precision=1)\n ampl = pvproperty(value=100.0, name=':AMPL', precision=1)\n bvjt = pvproperty(value=0.0, name=':BVJT')\n alem = pvproperty(value=0.0, name=':ALEM')\n plem = pvproperty(value=0.0, name=':PLEM')\n eglem = pvproperty(value=0.0, name=':EGLEM')\n chlem = pvproperty(value=0.0, name=':CHLEM')\n mkbvftpjasigma = pvproperty(value=0.0, name=':MKBVFTPJASIGMA')\n poly = pvproperty(value=np.zeros(6), name=':POLY', dtype=ChannelType.DOUBLE)\n # The seemingly random numbers in clear_* are the values these status\n # PVs have when a klystron is working normally, with no faults.\n clear_swrd = 0\n clear_hdsc = 32\n clear_dsta = [1610612737, 528640]\n clear_stat = 1\n swrd = pvproperty(value=clear_swrd, name=':SWRD')\n hdsc = pvproperty(value=clear_hdsc, name=':HDSC')\n dsta = pvproperty(value=clear_dsta, name=':DSTA')\n stat = pvproperty(value=clear_stat, name=':STAT')\n bc1_tctl = pvproperty(value=0, name=':BEAMCODE1_TCTL', dtype=ChannelType.ENUM,\n enum_strings=(\"Deactivate\", \"Reactivate\", \"Activate\"))\n bc1_tstat = pvproperty(value=0, name=':BEAMCODE1_TSTAT', dtype=ChannelType.ENUM,\n enum_strings=(\"Deactivated\", \"Activated\"), read_only=True)\n #BEAMCODE1_STAT represents the same data as BEAMCODE1_TSTAT, but does it as a float, not an enum. 
2=off 1=on.\n bc1_stat = pvproperty(value=2, name=':BEAMCODE1_STAT', read_only=True)\n trim = pvproperty(value=0, name=':TRIMPHAS', dtype=ChannelType.ENUM,\n enum_strings=(\"Done\", \"TRIM\"))\n mod_reset = pvproperty(value=0, name=':MOD:RESET', dtype=ChannelType.ENUM,\n enum_strings=(\"Done\", \"RESET\"))\n mod_hv_ctrl = pvproperty(value=1, name=':MOD:HVON_SET', dtype=ChannelType.ENUM,\n enum_strings=(\"OFF\", \"ON\"))\n def __init__(self, device_name, element_name, change_callback, initial_values, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.device_name = device_name \n self.element_name = element_name\n self.orig_enld = initial_values[0]\n self.tripped = False\n self.hv_ctrl_on = True\n self.has_accel_triggers = True\n self.enld._data['value'] = initial_values[0]\n self.pdes._data['value'] = initial_values[1]\n self.phas._data['value'] = initial_values[1] \n self.bc1_tctl._data['value'] = 1\n self.bc1_tstat._data['value'] = 1\n self.bc1_stat._data['value'] = 1\n self.change_callback = change_callback \n\n async def interlock_trip(self):\n if self.tripped:\n return\n self.tripped = True\n dsta1, dsta2 = self.dsta.value\n dsta2 = dsta2 & ~(1 << 3) #Turn off \"Mod interlocks complete\"\n dsta2 = dsta2 & ~(1 << 7) #Turn off \"Mod HV on\"\n self.dsta._data['value'] = [dsta1, dsta2]\n await self.dsta.publish(0)\n await self.mod_hv_ctrl.write('OFF') # Only for SimUI\n await self.on_off_changed()\n \n @mod_reset.putter\n async def mod_reset(self, instance, value):\n ioc = instance.group\n if value == \"RESET\":\n dsta2 = self.clear_dsta[1]\n dsta2 = dsta2 & ~(1 << 7) #Keep \"Mod HV on\" bit zeroed\n dsta2 = dsta2 | (1 << 4) #Turn on the \"Mod HV Ready\" bit\n self.dsta._data['value'] = [self.clear_dsta[0], dsta2]\n self.swrd._data['value'] = self.clear_swrd\n self.hdsc._data['value'] = self.clear_hdsc\n # Note, mod reset doesn't change the STAT PV.\n await asyncio.gather(\n self.dsta.publish(0),\n self.swrd.publish(0),\n self.hdsc.publish(0))\n self.tripped = False\n return 0\n\n @mod_hv_ctrl.putter\n async def mod_hv_ctrl(self, instance, value):\n if value == \"ON\":\n await self.mod_on()\n else:\n await self.mod_off()\n \n return value\n\n async def mod_on(self):\n if self.tripped or self.hv_ctrl_on:\n return\n dsta1, dsta2 = self.dsta.value\n dsta2 = dsta2 | (1 << 7) #Turn on the \"Mod HV On\" bit\n dsta2 = dsta2 & ~(1 << 4) #Turn off the \"Mod HV Ready\" bit\n self.dsta._data['value'] = [dsta1, dsta2]\n await self.dsta.publish(0)\n self.hv_ctrl_on = True\n await self.on_off_changed()\n \n async def mod_off(self, hv_ready=True):\n #if self.tripped or (not self.hv_ctrl_on):\n if not self.hv_ctrl_on:\n return\n dsta1, dsta2 = self.dsta.value\n dsta2 = dsta2 & ~(1 << 7) #Zero the \"Mod HV On\" bit\n if hv_ready:\n dsta2 = dsta2 | (1 << 4) #Turn on the \"Mod HV Ready\" bit\n self.dsta._data['value'] = [dsta1, dsta2]\n await self.dsta.publish(0)\n self.hv_ctrl_on = False\n await self.on_off_changed()\n\n @swrd.putter\n async def swrd(self, instance, value):\n ioc = instance.group\n \"\"\"\n SWRD bit decoder:\n bit | meaning | klystron faulted if bit set?\n -----------------------------------------------------\n 0 | Bad Cable Status | Faulted\n 1 | MKSU Protect | Faulted\n 2 | No Triggers | Faulted\n 3 | Modulator Fault | Faulted\n 4 | Lost Acc Trigger | Faulted\n 5 | Low RF Power | Faulted\n 6 | Amplitude Mean | Not Faulted\n 7 | Amplitude Jitter | Not Faulted\n 8 | Lost Phase | Faulted\n 9 | Phase Mean | Not Faulted\n 10 | Phase Jitter | Not Faulted\n 11 | No Sample Rate | Not 
Faulted\n 12 | No Accel Rate | Faulted \n \"\"\"\n fault_mask = 0b1000100111111\n if (int(value) & fault_mask) > 0:\n await self.interlock_trip()\n return value\n \n @hdsc.putter\n async def hdsc(self, instance, value):\n ioc = instance.group\n \"\"\"\n HDSC bit decoder:\n bit | meaning | klystron faulted if bit set?\n ---------------------------------------------\n 0 | Phase Trim Disabled | Not Faulted\n 1 | Maintenance Mode | Faulted\n 2 | To Be Replaced | Faulted\n 3 | Awaiting Run Up | Faulted\n 4 | Additional Phase Ctrl | Not Faulted\n 5 | No Touch Up | Not Faulted\n 6 | Check Phase | Not Faulted\n 7 | 14:1 Winding Ratio | Not Faulted\n 8 | Designated Spare | Not Faulted\n 9 | Solid State Ph Shiftr | Not Faulted\n 10 | Controlled by EPICS | Not Faulted\n 11 | Powers Transverse RF | Not Faulted\n 12 | Power Savings Mode | Faulted \n \"\"\"\n fault_mask = 0b1000000001110\n if (int(value) & fault_mask) > 0:\n await self.interlock_trip()\n return value\n\n @stat.putter\n async def stat(self, instance, value):\n ioc = instance.group\n \"\"\"\n STAT bit decoder:\n bit | meaning | klystron HV off if bit set?\n ---------------------------------------------\n 0 | Status OK | Not Faulted\n 1 | Status Maintenance | Faulted\n 2 | Status Unit Offline | Faulted\n 3 | Status Out of Tol | Not Faulted\n 4 | Status Bad CAMAC | Not Faulted\n 5 | Status SWRD Error | Not Faulted\n 6 | Dead Man Timeout | Faulted\n 7 | Fox Phase Home Error | Not Faulted\n 8 | Phase Mean Out of Tol | Not Faulted\n 9 | Status IPL Required | Not Faulted\n 10 | Status Update Request | Not Faulted\n \"\"\"\n off_mask = 0b00001000110\n if (int(value) & off_mask) > 0:\n await self.mod_off(hv_ready=False)\n else:\n if self.mod_hv_ctrl.value == \"ON\":\n await self.mod_on()\n else:\n await self.mod_off(hv_ready=True)\n return value\n\n @dsta.putter\n async def dsta(self, instance, value):\n ioc = instance.group\n \"\"\"\n DSTA1 bit decoder:\n bit | meaning | klystron faulted if bit set?\n ---------------------------------------------\n 0 | SLED Cavity Tuned | Not Faulted\n 1 | SLED Cavity Detuned | Not Faulted\n 2 | SLED Motor Not at Limit | Faulted\n 3 | SLED Upper Needle Fault | Faulted\n 4 | SLED Lower Needle Fault | Faulted\n 5 | Electromagnet Current Tols | Faulted\n 6 | Klystron Temperature | Faulted\n 7 | Klystron Reflected Energy | Faulted\n 8 | Klystron Over-Voltage | Faulted\n 9 | Klystron Over-Current | Faulted\n 10 | ADC Read Error | Not Faulted\n 11 | ADC Out of Tolerance | Not Faulted\n 12 | Desired Phase Change | Not Faulted\n 13 | Water Summary Fault | Faulted\n 14 | Accelerator Water Flowswitch 1 | Faulted\n 15 | Accelerator Water Flowswitch 2 | Faulted\n 16 | Waveguide Water Flowswitch 1 | Faulted\n 17 | Waveguide Water Flowswitch 2 | Faulted\n 18 | Klystron Water Flowswitch | Faulted\n 19 | 24V Battery Fault | Faulted\n 20 | Waveguide Vacuum Fault | Faulted\n 21 | Klystron Vacuum Fault | Faulted\n 22 | Electromagnet Current Fault | Faulted\n 23 | Electromagnet Breaker Fault | Faulted\n 24 | MKSU Trigger Enable Fault | Faulted\n 25 | MOD Available | Not Faulted\n 26 | No Text Defined | Not Faulted\n \n DSTA2 bit decoder:\n bit | meaning | klystron faulted if bit set?\n ---------------------------------------------\n 0 | Modulator Control Power Fault | Faulted\n 1 | Modulator VVS Voltage Fault | Faulted\n 2 | Modulator Klys Heater Delay | Faulted\n 3 | Modulator Interlocks Complete | Not Faulted\n 4 | Modulator HV Ready | Not Faulted\n 5 | Modulator Fault Lockout | Faulted\n 6 | Modulator External Fault | Faulted\n 
7 | Modulator HV On | Not Faulted\n 8 | Modulator Trigger Overcurrent | Faulted\n 9 | Mod. End-of-line Clipper Fault | Faulted\n 10 | Mod. Electromag Over Current | Faulted\n \"\"\"\n dsta1_fault_mask = 0b001111111111110001111111100\n dsta2_fault_mask = 0b11101100111\n if ((int(value[0]) & dsta1_fault_mask) > 0) or ((int(value[1]) & dsta2_fault_mask) > 0):\n await self.interlock_trip()\n return value\n\n @trim.putter\n async def trim(self, instance, value):\n ioc = instance.group\n if value == \"TRIM\":\n await asyncio.sleep(0.2)\n await ioc.phas.write(ioc.pdes.value)\n self.change_callback(self, ioc.phas.value, \"PHAS\")\n else:\n L.warning(\"Warning, only valid function is TRIM.\")\n return 0\n\n @enld.putter\n async def enld(self, instance, value):\n self.change_callback(self, value, \"ENLD\")\n return value\n\n @bc1_tctl.putter\n async def bc1_tctl(self, instance, value):\n self.has_accel_triggers = value in (\"Activate\", \"Reactivate\")\n await self.on_off_changed()\n await self.bc1_tstat.write(1 if self.has_accel_triggers else 0)\n await self.bc1_stat.write(1 if self.has_accel_triggers else 2)\n return value\n \n async def on_off_changed(self):\n is_on = self.has_accel_triggers and self.hv_ctrl_on and not self.tripped\n if is_on:\n await self.ampl.write(100.0)\n else:\n await self.ampl.write(0.0)\n self.change_callback(self, is_on, \"IS_ON\")\n\ndef _parse_klys_table(table):\n splits = [row.split() for row in table]\n return {'KLYS:LI{0}:{1}1'.format(ele_name[3:5],ele_name[6:8]): ( float(bmadEnld), float(bmadPhas), float(bmadEnld) > 1 ) for (_, ele_name, _, _, _, bmadEnld, bmadPhas) in splits}\n\ndef convert_device_to_element(device_name):\n return 'O_K{0}_{1}'.format(device_name[7:9],device_name[10])\n \ndef convert_sbst_to_element(device_name):\n return 'O_S{0}_{1}'.format(device_name[7:9],device_name[10])\n\ndef _parse_cudklys_table(table):\n \"\"\"\n Right now we basically just want a list of device names, eventually \n this might actually do something useful.\n \"\"\"\n splits = [row.split() for row in table]\n return {'CUDKLYS:LI{0}:{1}'.format(ele_name[3:5], ele_name[6:8]): 0 for (_, ele_name, _, _, _, _, _) in splits} \n\n\nclass KlystronService(simulacrum.Service):\n attr_for_klys_type = {\"ENLD\": \"ENLD_MeV\", \"PHAS\":\"PHAS_Deg\"} \n def __init__(self):\n super().__init__()\n self.ctx = Context.instance()\n #cmd socket is a synchronous socket, we don't want the asyncio context.\n self.cmd_socket = zmq.Context().socket(zmq.REQ)\n self.cmd_socket.connect(\"tcp://127.0.0.1:{}\".format(os.environ.get('MODEL_PORT', 12312)))\n init_vals, init_cud_vals = self.get_klystron_ACTs_from_model()\n init_sbst_vals = self.get_sbst_ACTs_from_model()\n klys_pvs = {device_name: KlystronPV(device_name, convert_device_to_element(device_name), self.on_klystron_change, initial_values=init_vals[device_name], prefix=device_name) for device_name in init_vals.keys()}\n cud_pvs = {device_name: CudKlys(device_name,convert_device_to_element(device_name), initial_value=init_cud_vals[device_name], prefix=device_name) for device_name in init_cud_vals.keys()}\n sbst_pvs = {device_name: SubboosterPV(device_name,convert_sbst_to_element(device_name), prefix=device_name) for device_name in init_sbst_vals.keys()}\n L.info(init_vals)\n self.add_pvs(klys_pvs)\n self.add_pvs(cud_pvs)\n self.add_pvs(sbst_pvs)\n L.info(\"Initialization complete.\")\n\n def get_klystron_ACTs_from_model(self):\n init_vals = {}\n init_CudVals = {}\n self.cmd_socket.send_pyobj({\"cmd\": \"tao\", \"val\": \"show lat -no_label_lines 
-attribute ENLD_MeV -attribute Phase_Deg O_K*\"})\n table = self.cmd_socket.recv_pyobj()['result']\n # We inject our own static data for the injector and TCAV stations, which aren't modelled.\n injector_stat = ['0 O_K20_5 Lcavity 5.6 --- 100 0', '0 O_K20_6 Lcavity 0.5 --- 6 0' , '0 O_K20_7 Lcavity 1.518 --- 58.5 0' , '0 O_K20_8 Lcavity 5.362 --- 114.0 0', '0 O_K24_8 Lcavity 160 --- 114.0 0']\n table.extend(injector_stat)\n init_vals = _parse_klys_table(table)\n init_CudVals = _parse_cudklys_table(table)\n return init_vals, init_CudVals\n\n def get_sbst_ACTs_from_model(self):\n init_vals = {} ## TODO: Integrate SBST phase with model Overlord\n for ii in range(21,31):\n init_vals[f'SBST:LI{ii}:1'] = (0,0)\n return init_vals\n\n def on_klystron_change(self, klystron_pv, value, parameter):\n element = klystron_pv.element_name\n if parameter == \"PHAS\":\n klys_attr = \"Phase_Deg\"\n elif parameter == \"ENLD\": \n klys_attr = \"ENLD_MeV\"\n elif parameter == \"IS_ON\":\n klys_attr = \"is_on\"\n value = 'T' if value else 'F'\n element = element[2:]+'*' #O_K30_8 overlay to K30_8*\n\n cmd = f'set ele {element} {klys_attr} = {value}'\n L.info(cmd)\n self.cmd_socket.send_pyobj({\"cmd\": \"tao\", \"val\": cmd})\n msg = self.cmd_socket.recv_pyobj()['result']\n L.info(msg)\n \ndef main():\n service = KlystronService()\n loop = asyncio.get_event_loop()\n _, run_options = ioc_arg_parser(\n default_prefix='',\n desc=\"Simulated Klystron Service\")\n run(service, **run_options)\n \nif __name__ == '__main__':\n main()\n \n\n\n\n \n" ]
[ [ "numpy.matmul", "numpy.stack", "numpy.random.normal", "numpy.identity", "numpy.array" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
takuya-ki/wrs
[ "f6e1009b94332504042fbde9b39323410394ecde", "f6e1009b94332504042fbde9b39323410394ecde", "f6e1009b94332504042fbde9b39323410394ecde", "f6e1009b94332504042fbde9b39323410394ecde", "f6e1009b94332504042fbde9b39323410394ecde", "f6e1009b94332504042fbde9b39323410394ecde" ]
[ "basis/trimesh/io/stl.py", "drivers/devices/kinect_azure/helper.py", "basis/trimesh/visual.py", "0000_students_work/2021tro/projection_global_localrefine.py", "0000_examples/hayakawa/research_posemake_many.py", "motion/probabilistic/_rrt_connect_wrsnew.py" ]
[ "import numpy as np\n\nfrom ..util import is_binary_file\n\n# define a numpy datatype for the STL file\n_stl_dtype = np.dtype([('normals', np.float32, (3)), ('vertices', np.float32, (3, 3)), ('attributes', np.uint16)])\n_stl_dtype_header = np.dtype([('header', np.void, 80), ('face_count', np.int32)])\n\n\ndef load_stl(file_obj, file_type=None):\n if 'b' not in file_obj.mode:\n raise\n if is_binary_file(file_obj):\n return load_stl_binary(file_obj)\n else:\n return load_stl_ascii(file_obj)\n\n\ndef load_stl_binary(file_obj):\n \"\"\"\n Load a binary STL file into a trimesh object.\n Uses a single main struct.unpack call, and is significantly faster\n than looping methods or ASCII STL.\n :param file_obj:\n :return:\n \"\"\"\n header = np.fromstring(file_obj.read(84), dtype=_stl_dtype_header)\n # now we check the length from the header versus the length of the file\n # data_start should always be position 84, but hard coding that felt ugly\n data_start = file_obj.tell()\n # this seeks to the end of the file (position 0, relative to the end of the file 'whence=2')\n file_obj.seek(0, 2)\n # we save the location of the end of the file and seek back to where we started from\n data_end = file_obj.tell()\n file_obj.seek(data_start)\n # the binary format has a rigidly defined structure, and if the length\n # of the file doesn't match the header, the loaded version is almost\n # certainly going to be garbage. \n data_ok = (data_end - data_start) == (header['face_count'] * _stl_dtype.itemsize)\n\n # this check is to see if this really is a binary STL file. \n # if we don't do this and try to load a file that isn't structured properly \n # the struct.unpack call uses 100% memory until the whole thing crashes, \n # so it's much better to raise an exception here. \n if not data_ok:\n raise ValueError('Binary STL has incorrect length in header!')\n # all of our vertices will be loaded in order due to the STL format, \n # so faces are just sequential indices reshaped. 
\n faces = np.arange(header['face_count'] * 3).reshape((-1, 3))\n blob = np.fromstring(file_obj.read(), dtype=_stl_dtype)\n result = {'vertices': blob['vertices'].reshape((-1, 3)),\n 'face_normals': blob['normals'].reshape((-1, 3)),\n 'faces': faces}\n return result\n\n\ndef load_stl_ascii(file_obj):\n '''\n Load an ASCII STL file.\n '''\n header = file_obj.readline()\n\n text = file_obj.read()\n if hasattr(text, 'decode'):\n text = text.decode('utf-8')\n text = text.lower().split('endsolid')[0]\n blob = np.array(text.split())\n\n # there are 21 'words' in each face\n face_len = 21\n face_count = len(blob) / face_len\n if (len(blob) % face_len) != 0:\n raise ValueError('Incorrect number of values in STL file!')\n\n face_count = int(face_count)\n # this offset is to be added to a fixed set of indices that is tiled\n offset = face_len * np.arange(face_count).reshape((-1, 1))\n normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset\n vertex_index = np.tile([8, 9, 10, 12, 13, 14, 16, 17, 18], (face_count, 1)) + offset\n\n # faces are groups of three sequential vertices, as vertices are not references\n faces = np.arange(face_count * 3).reshape((-1, 3))\n face_normals = blob[normal_index].astype(float)\n vertices = blob[vertex_index.reshape((-1, 3))].astype(float)\n\n return {'vertices': vertices,\n 'faces': faces,\n 'face_normals': face_normals}\n\n\ndef export_stl(mesh):\n '''\n Convert a Trimesh object into a binary STL file.\n Arguments\n ---------\n mesh: Trimesh object\n Returns\n ---------\n export: bytes, representing mesh in binary STL form\n '''\n header = np.zeros(1, dtype=_stl_dtype_header)\n header['face_count'] = len(mesh.faces)\n packed = np.zeros(len(mesh.faces), dtype=_stl_dtype)\n packed['normals'] = mesh.face_normals\n packed['vertices'] = mesh.triangles\n export = header.tostring()\n export += packed.tostring()\n return export\n\n\n_stl_loaders = {'stl': load_stl}\n", "import cv2\nimport numpy as np\nfrom drivers.devices.kinect_azure.pykinectazure import PyKinectAzure, _k4a\n\nmtx = np.array([[610.16101074, 0, 638.35681152], [0, 610.19384766, 367.82455444], [0, 0, 1]])\n\n\ndef get_images(pk_obj):\n while True:\n pk_obj.device_get_capture()\n color_image_handle = pk_obj.capture_get_color_image()\n depth_image_handle = pk_obj.capture_get_depth_image()\n if color_image_handle and depth_image_handle:\n color_image = pk_obj.image_convert_to_numpy(color_image_handle)\n depth_image = pk_obj.transform_depth_to_color(depth_image_handle, color_image_handle)\n pk_obj.image_release(color_image_handle)\n pk_obj.image_release(depth_image_handle)\n pk_obj.capture_release()\n return color_image, depth_image\n else:\n print(\"get color image failed\")\n pk_obj.capture_release()\n\n\ndef depth_to_point(mtx, pixel_pos, depth_image):\n img_x, img_y = pixel_pos\n rgbcam_fx = mtx[0][0]\n rgbcam_fy = mtx[1][1]\n rgbcam_cx = mtx[0][2]\n rgbcam_cy = mtx[1][2]\n world_x = (img_x - rgbcam_cx) * depth_image / (rgbcam_fx * 1000)\n world_y = (img_y - rgbcam_cy) * depth_image / (rgbcam_fy * 1000)\n world_z = depth_image / 1000\n return np.array([world_x, world_y, world_z])\n\n\ndef pcd_read(depth_image, mtx):\n image_size = depth_image.shape\n rgbcam_fx = mtx[0][0]\n rgbcam_fy = mtx[1][1]\n rgbcam_cx = mtx[0][2]\n rgbcam_cy = mtx[1][2]\n length = image_size[0] * image_size[1]\n kx = (np.arange(image_size[1]) - rgbcam_cx) / (rgbcam_fx * 1000)\n kx = np.tile(kx, image_size[0])\n ky = (np.arange(image_size[0]) - rgbcam_cy) / (rgbcam_fy * 1000)\n ky = ky.repeat(image_size[1])\n k = np.array(list(zip(kx, 
ky, np.ones(length, dtype=int) / 1000)))\n depth = depth_image.repeat(3).reshape(length, 3) + \\\n np.tile(np.array([rgbcam_fx, rgbcam_fy, 0]), length).reshape(length, 3)\n return k * depth\n\n\nif __name__ == \"__main__\":\n pk_obj = PyKinectAzure()\n pk_obj.device_open()\n device_config = pk_obj.config\n device_config.color_resolution = _k4a.K4A_COLOR_RESOLUTION_1080P\n device_config.depth_mode = _k4a.K4A_DEPTH_MODE_WFOV_2X2BINNED\n print(device_config)\n pk_obj.device_start_cameras(device_config)\n color, depth = get_images(pk_obj)\n cv2.imshow(\"color\", color)\n cv2.imshow(\"depth\", depth)\n cv2.waitKey(0)\n", "import numpy as np\nfrom colorsys import hsv_to_rgb\nfrom collections import deque\n\nfrom .util import is_sequence, is_shape, Cache, DataStore\nfrom .constants import log\n\nCOLORS = {'red': [205, 59, 34, 255],\n 'purple': [150, 111, 214, 255],\n 'blue': [119, 158, 203, 255],\n 'brown': [160, 85, 45, 255]}\nCOLOR_DTYPE = np.dtype(np.uint8)\nDEFAULT_COLOR = np.array(COLORS['purple'], dtype=COLOR_DTYPE)\nRED_COLOR = np.array(COLORS['red'], dtype=COLOR_DTYPE)\n\n\nclass VisualAttributes(object):\n '''\n Hold the visual attributes (usually colors) for a mesh. \n\n This is a bit of a dumpster fire and probably needs a re-write\n '''\n\n def __init__(self, mesh=None, dtype=None, **kwargs):\n self.mesh = mesh\n\n self._validate = True\n self._data = DataStore()\n self._cache = Cache(id_function=self._data.md5)\n\n if dtype is None:\n dtype = COLOR_DTYPE\n self.dtype = dtype\n\n colors = _kwargs_to_color(mesh, **kwargs)\n self.vertex_colors, self.face_colors = colors\n\n def choose(self):\n '''\n If both face and vertex colors are defined, choose one of them.\n '''\n if all(self._set.values()):\n sig_face = self._data['face_colors'].ptp(axis=0).sum()\n sig_vertex = self._cache['vertex_colors'].ptp(axis=0).sum()\n if sig_face > sig_vertex:\n self.vertex_colors = None\n else:\n self.face_colors = None\n\n @property\n def _set(self):\n result = {'face': is_shape(self._data['face_colors'], (-1, (3, 4))),\n 'vertex': is_shape(self._cache['vertex_colors'], (-1, (3, 4)))}\n return result\n\n @property\n def defined(self):\n defined = np.any(self._set.values())\n defined = defined and self.mesh is not None\n return defined\n\n @property\n def transparency(self):\n '''\n Returns\n ------------\n transparency: bool, does the visual attributes contain any transparency\n '''\n cached = self._cache.get('transparency')\n if cached is not None:\n return cached\n transparency = False\n color_max = (2 ** (COLOR_DTYPE.itemsize * 8)) - 1\n if self._set['face']:\n transparency = (is_shape(self._data['face_colors'], (-1, 4)) and\n np.any(self._data['face_colors'][:, 3] < color_max))\n elif self._set['vertex']:\n transparency = (is_shape(self._data['vertex_colors'], (-1, 4)) and\n np.any(self._cache['vertex_colors'][:, 3] < color_max))\n\n return self._cache.set(key='transparency',\n value=bool(transparency))\n\n def md5(self):\n return self._data.md5()\n\n @property\n def face_colors(self):\n stored = self._data['face_colors']\n if is_shape(stored, (len(self.mesh.faces), (3, 4))):\n return stored\n log.debug('Returning default colors for faces.')\n self._data['face_colors'] = np.tile(DEFAULT_COLOR,\n (len(self.mesh.faces), 1))\n return self._data['face_colors']\n\n @face_colors.setter\n def face_colors(self, values):\n values = np.asanyarray(values)\n if values.shape in ((3,), (4,)):\n # case where a single RGB/RGBa color has been passed to the setter\n # we apply this color to all faces \n values = 
np.tile(values, (len(self.mesh.faces), 1))\n self._data['face_colors'] = rgba(values, dtype=self.dtype)\n\n @property\n def vertex_colors(self):\n cached = self._cache['vertex_colors']\n if is_shape(cached, (len(self.mesh.vertices), (3, 4))):\n return cached\n\n log.debug('Vertex colors being generated from face colors')\n colors = face_to_vertex_color(self.mesh, self.face_colors)\n self._cache['vertex_colors'] = colors\n return colors\n\n @vertex_colors.setter\n def vertex_colors(self, values):\n self._cache['vertex_colors'] = rgba(values, dtype=self.dtype)\n\n def update_faces(self, mask):\n stored = self._data['face_colors']\n if not is_shape(stored, (-1, (3, 4))):\n return\n try:\n self._data['face_colors'] = stored[mask]\n except:\n log.warning('Face colors not updated', exc_info=True)\n\n def update_vertices(self, mask):\n stored = self._data['vertex_colors']\n if not is_shape(stored, (-1, (3, 4))):\n return\n try:\n self._data['vertex_colors'] = stored[mask]\n except:\n log.debug('Vertex colors not updated', exc_info=True)\n\n def subsets(self, faces_sequence):\n result = [VisualAttributes() for i in range(len(faces_sequence))]\n if self._set['face']:\n face = self._data['face_colors']\n for i, f in enumerate(faces_sequence):\n result[i].face_colors = face[list(f)]\n return np.array(result)\n\n def union(self, others):\n return visuals_union(np.append(self, others))\n\n\ndef _kwargs_to_color(mesh, **kwargs):\n '''\n Given a set of keyword arguments, see if any reference color\n in their name, and match the dimensions of the mesh.\n '''\n\n def pick_option(vf):\n if any(i is None for i in vf):\n return vf\n result = [None, None]\n signal = [i.ptp(axis=0).sum() for i in vf]\n signal_max = np.argmax(signal)\n result[signal_max] = vf[signal_max]\n return result\n\n def pick_color(sequence):\n if len(sequence) == 0:\n return None\n elif len(sequence) == 1:\n return sequence[0]\n else:\n signal = [i.ptp(axis=0).sum() for i in sequence]\n signal_max = np.argmax(signal)\n return sequence[signal_max]\n\n if mesh is None:\n result = [None, None]\n if 'face_colors' in kwargs:\n result[1] = np.asanyarray(kwargs['face_colors'])\n if 'vertex_colors' in kwargs:\n result[0] = np.asanyarray(kwargs['vertex_colors'])\n return result\n\n vertex = deque()\n face = deque()\n\n for key in kwargs.keys():\n if not ('color' in key):\n continue\n value = np.asanyarray(kwargs[key])\n if len(value) == len(mesh.vertices):\n vertex.append(value)\n elif len(value) == len(mesh.faces):\n face.append(value)\n return pick_option([pick_color(i) for i in [vertex, face]])\n\n\ndef visuals_union(visuals, *args):\n visuals = np.append(visuals, args)\n color = {'face_colors': None,\n 'vertex_colors': None}\n\n vertex_ok = True\n vertex = [None] * len(visuals)\n\n face_ok = True\n face = [None] * len(visuals)\n\n for i, v in enumerate(visuals):\n face_ok = face_ok and v._set['face']\n vertex_ok = vertex_ok and v._set['vertex']\n\n if face_ok:\n if v.mesh is None:\n # if the mesh is None, don't force a \n # dimension check for the colors\n face[i] = rgba(v._data['face_colors'])\n else:\n face[i] = rgba(v.face_colors)\n if vertex_ok:\n if v.mesh is None:\n vertex[i] = rgba(v._data['vertex_colors'])\n else:\n vertex[i] = rgba(v.vertex_colors)\n\n if face_ok:\n color['face_colors'] = np.vstack(face)\n if vertex_ok:\n color['vertex_colors'] = np.vstack(vertex)\n\n return VisualAttributes(**color)\n\n\ndef color_to_float(color, dtype=None):\n color = np.asanyarray(color)\n if dtype is None:\n dtype = color.dtype\n else:\n color = 
color.astype(dtype)\n if dtype.kind in 'ui':\n signed = int(dtype.kind == 'i')\n color_max = float((2 ** ((dtype.itemsize * 8) - signed)) - 1)\n color = color.astype(np.float) / color_max\n return color\n\n\ndef rgba(colors, dtype=None):\n '''\n Convert an RGB color to an RGBA color.\n\n Arguments\n ----------\n colors: (n,[3|4]) set of RGB or RGBA colors\n \n Returns\n ----------\n colors: (n,4) set of RGBA colors\n '''\n if not is_sequence(colors):\n return\n if dtype is None:\n dtype = COLOR_DTYPE\n colors = np.asanyarray(colors, dtype=dtype)\n if is_shape(colors, (-1, 3)):\n opaque = (2 ** (np.dtype(dtype).itemsize * 8)) - 1\n colors = np.column_stack((colors, opaque * np.ones(len(colors)))).astype(dtype)\n return colors\n\n\ndef random_color(dtype=COLOR_DTYPE):\n '''\n Return a random RGB color using datatype specified.\n '''\n hue = np.random.random() + .61803\n hue %= 1.0\n color = np.array(hsv_to_rgb(hue, .99, .99))\n if np.dtype(dtype).kind in 'iu':\n max_value = (2 ** (np.dtype(dtype).itemsize * 8)) - 1\n color *= max_value\n color = np.append(color, max_value).astype(dtype)\n return color\n\n\ndef vertex_to_face_colors(vertex_colors, faces):\n face_colors = vertex_colors[faces].mean(axis=2).astype(vertex_colors.dtype)\n return face_colors\n\n\ndef face_to_vertex_color(mesh, face_colors, dtype=COLOR_DTYPE):\n '''\n Convert a set of face colors into a set of vertex colors.\n '''\n\n color_dim = np.shape(face_colors)[1]\n\n vertex_colors = np.zeros((len(mesh.vertices), 3, color_dim))\n population = np.zeros((len(mesh.vertices), 3), dtype=np.bool)\n\n vertex_colors[[mesh.faces[:, 0], 0]] = face_colors\n vertex_colors[[mesh.faces[:, 1], 1]] = face_colors\n vertex_colors[[mesh.faces[:, 2], 2]] = face_colors\n\n population[[mesh.faces[:, 0], 0]] = True\n population[[mesh.faces[:, 1], 1]] = True\n population[[mesh.faces[:, 2], 2]] = True\n\n # clip the population sum to 1, to avoid a division error in edge cases\n populated = np.clip(population.sum(axis=1), 1, 3)\n vertex_colors = vertex_colors.sum(axis=1) / populated.reshape((-1, 1))\n return vertex_colors.astype(dtype)\n", "import numpy as np\nimport modeling.geometric_model as gm\nimport modeling.collision_model as cm\nimport visualization.panda.world as wd\nimport basis.robot_math as rm\nimport math\nfrom scipy.spatial import cKDTree\nimport vision.depth_camera.surface.rbf_surface as rbfs\n\nbase = wd.World(cam_pos=np.array([-.3,-.7,.42]), lookat_pos=np.array([0,0,0]))\n# gm.gen_frame().attach_to(base)\nbowl_model = cm.CollisionModel(initor=\"./objects/bowl.stl\")\nbowl_model.set_rgba([.3,.3,.3,.3])\nbowl_model.set_rotmat(rm.rotmat_from_euler(math.pi,0,0))\n# bowl_model.attach_to(base)\n\npn_direction = np.array([0, 0, -1])\n\nbowl_samples, bowl_sample_normals = bowl_model.sample_surface(toggle_option='normals', radius=.002)\nselection = bowl_sample_normals.dot(-pn_direction)>.1\nbowl_samples = bowl_samples[selection]\nbowl_sample_normals=bowl_sample_normals[selection]\ntree = cKDTree(bowl_samples)\nsurface = rbfs.RBFSurface(bowl_samples[:, :2], bowl_samples[:,2])\nsurface.get_gometricmodel(rgba=[.3,.3,.3,.3]).attach_to(base)\n\npt_direction = rm.orthogonal_vector(pn_direction, toggle_unit=True)\ntmp_direction = np.cross(pn_direction, pt_direction)\nplane_rotmat = np.column_stack((pt_direction, tmp_direction, pn_direction))\nhomomat=np.eye(4)\nhomomat[:3,:3] = plane_rotmat\nhomomat[:3,3] = np.array([-.07,-.03,.1])\ntwod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, 
rgba=[1,1,1,.3])\ntwod_plane.attach_to(base)\n\ncircle_radius=.05\nline_segs = [[homomat[:3,3], homomat[:3,3]+pt_direction*.05], [homomat[:3,3]+pt_direction*.05, homomat[:3,3]+pt_direction*.05+tmp_direction*.05],\n [homomat[:3,3]+pt_direction*.05+tmp_direction*.05, homomat[:3,3]+tmp_direction*.05], [homomat[:3,3]+tmp_direction*.05, homomat[:3,3]]]\n# gm.gen_linesegs(line_segs).attach_to(base)\nfor sec in line_segs:\n gm.gen_stick(spos=sec[0], epos=sec[1], rgba=[0, 0, 0, 1], thickness=.002, type='round').attach_to(base)\nepos = (line_segs[0][1]-line_segs[0][0])*.7+line_segs[0][0]\ngm.gen_arrow(spos=line_segs[0][0], epos=epos, thickness=0.004).attach_to(base)\nspt = homomat[:3,3]\n# gm.gen_stick(spt, spt + pn_direction * 10, rgba=[0,1,0,1]).attach_to(base)\n# base.run()\ngm.gen_dasharrow(spt, spt-pn_direction*.07, thickness=.004).attach_to(base) # p0\ncpt, cnrml = bowl_model.ray_hit(spt, spt + pn_direction * 10000, option='closest')\ngm.gen_dashstick(spt, cpt, rgba=[.57,.57,.57,.7], thickness=0.003).attach_to(base)\ngm.gen_sphere(pos=cpt, radius=.005).attach_to(base)\ngm.gen_dasharrow(cpt, cpt-pn_direction*.07, thickness=.004).attach_to(base) # p0\ngm.gen_dasharrow(cpt, cpt+cnrml*.07, thickness=.004).attach_to(base) # p0\n\nangle = rm.angle_between_vectors(-pn_direction, cnrml)\nvec = np.cross(-pn_direction, cnrml)\nrotmat = rm.rotmat_from_axangle(vec, angle)\nnew_plane_homomat = np.eye(4)\nnew_plane_homomat[:3,:3] = rotmat.dot(homomat[:3,:3])\nnew_plane_homomat[:3,3] = cpt\ntwod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=new_plane_homomat, rgba=[1,1,1,.3])\ntwod_plane.attach_to(base)\nnew_line_segs = [[cpt, cpt+rotmat.dot(pt_direction)*.05],\n [cpt+rotmat.dot(pt_direction)*.05, cpt+rotmat.dot(pt_direction)*.05+rotmat.dot(tmp_direction)*.05],\n [cpt+rotmat.dot(pt_direction)*.05+rotmat.dot(tmp_direction)*.05, cpt+rotmat.dot(tmp_direction)*.05],\n [cpt+rotmat.dot(tmp_direction)*.05, cpt]]\n# gm.gen_linesegs(new_line_segs).attach_to(base)\nfor sec in new_line_segs:\n gm.gen_stick(spos=sec[0], epos=sec[1], rgba=[0, 0, 0, 1], thickness=.002, type='round').attach_to(base)\nepos = (new_line_segs[0][1]-new_line_segs[0][0])*.7+new_line_segs[0][0]\ngm.gen_arrow(spos=new_line_segs[0][0], epos=epos, thickness=0.004).attach_to(base)\n\nlast_normal = cnrml\ndirection = rotmat.dot(pt_direction)\nn=3\nfor tick in range(1, n+1):\n len = .05/n\n tmp_cpt = cpt\n extended_len = 0\n for p in np.linspace(0, len, 1000):\n tmp_t_npt = cpt+direction*p\n tmp_z_surface = surface.get_zdata(np.array([tmp_t_npt[:2]]))\n tmp_projected_point = np.array([tmp_t_npt[0], tmp_t_npt[1], tmp_z_surface[0]])\n tmp_len = np.linalg.norm(tmp_projected_point - tmp_cpt)\n extended_len += tmp_len\n tmp_cpt = tmp_projected_point\n print(tick, extended_len, len)\n if extended_len>len:\n break\n projected_point = tmp_projected_point\n t_npt = tmp_t_npt\n domain_grid = np.meshgrid(np.linspace(-.005, .005, 100, endpoint=True),\n np.linspace(-.005, .005, 100, endpoint=True))\n domain_0, domain_1 = domain_grid\n domain = np.column_stack((domain_0.ravel()+t_npt[0], domain_1.ravel()+t_npt[1]))\n codomain = surface.get_zdata(domain)\n vertices = np.column_stack((domain, codomain))\n plane_center, plane_normal = rm.fit_plane(vertices)\n new_normal = plane_normal\n if pn_direction.dot(new_normal) > .1:\n new_normal = -new_normal\n angle = rm.angle_between_vectors(last_normal, new_normal)\n vec = rm.unit_vector(np.cross(last_normal, new_normal))\n new_rotmat = rm.rotmat_from_axangle(vec, angle)\n direction = new_rotmat.dot(direction)\n 
gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)\n cpt=projected_point\n last_normal = new_normal\n\ndirection = new_rotmat.dot(tmp_direction)\nfor tick in range(1, n+1):\n len = .05/n\n tmp_cpt = cpt\n extended_len = 0\n for p in np.linspace(0, len, 1000):\n tmp_t_npt = cpt+direction*p\n tmp_z_surface = surface.get_zdata(np.array([tmp_t_npt[:2]]))\n tmp_projected_point = np.array([tmp_t_npt[0], tmp_t_npt[1], tmp_z_surface[0]])\n tmp_len = np.linalg.norm(tmp_projected_point - tmp_cpt)\n extended_len += tmp_len\n tmp_cpt = tmp_projected_point\n print(tick, extended_len, len)\n if extended_len>len:\n break\n projected_point = tmp_projected_point\n t_npt = tmp_t_npt\n domain_grid = np.meshgrid(np.linspace(-.005, .005, 100, endpoint=True),\n np.linspace(-.005, .005, 100, endpoint=True))\n domain_0, domain_1 = domain_grid\n domain = np.column_stack((domain_0.ravel()+t_npt[0], domain_1.ravel()+t_npt[1]))\n codomain = surface.get_zdata(domain)\n vertices = np.column_stack((domain, codomain))\n plane_center, plane_normal = rm.fit_plane(vertices)\n new_normal = plane_normal\n if pn_direction.dot(new_normal) > .1:\n new_normal = -new_normal\n angle = rm.angle_between_vectors(last_normal, new_normal)\n vec = rm.unit_vector(np.cross(last_normal, new_normal))\n new_rotmat = rm.rotmat_from_axangle(vec, angle)\n direction = new_rotmat.dot(tmp_direction)\n gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)\n cpt=projected_point\n last_normal = new_normal\n\ndirection = new_rotmat.dot(-pt_direction)\nfor tick in range(1, n+1):\n len = .05/n\n tmp_cpt = cpt\n extended_len = 0\n for p in np.linspace(0, len, 1000):\n tmp_t_npt = cpt+direction*p\n tmp_z_surface = surface.get_zdata(np.array([tmp_t_npt[:2]]))\n tmp_projected_point = np.array([tmp_t_npt[0], tmp_t_npt[1], tmp_z_surface[0]])\n tmp_len = np.linalg.norm(tmp_projected_point - tmp_cpt)\n extended_len += tmp_len\n tmp_cpt = tmp_projected_point\n print(tick, extended_len, len)\n if extended_len>len:\n break\n projected_point = tmp_projected_point\n t_npt = tmp_t_npt\n domain_grid = np.meshgrid(np.linspace(-.005, .005, 100, endpoint=True),\n np.linspace(-.005, .005, 100, endpoint=True))\n domain_0, domain_1 = domain_grid\n domain = np.column_stack((domain_0.ravel()+t_npt[0], domain_1.ravel()+t_npt[1]))\n codomain = surface.get_zdata(domain)\n vertices = np.column_stack((domain, codomain))\n plane_center, plane_normal = rm.fit_plane(vertices)\n new_normal = plane_normal\n if pn_direction.dot(new_normal) > .1:\n new_normal = -new_normal\n angle = rm.angle_between_vectors(last_normal, new_normal)\n vec = rm.unit_vector(np.cross(last_normal, new_normal))\n new_rotmat = rm.rotmat_from_axangle(vec, angle)\n direction = new_rotmat.dot(-pt_direction)\n gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)\n cpt=projected_point\n last_normal = new_normal\n\ndirection = new_rotmat.dot(-tmp_direction)\nfor tick in range(1, n+1):\n len = .05/n\n tmp_cpt = cpt\n extended_len = 0\n for p in np.linspace(0, len, 1000):\n tmp_t_npt = cpt+direction*p\n tmp_z_surface = surface.get_zdata(np.array([tmp_t_npt[:2]]))\n tmp_projected_point = np.array([tmp_t_npt[0], tmp_t_npt[1], tmp_z_surface[0]])\n tmp_len = np.linalg.norm(tmp_projected_point - tmp_cpt)\n extended_len += tmp_len\n tmp_cpt = tmp_projected_point\n print(tick, extended_len, len)\n if extended_len>len:\n break\n projected_point = 
tmp_projected_point\n t_npt = tmp_t_npt\n domain_grid = np.meshgrid(np.linspace(-.005, .005, 100, endpoint=True),\n np.linspace(-.005, .005, 100, endpoint=True))\n domain_0, domain_1 = domain_grid\n domain = np.column_stack((domain_0.ravel()+t_npt[0], domain_1.ravel()+t_npt[1]))\n codomain = surface.get_zdata(domain)\n vertices = np.column_stack((domain, codomain))\n plane_center, plane_normal = rm.fit_plane(vertices)\n new_normal = plane_normal\n if pn_direction.dot(new_normal) > .1:\n new_normal = -new_normal\n angle = rm.angle_between_vectors(last_normal, new_normal)\n vec = rm.unit_vector(np.cross(last_normal, new_normal))\n new_rotmat = rm.rotmat_from_axangle(vec, angle)\n direction = new_rotmat.dot(-tmp_direction)\n gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)\n cpt=projected_point\n last_normal = new_normal\n\nbase.run()\n", "#### Program for my research\n#### generate many pose of graspping in both hands(for pulling rope) and pushing in left hand(for pushing object)\nimport visualization.panda.world as wd\nimport robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq85\nimport robot_sim.robots.ur3_dual.ur3_dual as rbts\nimport modeling.geometric_model as gm\nimport basis.robot_math as rm\nimport math\nimport numpy as np\nfrom pyquaternion import Quaternion\nimport copy\n\n\nclass PoseMaker(object):\n\n def __init__(self):\n self.rtq85 = rtq85.Robotiq85()\n self.rbt = rbts.UR3Dual()\n # # import manipulation.grip.robotiq85.robotiq85 as rtq85\n # # self.base = pandactrl.World(camp=[5000, 3000, 3000], lookatp=[0, 0, 700])\n # self.hndfa = rtq85.Robotiq85Factory()\n # self.rtq85 = self.hndfa.genHand()\n # self.rgthnd = self.hndfa.genHand()\n # self.lfthnd = self.hndfa.genHand()\n # self.robot_s = robot_s.Ur3DualRobot(self.rgthnd, self.lfthnd)\n # self.rbtmg = robotmesh.Ur3DualMesh()\n # # self.obj = cm.CollisionModel(initor=\"./objects/research_box.stl\")\n\n def lftgrasppose(self):\n lftdirstart = 250\n lftverticalstart = lftdirstart + 90\n handrotrange = 5\n predefined_grasps_lft = []\n handdirect_lft = []\n loc_z = np.array([0, -1, 0])\n rotmat = rm.rotmat_from_axangle(loc_z, math.radians(-90))\n predefined_grasps_lft.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]),\n loc_z,\n rotmat.dot(np.array([1, 0, 0])),\n jaw_width=self.rtq85.jawwidth_rng[1]))\n handdirect_lft.append([0, -1, 0])\n for i in range(8):\n loc_z = np.array([math.cos(math.radians(lftdirstart + i * handrotrange)),\n math.sin(math.radians(lftdirstart + i * handrotrange)), -.2])\n rotmat = rm.rotmat_from_axangle(loc_z, math.radians(-90))\n predefined_grasps_lft.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]),\n loc_z,\n rotmat.dot(np.array([math.cos(math.radians(lftverticalstart + i * handrotrange)),\n math.sin(math.radians(lftverticalstart + i * handrotrange)),\n 0])),\n jaw_width=self.rtq85.jawwidth_rng[0]))\n handdirect_lft.append([math.cos(math.radians(lftdirstart + i * handrotrange)),\n math.sin(math.radians(lftdirstart + i * handrotrange)), -.2])\n\n for i in range(8):\n loc_z = np.array([math.cos(math.radians(lftdirstart + i * handrotrange)),\n math.sin(math.radians(lftdirstart + i * handrotrange)), 0])\n rotmat = rm.rotmat_from_axangle(loc_z, math.radians(-90))\n predefined_grasps_lft.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]),\n loc_z,\n rotmat.dot(np.array([math.cos(math.radians(lftverticalstart + i * handrotrange)),\n math.sin(math.radians(lftverticalstart + i * 
handrotrange)),\n 0])),\n jaw_width=self.rtq85.jawwidth_rng[0]))\n handdirect_lft.append([math.cos(math.radians(lftdirstart + i * handrotrange)),\n math.sin(math.radians(lftdirstart + i * handrotrange)), 0])\n\n for i in range(8):\n loc_z = np.array([math.cos(math.radians(lftdirstart + i * handrotrange)),\n math.sin(math.radians(lftdirstart + i * handrotrange)), 0])\n rotmat = rm.rotmat_from_axangle(loc_z, math.radians(-90))\n predefined_grasps_lft.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]),\n loc_z,\n rotmat.dot(np.array([math.cos(math.radians(lftverticalstart + i * handrotrange)),\n math.sin(math.radians(lftverticalstart + i * handrotrange)),\n 0])),\n jaw_width=self.rtq85.jawwidth_rng[0]))\n handdirect_lft.append([math.cos(math.radians(lftdirstart + i * handrotrange)),\n math.sin(math.radians(lftdirstart + i * handrotrange)), .2])\n return predefined_grasps_lft, handdirect_lft\n\n def rgtgrasppose(self):\n rgtdirstart = 90 # hand approach direction\n rgtverticalstart = rgtdirstart - 90 # thumb direction\n handrotrange = 5\n predefined_grasps_rgt = []\n handdirect_rgt = []\n loc_z = np.array([0, 1, 0])\n rotmat = rm.rotmat_from_axangle(loc_z, math.radians(90))\n predefined_grasps_rgt.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]), loc_z, rotmat.dot(np.array([1, 0, 0])),\n jaw_width=self.rtq85.jawwidth_rng[1]))\n handdirect_rgt.append([0, 1, 0])\n for i in range(4):\n loc_z = np.array([math.cos(math.radians(rgtdirstart - i * handrotrange)),\n math.sin(math.radians(rgtdirstart - i * handrotrange)), -.1])\n rotmat = rm.rotmat_from_axangle(loc_z, math.radians(90))\n predefined_grasps_rgt.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]),\n loc_z,\n rotmat.dot(np.array([math.cos(math.radians(rgtverticalstart - i * handrotrange)),\n math.sin(math.radians(rgtverticalstart - i * handrotrange)),\n 0])),\n jaw_width=self.rtq85.jawwidth_rng[0]))\n handdirect_rgt.append([math.cos(math.radians(rgtdirstart - i * handrotrange)),\n math.sin(math.radians(rgtdirstart - i * handrotrange)), -.1])\n for i in range(4):\n loc_z = np.array([math.cos(math.radians(rgtdirstart - i * handrotrange)),\n math.sin(math.radians(rgtdirstart - i * handrotrange)), 0])\n rotmat = rm.rotmat_from_axangle(loc_z, math.radians(90))\n predefined_grasps_rgt.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]),\n loc_z,\n rotmat.dot(np.array([math.cos(math.radians(rgtverticalstart - i * handrotrange)),\n math.sin(math.radians(rgtverticalstart - i * handrotrange)),\n 0])),\n jaw_width=self.rtq85.jawwidth_rng[0]))\n handdirect_rgt.append([math.cos(math.radians(rgtdirstart - i * handrotrange)),\n math.sin(math.radians(rgtdirstart - i * handrotrange)), 0])\n\n for i in range(4):\n loc_z = np.array([math.cos(math.radians(rgtdirstart - i * handrotrange)),\n math.sin(math.radians(rgtdirstart - i * handrotrange)), .1])\n rotmat = rm.rotmat_from_axangle(loc_z, math.radians(90))\n predefined_grasps_rgt.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]),\n loc_z,\n rotmat.dot(np.array([math.cos(math.radians(rgtverticalstart - i * handrotrange)),\n math.sin(math.radians(rgtverticalstart - i * handrotrange)),\n 0])),\n jaw_width=self.rtq85.jawwidth_rng[0]))\n handdirect_rgt.append([math.cos(math.radians(rgtdirstart - i * handrotrange)),\n math.sin(math.radians(rgtdirstart - i * handrotrange)), .1])\n return predefined_grasps_rgt, handdirect_rgt\n\n def pushpose(self, axisvec, pushpoint, toggle_debug=False):\n pushposelist = []\n pushpose_rotmatlist = 
[]\n zaxis = np.array([0, 0, 1])\n axisvec_norm = np.linalg.norm(axisvec) ## 円錐の中心のベクトル\n theta = 5\n degree = 90 ## 30\n handrotate = 180 ## 30\n thetamax = 30 ## 60\n thetarange = int(thetamax / theta)\n degreerange = int(360 / degree)\n handrotaterange = int(360 / handrotate)\n for i in range(thetarange):\n referencevec = axisvec + (axisvec_norm * math.tan(math.radians(theta * (i + 1)))) * zaxis\n referencepoint = pushpoint + referencevec\n ## プッシングする点からの相対座標に変換して、クォータニオンを求める\n q_refvec = Quaternion(0, referencepoint[0] - pushpoint[0], referencepoint[1] - pushpoint[1],\n referencepoint[2] - pushpoint[2])\n for j in range(degreerange):\n q_axis = Quaternion(axis=rm.unit_vector(axisvec), degrees=degree * (j + 1)) ## 回転クォータニオン\n q_new = q_axis * q_refvec * q_axis.inverse\n ## 絶対座標に戻す\n point = np.array([q_new[1] + pushpoint[0], q_new[2] + pushpoint[1], q_new[3] + pushpoint[2]])\n # base.pggen.plotSphere(base.render, pos=point, radius=10, rgba=[0,0,1,1])\n handdir = pushpoint - point\n handdir_projection = copy.copy(handdir) ## xy平面への正射影\n handdir_projection[2] = 0\n handdir_projection = rm.unit_vector(handdir_projection)\n ## ハンド座標系の各要素となるベクトル\n handdir = rm.unit_vector(handdir) ## z\n thumb_verticalvec = np.cross(zaxis, handdir_projection) ## x\n zaxis_hand = np.cross(handdir, thumb_verticalvec) ## y\n # pushposelist.append(self.rtq85.approachAt(5,5,5,thumb_verticalvec[0], thumb_verticalvec[1], thumb_verticalvec[2],\n # handdir[0], handdir[1], handdir[2], jawwidth=0))\n ## ハンドの方向を軸に-90度ずつ回転した姿勢を生成\n for k in range(handrotaterange):\n handrotmat = np.empty((0, 3))\n ## test\n # handrotmat = np.append(handrotmat, np.array([handdir]), axis=0)\n # handrotmat = np.append(handrotmat, np.array([thumb_verticalvec]), axis=0)\n # handrotmat = np.append(handrotmat, np.array([zaxis_hand]), axis=0)\n handrotmat = np.append(handrotmat, np.array([thumb_verticalvec]), axis=0)\n handrotmat = np.append(handrotmat, np.array([zaxis_hand]), axis=0)\n handrotmat = np.append(handrotmat, np.array([handdir]), axis=0)\n handrotmat = handrotmat.T\n handrotmat = np.dot(rm.rotmat_from_axangle(handrotmat[:, 2], - math.radians(handrotate * k)),\n handrotmat)\n pushposelist.append(\n self.rtq85.grip_at_with_jczy(np.array([.005, .005, .005]),\n np.array([handrotmat[:, 2][0], handrotmat[:, 2][1], handrotmat[:, 2][2]]),\n np.array([handrotmat[:, 0][0], handrotmat[:, 0][1], handrotmat[:, 0][2]]),\n jaw_width=self.rtq85.jawwidth_rng[0]))\n if toggle_debug:\n self.rtq85.copy().gen_meshmodel().attach_to(base)\n pushpose_rotmatlist.append(handrotmat)\n return pushpose_rotmatlist\n\n\nif __name__ == \"__main__\":\n base = wd.World(cam_pos=[.3, 0, .3], lookat_pos=[0, 0, 0])\n gm.gen_frame().attach_to(base)\n ## プッシング時の姿勢を生成\n axisvec = np.array([0, 1, 0])\n pushpoint = np.array([0, 0, 0])\n p_maker = PoseMaker()\n rotmatlist = p_maker.pushpose(axisvec, pushpoint, toggle_debug=True)\n print(\"len\", len(rotmatlist))\n base.run()\n", "import time\nimport random\nimport networkx as nx\nfrom motion.probabilistic import rrt\n\n\nclass RRTConnect(rrt.RRT):\n\n def __init__(self, robot_s):\n super().__init__(robot_s)\n self.roadmap_start = nx.Graph()\n self.roadmap_goal = nx.Graph()\n\n def _extend_roadmap(self,\n component_name,\n roadmap,\n conf,\n ext_dist,\n goal_conf,\n obstacle_list=[],\n otherrobot_list=[],\n animation=False):\n \"\"\"\n find the nearest point between the given roadmap and the conf and then extend towards the conf\n :return:\n author: weiwei\n date: 20201228\n \"\"\"\n nearest_nid = self._get_nearest_nid(roadmap, 
conf)\n new_conf_list = self._extend_conf(roadmap.nodes[nearest_nid]['conf'], conf, ext_dist)\n for new_conf in new_conf_list:\n if self._is_collided(component_name, new_conf, obstacle_list, otherrobot_list):\n return -1\n else:\n new_nid = random.randint(0, 1e16)\n roadmap.add_node(new_nid, conf=new_conf)\n roadmap.add_edge(nearest_nid, new_nid)\n nearest_nid = new_nid\n # all_sampled_confs.append([new_node.point, False])\n if animation:\n self.draw_wspace([self.roadmap_start, self.roadmap_goal], self.start_conf, self.goal_conf,\n obstacle_list, [roadmap.nodes[nearest_nid]['conf'], conf], new_conf, '^c')\n # check goal\n if self._goal_test(conf=roadmap.nodes[new_nid]['conf'], goal_conf=goal_conf, threshold=ext_dist):\n roadmap.add_node('connection', conf=goal_conf) # TODO current name -> connection\n roadmap.add_edge(new_nid, 'connection')\n return 'connection'\n else:\n return nearest_nid\n\n def _smooth_path(self,\n component_name,\n path,\n obstacle_list=[],\n otherrobot_list=[],\n granularity=2,\n iterations=50,\n animation=False):\n smoothed_path = path\n for _ in range(iterations):\n if len(smoothed_path) <= 2:\n return smoothed_path\n i = random.randint(0, len(smoothed_path) - 1)\n j = random.randint(0, len(smoothed_path) - 1)\n if abs(i - j) <= 1:\n continue\n if j < i:\n i, j = j, i\n shortcut = self._shortcut_conf(smoothed_path[i], smoothed_path[j], granularity, exact_end=True)\n if (len(shortcut) <= (j - i) + 1) and all(not self._is_collided(component_name=component_name,\n conf=conf,\n obstacle_list=obstacle_list,\n otherrobot_list=otherrobot_list)\n for conf in shortcut):\n smoothed_path = smoothed_path[:i] + shortcut + smoothed_path[j + 1:]\n if animation:\n self.draw_wspace([self.roadmap_start, self.roadmap_goal], self.start_conf, self.goal_conf,\n obstacle_list, shortcut=shortcut, smoothed_path=smoothed_path)\n return smoothed_path\n\n def plan(self,\n component_name,\n start_conf,\n goal_conf,\n obstacle_list=[],\n otherrobot_list=[],\n ext_dist=2,\n rand_rate=70,\n max_iter=1000,\n max_time=15.0,\n smoothing_iterations=50,\n animation=False):\n self.roadmap.clear()\n self.roadmap_start.clear()\n self.roadmap_goal.clear()\n self.start_conf = start_conf\n self.goal_conf = goal_conf\n # check start and goal\n if self._is_collided(component_name, start_conf, obstacle_list, otherrobot_list):\n print(\"The start robot_s configuration is in collision!\")\n return None\n if self._is_collided(component_name, goal_conf, obstacle_list, otherrobot_list):\n print(\"The goal robot_s configuration is in collision!\")\n return None\n if self._goal_test(conf=start_conf, goal_conf=goal_conf, threshold=ext_dist):\n return [start_conf, goal_conf]\n self.roadmap_start.add_node('start', conf=start_conf)\n self.roadmap_goal.add_node('goal', conf=goal_conf)\n last_nid = 'goal'\n tic = time.time()\n for _ in range(max_iter):\n toc = time.time()\n if max_time > 0.0:\n if toc - tic > max_time:\n print(\"Too much motion time! 
Failed to find a path.\")\n return None\n # Random Sampling\n while True:\n if last_nid != -1:\n goal_nid = last_nid\n goal_conf = self.roadmap_goal.nodes[goal_nid]['conf']\n rand_conf = self._sample_conf(component_name=component_name, rand_rate=rand_rate,\n default_conf=goal_conf)\n # goal_nid = 'goal'\n last_nid = self._extend_roadmap(component_name=component_name,\n roadmap=self.roadmap_start,\n conf=rand_conf,\n ext_dist=ext_dist,\n goal_conf=goal_conf,\n obstacle_list=obstacle_list,\n otherrobot_list=otherrobot_list,\n animation=animation)\n if last_nid != -1:\n break\n if last_nid == 'connection':\n self.roadmap = nx.compose(self.roadmap_start, self.roadmap_goal)\n self.roadmap.add_edge(last_nid, goal_nid)\n break\n else:\n while True:\n if last_nid != -1:\n goal_nid = last_nid\n goal_conf = self.roadmap_start.nodes[goal_nid]['conf']\n rand_conf = self._sample_conf(component_name=component_name, rand_rate=rand_rate,\n default_conf=goal_conf)\n last_nid = self._extend_roadmap(component_name=component_name,\n roadmap=self.roadmap_goal,\n conf=rand_conf,\n ext_dist=ext_dist,\n goal_conf=goal_conf,\n obstacle_list=obstacle_list,\n otherrobot_list=otherrobot_list,\n animation=animation)\n if last_nid != -1:\n break\n if last_nid == 'connection':\n self.roadmap = nx.compose(self.roadmap_start, self.roadmap_goal)\n self.roadmap.add_edge(last_nid, goal_nid)\n break\n else:\n print(\"Reach to maximum iteration! Failed to find a path.\")\n return None\n path = self._path_from_roadmap()\n smoothed_path = self._smooth_path(component_name=component_name,\n path=path,\n obstacle_list=obstacle_list,\n otherrobot_list=otherrobot_list,\n granularity=ext_dist,\n iterations=smoothing_iterations,\n animation=animation)\n return smoothed_path\n\n\nif __name__ == '__main__':\n import numpy as np\n import matplotlib.pyplot as plt\n import robot_sim._kinematics.jlchain as jl\n import robot_sim.robots.robot_interface as ri\n\n\n class XYBot(ri.RobotInterface):\n\n def __init__(self, pos=np.zeros(3), rotmat=np.eye(3), name='XYBot'):\n super().__init__(pos=pos, rotmat=rotmat, name=name)\n self.jlc = jl.JLChain(homeconf=np.zeros(2), name='XYBot')\n self.jlc.jnts[1]['type'] = 'prismatic'\n self.jlc.jnts[1]['loc_motionax'] = np.array([1, 0, 0])\n self.jlc.jnts[1]['loc_pos'] = np.zeros(3)\n self.jlc.jnts[1]['motion_rng'] = [-2.0, 15.0]\n self.jlc.jnts[2]['type'] = 'prismatic'\n self.jlc.jnts[2]['loc_motionax'] = np.array([0, 1, 0])\n self.jlc.jnts[2]['loc_pos'] = np.zeros(3)\n self.jlc.jnts[2]['motion_rng'] = [-2.0, 15.0]\n self.jlc.reinitialize()\n\n def fk(self, component_name='all', jnt_values=np.zeros(2)):\n if component_name != 'all':\n raise ValueError(\"Only support hnd_name == 'all'!\")\n self.jlc.fk(jnt_values)\n\n def rand_conf(self, component_name='all'):\n if component_name != 'all':\n raise ValueError(\"Only support hnd_name == 'all'!\")\n return self.jlc.rand_conf()\n\n def get_jntvalues(self, component_name='all'):\n if component_name != 'all':\n raise ValueError(\"Only support hnd_name == 'all'!\")\n return self.jlc.get_jnt_values()\n\n def is_collided(self, obstacle_list=[], otherrobot_list=[]):\n for (obpos, size) in obstacle_list:\n dist = np.linalg.norm(np.asarray(obpos) - self.get_jntvalues())\n if dist <= size / 2.0:\n return True # collision\n return False # safe\n\n\n # ====Search Path with RRT====\n obstacle_list = [\n ((5, 5), 3),\n ((3, 6), 3),\n ((3, 8), 3),\n ((3, 10), 3),\n ((7, 5), 3),\n ((9, 5), 3),\n ((10, 5), 3),\n ((10, 0), 3),\n ((10, -2), 3),\n ((10, -4), 3),\n ((0, 12), 3),\n 
((-1, 10), 3),\n ((-2, 8), 3)\n ] # [x,y,size]\n # Set Initial parameters\n robot = XYBot()\n rrtc = RRTConnect(robot)\n path = rrtc.plan(component_name='all', start_conf=np.array([0, 0]), goal_conf=np.array([5, 10]),\n obstacle_list=obstacle_list,\n ext_dist=1, rand_rate=70, max_time=300, animation=True)\n # import time\n # total_t = 0\n # for i in range(100):\n # tic = time.time()\n # path = rrtc.plan(seed_jnt_values=np.array([0, 0]), end_conf=np.array([5, 10]), obstacle_list=obstacle_list,\n # ext_dist=1, rand_rate=70, max_time=300, hnd_name=None, animation=False)\n # toc = time.time()\n # total_t = total_t + toc - tic\n # print(total_t)\n # Draw final path\n print(path)\n rrtc.draw_wspace([rrtc.roadmap_start, rrtc.roadmap_goal],\n rrtc.start_conf, rrtc.goal_conf, obstacle_list, delay_time=0)\n plt.plot([conf[0] for conf in path], [conf[1] for conf in path], linewidth=7, linestyle='-', color='c')\n # plt.savefig(str(rrtc.img_counter)+'.jpg')\n # pathsm = smoother.pathsmoothing(path, rrt, 30)\n # plt.plot([point[0] for point in pathsm], [point[1] for point in pathsm], '-r')\n # plt.pause(0.001) # Need for Mac\n plt.show()\n" ]
[ [ "numpy.arange", "numpy.zeros", "numpy.tile", "numpy.dtype" ], [ "numpy.arange", "numpy.array", "numpy.tile", "numpy.ones" ], [ "numpy.random.random", "numpy.dtype", "numpy.append", "numpy.asanyarray", "numpy.argmax", "numpy.shape", "numpy.any", "numpy.array", "numpy.vstack" ], [ "numpy.cross", "numpy.linspace", "numpy.eye", "numpy.linalg.norm", "numpy.column_stack", "numpy.array", "scipy.spatial.cKDTree" ], [ "numpy.array", "numpy.linalg.norm", "numpy.empty", "numpy.cross" ], [ "numpy.asarray", "numpy.eye", "matplotlib.pyplot.plot", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NathanLoPresto/UI-for-servicing-OP-AMPs
[ "75605aa8b2315c1bbf3e7eee0f8ad706fa7e4f14" ]
[ "Python/DDR.py" ]
[ "import ok\nfrom fpga import FPGA\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plot\nimport struct\nfrom collections import namedtuple\n\nBLOCK_SIZE = (16384)\nWRITE_SIZE=(8*1024*1024)\nREAD_SIZE = (8*1024*1024)\ng_nMemSize = (8*1024*1024)\nsample_size = (524288)\nep = namedtuple('ep', 'addr bits type')\ncontrol = ep(0x00, [i for i in range(32)], 'wi') # note this is active low \n\n# wire outs for \"1 deep FIFO\" \none_deep_fifo = ep(0x20, [i for i in range(32)], 'wo')\n\n# triggers in \nvalid = ep(0x40, 0, 'ti')\nfpga_reset = ep(0x40, 1, 'ti')\nfifo_reset = ep(0x40, 2, 'ti')\n\n#given the amplitude, and the time between each step, returns array to be plotted\ndef make_flat_voltage(input_voltage):\n time_axis = np.arange (0, np.pi*2 , (1/sample_size*2*np.pi) )\n amplitude = np.arange (0, np.pi*2 , (1/sample_size*2*np.pi) )\n for x in range (len(amplitude)):\n amplitude[x] = input_voltage\n amplitude = amplitude.astype(np.int32)\n return time_axis, amplitude\n\n#Given the amplitude and period, returns an array to be plotted \ndef make_sin_wave(amplitude_shift, frequency_shift=16):\n time_axis = np.arange (0, np.pi*2 , (1/sample_size*2*np.pi) )\n print (\"length of time axis after creation \", len(time_axis))\n amplitude = (amplitude_shift*1000*np.sin(time_axis))\n y = len(amplitude)\n for x in range (y):\n amplitude[x]= amplitude[x]+(10000)\n for x in range (y):\n amplitude[x]= (int)(amplitude[x]/20000*16384)\n amplitude = amplitude.astype(np.int32)\n return time_axis, amplitude\n\n#given a buffer, it writes a bytearray to the DDR3\ndef writeSDRAM(g_buf):\n\n print (\"Length of buffer at the top of WriteSDRAM\", len(g_buf))\n #Reset FIFOs\n f.set_wire(0x30, 4)\n f.set_wire(0x03, 0)\n f.set_wire(0x03, 2)\n\n print (\"Writing to DDR...\")\n time1 = time.time()\n #for i in range ((int)(len(g_buf)/WRITE_SIZE)):\n r = f.xem.WriteToBlockPipeIn( epAddr= 0x80, blockSize= BLOCK_SIZE,\n data= g_buf[0:(len(g_buf))])\n print (\"The length of the write is \", r)\n \n time2 = time.time()\n time3 = (time2-time1)\n mbs = (int)(r/1024/1024/ time3)\n print (\"The speed of the write was \", mbs, \" MegaBytes per second\")\n\n #below sets the HDL into read mode\n f.set_wire(0x03, 4)\n f.set_wire(0x03, 0)\n f.set_wire(0x03, 1)\n\n#reads to an empty array passed to the function\ndef readSDRAM():\n amplitude = np.zeros((sample_size,), dtype=int)\n pass_buf = bytearray(amplitude)\n #Reset FIFOs\n #below sets the HDL into read mode\n f.set_wire(0x03, 4)\n f.set_wire(0x03, 0)\n f.set_wire(0x03, 1)\n\n print (\"Reading from DDR...\")\n #Address changed to A5\n for i in range ((int)(g_nMemSize/WRITE_SIZE)):\n r = f.xem.ReadFromBlockPipeOut( epAddr= 0xA0, blockSize= BLOCK_SIZE,\n data= pass_buf)\n print (\"The length of the read is:\", r)\n return pass_buf\n\n#given a buffer, it unpacks into into human readable float values\ndef unpack(buf):\n unpacked_var = []\n for x in range (sample_size):\n unpacked_var.append(struct.unpack('i', buf[(x*4):((x+1)*4)]))\n return unpacked_var\n\n#Given two arrays, plots the x and y axis with hardcoded axis names \ndef testplot(x_axis, y_axis):\n plot.plot(x_axis, y_axis)\n plot.title('The outputted wave should look like this')\n plot.xlabel('time')\n plot.ylabel('amplitude (millivolts)')\n plot.grid(True, which = 'both')\n plot.axhline(y=0, color = 'k')\n plot.show()\n\n#given an amplitude and a period, it will write a waveform to the DDR3\ndef write_sin_wave (a):\n time_axis, g_buf_init = make_sin_wave(a)\n print (\"The length of the array before casting \", 
len(g_buf_init))\n pass_buf = bytearray(g_buf_init)\n writeSDRAM(pass_buf)\n\n#given and amplitude and a period, it will write a step function to the DDR3 \ndef write_flat_voltage(input_voltage):\n time_axis, g_buf_init = make_flat_voltage(input_voltage)\n pass_buf2 = bytearray(g_buf_init)\n writeSDRAM(pass_buf2)\n\n#Reads and prints the contents of the DDR3\ndef print_DDR3():\n g_rbuf = readSDRAM()\n unpacked_g_rbuf = np.array(unpack(g_rbuf)).astype('float64')\n for x in range (len(unpacked_g_rbuf)):\n unpacked_g_rbuf[x] = (unpacked_g_rbuf[x]/1000)\n testplot(np.arange (0, sample_size, 1), unpacked_g_rbuf)\ndef send_trig(ep_bit):\n ''' \n expects a single bit, not yet implement for list of bits \n '''\n f.xem.ActivateTriggerIn(ep_bit.addr, ep_bit.bits)\n\nif __name__ == \"__main__\":\n\n f = FPGA(bitfile = '728.bit')\n if (False == f.init_device()):\n raise SystemExit\n \n #Wait for the configuration\n time.sleep(3)\n factor = (int)(sample_size/8)\n f.xem.SetWireInValue(0x04, factor)\n #f.xem.SetWireInValue(0x04, 0xFF)\n f.xem.UpdateWireIns()\n\n #Sample rate speed, to bits 18:9\n f.xem.SetWireInValue(0x02, 0x0000A000, 0x0003FF00 )\n f.xem.UpdateWireIns()\n write_sin_wave(2)\n f.xem.WriteRegister(0x80000010, 0x00003410)\n f.xem.ActivateTriggerIn(0x40, 8)\n #f.xem.UpdateWireOuts()\n #print (f.xem.GetWireOutValue(0x3E))\n\n '''\n time.sleep(2)\n dacs = [1,2,3,4]\n # SPI Master configuration: divide reg, ctrl reg, SS register \n # MSB: 8 - set address, 4 - write data \n\n # creg_val = 0x40003610 # Char length of 16; set both Tx_NEG, Rx_NEG; set ASS, IE. ADS7952\n creg_val = 0x40003010 # Char length of 16; clear both Tx_NEG, Rx_NEG; set ASS, IE. AD5453 \n # val = 0x40001fff # AD5453 (half-scale)\n\n for val in [0x80000051, 0x40000013, # divider (need to look into settings of 1 and 2 didn't show 16 clock cycles) \n 0x80000041, creg_val, # control register (CHAR_LEN = 16, bits 10,9, 13 and 12)\n 0x80000061, 0x40000001]: # slave select (just setting bit0)\n\n f.set_wire(0x00, val, mask = 0xffffffff)\n send_trig(valid) \n\n # now send SPI command \n value = 0x40003FFF # AD5453 (half-scale)\n\n for val in [0x80000001, value, # Tx register, data to send \n 0x80000041, creg_val | (1 << 8)]: # Control register - GO (bit 8)\n f.set_wire(0x00, val, mask = 0xffffffff)\n send_trig(valid) \n'''" ]
[ [ "matplotlib.pyplot.axhline", "matplotlib.pyplot.title", "numpy.arange", "numpy.sin", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]