Columns: repo_name (string, length 6 to 130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence), possible_versions (list)
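Each row below holds one repository, with the remaining columns as parallel per-file sequences (commit sha, file path, source code, APIs called, and candidate library versions). A minimal sketch of iterating such rows follows; it assumes the rows are serialized as JSON Lines and the file name "api_usage.jsonl" is hypothetical, since the actual storage format is not shown here.

```python
# Minimal sketch, assuming JSON Lines storage; field names follow the schema above.
import json


def iter_records(path="api_usage.jsonl"):
    """Yield one dict per dataset row (hypothetical file name)."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)


for rec in iter_records():
    # repo_name is a plain string; hexsha, file_path, code, apis and
    # possible_versions are parallel sequences with one entry per file.
    for sha, file_path, code, apis, versions in zip(
        rec["hexsha"],
        rec["file_path"],
        rec["code"],
        rec["apis"],
        rec["possible_versions"],
    ):
        print(rec["repo_name"], file_path, apis)
```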
cristianmatache/HOLA
[ "28baeae1ee165df702f6e230b29f8433d96c7009" ]
[ "analysis/infra/igr_runner.py" ]
[ "# Copyright 2021 BlackRock, Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Callable, cast\n\nimport pandera.typing as pat\nfrom pandas import concat\n\nfrom analysis.infra.data_persister import Analysis\nfrom analysis.optimizers.benchmarks.ackley import ackley\nfrom analysis.optimizers.benchmarks.ackley import get_params as get_ackley_params\nfrom analysis.optimizers.benchmarks.branin import PARAMS as BRANIN_PARAMS\nfrom analysis.optimizers.benchmarks.branin import branin_np\nfrom analysis.optimizers.benchmarks.bukin_6 import PARAMS as BUKIN_PARAMS\nfrom analysis.optimizers.benchmarks.bukin_6 import bukin_6_np\nfrom analysis.optimizers.benchmarks.drop_wave import PARAMS as DROP_WAVE_PARAMS\nfrom analysis.optimizers.benchmarks.drop_wave import drop_wave_np\nfrom analysis.optimizers.benchmarks.egg_holder import PARAMS as EGG_HOLDER_PARAMS\nfrom analysis.optimizers.benchmarks.egg_holder import egg_holder_np\nfrom analysis.optimizers.benchmarks.forrester import PARAMS as FORRESTER_PARAMS\nfrom analysis.optimizers.benchmarks.forrester import forrester_np\nfrom analysis.optimizers.benchmarks.holder_table import PARAMS as HOLDER_TABLE_PARAMS\nfrom analysis.optimizers.benchmarks.holder_table import holder_table_np\nfrom analysis.optimizers.benchmarks.levy_13 import PARAMS as LEVY_13_PARAMS\nfrom analysis.optimizers.benchmarks.levy_13 import levy_13_np\nfrom analysis.optimizers.benchmarks.rastrigin import get_params as get_rastrigin_params\nfrom analysis.optimizers.benchmarks.rastrigin import rastrigin\nfrom analysis.optimizers.benchmarks.schwefel import get_params as get_schwefel_params\nfrom analysis.optimizers.benchmarks.schwefel import schwefel\nfrom analysis.optimizers.benchmarks.six_hump_camel import PARAMS as SIX_HUMP_PARAMS\nfrom analysis.optimizers.benchmarks.six_hump_camel import six_hump_camel_np\nfrom hola.igr_algorithm import IterativeGridRefinement\nfrom hola.params import ParamConfig\n\n\n@dataclass\nclass Bench:\n func: Callable\n params: dict[str, ParamConfig]\n name: str\n\n\nBENCHMARKS = [\n Bench(ackley, get_ackley_params(2), \"ackley_2d\"),\n Bench(rastrigin, get_rastrigin_params(2), \"rastrigin_2d\"),\n Bench(schwefel, get_schwefel_params(2), \"schwefel_2d\"),\n Bench(branin_np, BRANIN_PARAMS, \"branin\"),\n Bench(bukin_6_np, BUKIN_PARAMS, \"bukin6\"),\n Bench(drop_wave_np, DROP_WAVE_PARAMS, \"drop_wave\"),\n Bench(egg_holder_np, EGG_HOLDER_PARAMS, \"egg_holder\"),\n Bench(six_hump_camel_np, SIX_HUMP_PARAMS, \"six_hump_camel\"),\n Bench(forrester_np, FORRESTER_PARAMS, \"forrester\"),\n Bench(holder_table_np, HOLDER_TABLE_PARAMS, \"holder_table\"),\n Bench(levy_13_np, LEVY_13_PARAMS, \"holder_table\"),\n]\n\n\ndef get_igr_values() -> pat.DataFrame[Analysis]:\n dfs: list[pat.DataFrame[Analysis]] = []\n for bench in BENCHMARKS:\n print(f\"Function: {bench.func}\")\n tuner = IterativeGridRefinement(bench.params, spacing=4)\n for iterations in [25, 50, 75, 100, 200, 300, 500, 1000]:\n best 
= tuner.tune(bench.func, max_iterations=iterations)\n dfs.append(Analysis.build_row(bench.name, \"IGR\", 0, iterations, best.val, str(best.params)))\n tuner = IterativeGridRefinement(bench.params, spacing=9)\n for iterations in [25, 50, 75, 100, 200, 300, 500, 1000]:\n best = tuner.tune(bench.func, max_iterations=iterations)\n dfs.append(Analysis.build_row(bench.name, \"IGR\", 1, iterations, best.val, str(best.params)))\n df = concat(dfs, ignore_index=True)\n return cast(pat.DataFrame[Analysis], df)\n\n\nif __name__ == \"__main__\":\n print(get_igr_values().drop(columns=Analysis.best_params))\n" ]
[ [ "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
renqianluo/GBDT-NAS
[ "c3409ddcfcaf8049b5ad4ae7404fc8bb5c118591" ]
[ "imagenet/model.py" ]
[ "import os\nimport sys\nimport math\nimport time\nimport numpy as np\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom layers import set_layer_from_config\nfrom utils import BasicUnit\n \n\nclass MobileInvertedResidualBlock(BasicUnit):\n def __init__(self, mobile_inverted_conv, shortcut):\n super(MobileInvertedResidualBlock, self).__init__()\n\n self.mobile_inverted_conv = mobile_inverted_conv\n self.shortcut = shortcut\n\n def forward(self, x):\n if self.mobile_inverted_conv.is_zero_layer():\n res = x\n elif self.shortcut is None or self.shortcut.is_zero_layer():\n res = self.mobile_inverted_conv(x)\n else:\n conv_x = self.mobile_inverted_conv(x)\n skip_x = self.shortcut(x)\n res = skip_x + conv_x\n return res\n\n @property\n def unit_str(self):\n return '(%s, %s)' % (\n self.mobile_inverted_conv.unit_str, self.shortcut.unit_str if self.shortcut is not None else None\n )\n\n @property\n def config(self):\n return {\n 'name': MobileInvertedResidualBlock.__name__,\n 'mobile_inverted_conv': self.mobile_inverted_conv.config,\n 'shortcut': self.shortcut.config if self.shortcut is not None else None,\n }\n\n @staticmethod\n def build_from_config(config):\n mobile_inverted_conv = set_layer_from_config(config['mobile_inverted_conv'])\n shortcut = set_layer_from_config(config['shortcut'])\n return MobileInvertedResidualBlock(mobile_inverted_conv, shortcut)\n\n\nclass NASNet(BasicUnit):\n\n def __init__(self, first_conv, blocks, feature_mix_layer, classifier):\n super(NASNet, self).__init__()\n\n self.first_conv = first_conv\n self.blocks = nn.ModuleList(blocks)\n self.feature_mix_layer = feature_mix_layer\n self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)\n self.classifier = classifier\n\n def forward(self, x):\n x = self.first_conv(x)\n for block in self.blocks:\n x = block(x)\n if self.feature_mix_layer:\n x = self.feature_mix_layer(x)\n x = self.global_avg_pooling(x)\n x = x.view(x.size(0), -1) # flatten\n x = self.classifier(x)\n return x\n\n @property\n def unit_str(self):\n _str = ''\n for block in self.blocks:\n _str += block.unit_str + '\\n'\n return _str\n\n @property\n def config(self):\n return {\n 'name': NASNet.__name__,\n 'bn': self.get_bn_param(),\n 'first_conv': self.first_conv.config,\n 'feature_mix_layer': self.feature_mix_layer.config if self.feature_mix_layer is not None else None,\n 'classifier': self.classifier.config,\n 'blocks': [\n block.config for block in self.blocks\n ],\n }\n\n @staticmethod\n def build_from_config(config):\n first_conv = set_layer_from_config(config['first_conv'])\n feature_mix_layer = set_layer_from_config(config['feature_mix_layer'])\n classifier = set_layer_from_config(config['classifier'])\n blocks = []\n for block_config in config['blocks']:\n blocks.append(MobileInvertedResidualBlock.build_from_config(block_config))\n\n return NASNet(first_conv, blocks, feature_mix_layer, classifier)\n\n def set_bn_param(self, bn_momentum, bn_eps):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.momentum = bn_momentum\n m.eps = bn_eps\n return\n\n def get_bn_param(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n return {\n 'momentum': m.momentum,\n 'eps': m.eps,\n }\n return None\n\n def init_model(self, model_init, init_div_groups=True):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n if model_init == 'he_fout':\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n if init_div_groups:\n n /= m.groups\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif model_init == 'he_fin':\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n if init_div_groups:\n n /= m.groups\n m.weight.data.normal_(0, math.sqrt(2. / n))\n else:\n raise NotImplementedError\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def weight_parameters(self):\n return self.parameters()\n\n @staticmethod\n def _make_divisible(v, divisor, min_val=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_val:\n :return:\n \"\"\"\n if min_val is None:\n min_val = divisor\n new_v = max(min_val, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n" ]
[ [ "torch.nn.ModuleList", "torch.nn.AdaptiveAvgPool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
usgs/MinVel
[ "c56a247e6b612d81a678ca19b2def64ce3aef107" ]
[ "python/tests/test.py" ]
[ "# Import the modules\nimport sys\nimport MinVel as mv\nimport numpy as np\n\nnMin = 1\nnPT = 1\nfl = 'tests/testInput.dat.DONOTCHANGE'\nprint('Reading input file {0:s}'.format(fl))\nf = open(fl,\"r\")\nif f.mode == \"r\":\n nPT = 0\n ln = 0\n for line in f:\n line = line.strip()\n columns = line.split(\",\")\n if ln < 2:\n nMin = len(columns)\n else:\n nPT = nPT + 1\n ln = ln + 1\nf.close()\nT = np.zeros((nPT),dtype=np.float64)\nP = np.zeros((nPT),dtype=np.float64)\nCm = np.zeros((nMin),dtype=np.int8)\nCv = np.zeros((nMin),dtype=np.float64)\nf = open(fl,\"r\")\nif f.mode == \"r\":\n ln = 0\n jT = 0\n for line in f:\n line = line.strip()\n columns = line.split(\",\")\n if ln == 0:\n for j in range(0,len(columns)):\n Cm[j] = columns[j]\n elif ln == 1:\n for j in range(0,len(columns)):\n Cv[j] = columns[j]\n else:\n T[jT] = columns[0]\n P[jT] = columns[1]\n jT = jT + 1\n ln = ln + 1\nf.close()\nKc = np.zeros((nPT),dtype=np.float64)\nGc = np.zeros((nPT),dtype=np.float64)\nEc = np.zeros((nPT),dtype=np.float64)\nlc = np.zeros((nPT),dtype=np.float64)\nvc = np.zeros((nPT),dtype=np.float64)\nVpc = np.zeros((nPT),dtype=np.float64)\nVsc = np.zeros((nPT),dtype=np.float64)\ndenc = np.zeros((nPT),dtype=np.float64)\nfl = 'tests/results.npy'\nprint('Reading results file {0:s}'.format(fl))\nCompare = np.load(fl)\n\n# MAke sure volume fractions sum to 1\nif sum(Cv) < 1:\n print('Composition does not sum to one. - Exiting')\n sys.exit()\n\nPar, MinNames, nPar, nAllMin = mv.loadPar('../database/MineralPhysicsDatabase.nc')\nMinIndex = Par[0,:];\n\nprint('{0:21s}{1:20s}'.format('Mineral','Volume fraction'))\nfor j in range(0,nMin):\n k = mv.find(MinIndex,Cm[j]);\n print(MinNames[:,k].tobytes().decode('utf-8'),'(',Cv[j],')')\nif nPT > 1:\n print('There are',nPT,'temperature and pressure points')\nelse:\n print('Temperature',T)\n print('Pressure',P)\nprint('')\n\nK, G, E, l, v, Vp, Vs, den = mv.CalcMV(Cm,Cv,T,P);\nprint('Difference between calculation and expectation for 8 parameters')\nprint(sum(K-Compare[0,:]))\nprint(sum(G-Compare[1,:]))\nprint(sum(E-Compare[2,:]))\nprint(sum(l-Compare[3,:]))\nprint(sum(v-Compare[4,:]))\nprint(sum(Vp-Compare[5,:]))\nprint(sum(Vs-Compare[6,:]))\nprint(sum(den-Compare[7,:]))\nprint('')\n\nsm = sum(K-Compare[0,:]) + sum(G-Compare[1,:]) + sum(E-Compare[2,:]) + sum(l-Compare[3,:]) + \\\n sum(v-Compare[4,:]) + sum(Vp-Compare[5,:]) + sum(Vs-Compare[6,:]) + sum(den-Compare[7,:])\nif sm == 0:\n print('Passed')\nelse:\n print('Did not pass')\n\n\nsys.exit()\n" ]
[ [ "numpy.load", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
theomeb/deepchain-app-pfam-32.0
[ "1397afa0fcd14ce7e0d10ebc14c3159771a3bc62" ]
[ "src/app.py" ]
[ "\"\"\"Template file to develop personal app\nWARNINGS: if you run the app locally and don't have a GPU\n you should choose device='cpu'\n\"\"\"\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\n\nimport torch\nfrom biotransformers import BioTransformers\nfrom deepchain.components import DeepChainApp\nfrom torch import load\n\nimport joblib\n\nScore = Dict[str, float]\nScoreList = List[Score]\n\n\nclass App(DeepChainApp):\n \"\"\"DeepChain App template:\n\n - Implement score_names() and compute_score() methods.\n - Choose a a transformer available on BioTranfformers\n - Choose a personal keras/tensorflow model\n \"\"\"\n\n def __init__(self, device: str = \"cuda:0\"):\n self._device = device\n self.transformer = BioTransformers(backend=\"protbert\", device=device)\n # Make sure to put your checkpoint file in your_app/checkpoint folder\n self._checkpoint_filename: Optional[str] = \"family_model.pt\"\n self._checkpoint_label_encoder: Optional[str] = \"label_encoder.joblib\"\n\n # load_model for tensorflow/keras model-load for pytorch model\n if self._checkpoint_filename is not None:\n self.model = load(self.get_checkpoint_path(__file__))\n\n # load the family label encoder\n self.label_encoder = joblib.load(self.get_checkpoint_label_encoder_path(__file__))\n\n def get_checkpoint_label_encoder_path(self, root_path: str) -> str:\n \"\"\"\n Return solve checkpoint model path\n Args:\n root_path : path of the app file launch\n Raise:\n FileExistsError if no file are found inside the checkpoint folder\n \"\"\"\n checkpoint_dir = (Path(root_path).parent / \"../checkpoint\").resolve()\n path_filename = checkpoint_dir / self._checkpoint_label_encoder\n if not path_filename.is_file():\n raise FileExistsError(\n f\"File {self._checkpoint_label_encoder} not found in checkpoint folder.\"\n f\" Set 'self._checkpoint_filename = None' if file not exists\"\n )\n return path_filename\n\n @staticmethod\n def score_names() -> List[str]:\n \"\"\"App Score Names. Must be specified.\n\n Example:\n return [\"max_probability\", \"min_probability\"]\n \"\"\"\n return [\"protein_family_id\"]\n\n def compute_scores(self, sequences: List[str]) -> ScoreList:\n \"\"\"Return a list of protein family id predictions for the given sequences.\n\n Args:\n sequences: sequences for which to predict family ids\n \"\"\"\n\n try:\n # biotransformers v0.0.3\n x_embedding = self.transformer.compute_embeddings(sequences, pool_mode=[\"mean\"])[\"mean\"]\n except:\n # biotransformers v0.0.2\n x_embedding = self.transformer.compute_embeddings(sequences, pooling_list=[\"mean\"])[\"mean\"]\n\n y_hat = self.model(torch.tensor(x_embedding))\n predictions = torch.max(y_hat, dim=1)[1]\n predictions = predictions.detach().cpu().numpy()\n\n family_predictions = self.label_encoder.inverse_transform(predictions)\n family_list = [{\"protein_family_id\": family_pred} for family_pred in family_predictions]\n\n return family_list\n\n\nif __name__ == \"__main__\":\n\n sequences = [\n \"MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLAGG\",\n \"KALTARQQEVFDLIRDHISQTGMPPTRAEIAQRLGFRSPNAAEEHLKALARKGVIEIVSGASRGIRLLQEE\",\n ]\n app = App(\"cpu\")\n scores = app.compute_scores(sequences)\n print(scores)\n" ]
[ [ "torch.max", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HUTTON9453/Adversarial_Domain_Adaptation_for_Low_Resolution
[ "d233037c940885ab4c6fe768e6a8ea2776f8b993" ]
[ "models/discriminator.py" ]
[ "\"\"\"Discriminator model for ADDA.\"\"\"\n\nfrom torch import nn\n\n\nclass Discriminator(nn.Module):\n \"\"\"Discriminator model for source domain.\"\"\"\n\n def __init__(self, input_dims, hidden_dims, output_dims):\n \"\"\"Init discriminator.\"\"\"\n super(Discriminator, self).__init__()\n\n self.restored = False\n\n self.layer = nn.Sequential(\n nn.Linear(input_dims, hidden_dims),\n nn.ReLU(),\n nn.Linear(hidden_dims, hidden_dims),\n nn.ReLU(),\n nn.Linear(hidden_dims, output_dims),\n nn.LogSoftmax(dim=1)\n )\n\n def forward(self, input):\n \"\"\"Forward the discriminator.\"\"\"\n out = self.layer(input)\n return out\n" ]
[ [ "torch.nn.Linear", "torch.nn.ReLU", "torch.nn.LogSoftmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xiaoyoyoada/MNIST_CIFAR_Classification_TF
[ "edad97f58a5259896b0c2bd677e75c4a703b54cb" ]
[ "cifar_cnn_augment.py" ]
[ "import os\nimport numpy as np\nimport tensorflow as tf\nfrom keras.datasets import cifar10\nfrom sklearn import utils\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils.np_utils import to_categorical\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom cifar_data import batch_features_labels\n\n\ndef variable_summaries(var, name):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope(name):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\ndef main():\n\n summary_path = \"./summary/summary_cifar_aug/\"\n if not os.path.exists(summary_path):\n os.makedirs(summary_path)\n\n # prepare dataset\n print(\"load dataset...\")\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n x_train = x_train.reshape(x_train.shape[0], 32, 32, 3).astype(np.float32) / 255.0\n y_train = to_categorical(y_train, num_classes=10)\n\n x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=12345)\n\n x_test = x_test.reshape(x_test.shape[0], 32, 32, 3).astype(np.float32) / 255.0\n y_test = to_categorical(y_test, num_classes=10)\n\n print(\"dataset augmentation...\")\n image_generator = ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n zca_epsilon=1e-06,\n rotation_range=15,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.0,\n zoom_range=0.0,\n channel_shift_range=0.0,\n fill_mode='nearest',\n cval=0.0,\n horizontal_flip=True,\n vertical_flip=False,\n rescale=None,\n preprocessing_function=None,\n data_format=\"channels_last\")\n\n image_generator.fit(x_train, augment=False)\n x_augmented = x_train.copy()\n y_augmented = y_train.copy()\n x_augmented = image_generator.flow(x_augmented, np.zeros(x_augmented.shape[0]), batch_size=x_augmented.shape[0],\n shuffle=False).next()[0]\n\n x_train = np.concatenate((x_train, x_augmented))\n y_train = np.concatenate((y_train, y_augmented))\n\n x_train, y_train = utils.shuffle(x_train, y_train, random_state=0)\n\n # hyperparameters\n epochs = 100\n batch_size = 200\n learning_rate = 0.001\n init_lr = learning_rate\n lr_decay_rate = 0.03\n grad_clip = 5.0\n l2_norm_rate = 0.001\n\n with tf.Graph().as_default(), tf.Session() as sess:\n print(\"build model...\")\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3), name='input_x')\n y = tf.placeholder(tf.float32, shape=(None, 10), name='output_y')\n lr = tf.placeholder(tf.float32, name='learning_rate')\n is_train = tf.placeholder(tf.bool, shape=[], name=\"is_train\")\n\n with tf.name_scope('input_reshape'):\n image_shaped_input = x\n tf.summary.image('input', image_shaped_input, 10)\n\n with tf.name_scope('conv_layer_1'):\n conv1_filter = tf.get_variable(shape=[3, 3, 3, 32], name=\"conv1_filter\", dtype=tf.float32)\n conv1 = tf.nn.conv2d(x, conv1_filter, strides=[1, 1, 1, 1], padding='SAME')\n conv1 = tf.nn.elu(conv1)\n conv1 = tf.layers.batch_normalization(conv1)\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(conv1_filter), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with 
tf.name_scope(\"conv_layer_2\"):\n conv2_filter = tf.get_variable(shape=[3, 3, 32, 32], name=\"conv2_filter\", dtype=tf.float32)\n conv2 = tf.nn.conv2d(conv1, conv2_filter, strides=[1, 1, 1, 1], padding='SAME')\n conv2 = tf.nn.elu(conv2)\n conv2 = tf.layers.batch_normalization(conv2)\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n conv2 = tf.layers.dropout(conv2, rate=0.2, training=is_train)\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(conv2_filter), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with tf.name_scope(\"conv_layer_3\"):\n conv3_filter = tf.get_variable(shape=[3, 3, 32, 64], name=\"conv3_filter\", dtype=tf.float32)\n conv3 = tf.nn.conv2d(conv2, conv3_filter, strides=[1, 1, 1, 1], padding='SAME')\n conv3 = tf.nn.elu(conv3)\n conv3 = tf.layers.batch_normalization(conv3)\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(conv3_filter), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with tf.name_scope(\"conv_layer_4\"):\n conv4_filter = tf.get_variable(shape=[3, 3, 64, 64], name=\"conv4_filter\", dtype=tf.float32)\n conv4 = tf.nn.conv2d(conv3, conv4_filter, strides=[1, 1, 1, 1], padding='SAME')\n conv4 = tf.nn.elu(conv4)\n conv4 = tf.layers.batch_normalization(conv4)\n conv4 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n conv4 = tf.layers.dropout(conv4, rate=0.3, training=is_train)\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(conv4_filter), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with tf.name_scope(\"conv_layer_5\"):\n conv5_filter = tf.get_variable(shape=[3, 3, 64, 128], name=\"conv5_filter\", dtype=tf.float32)\n conv5 = tf.nn.conv2d(conv4, conv5_filter, strides=[1, 1, 1, 1], padding='SAME')\n conv5 = tf.nn.elu(conv5)\n conv5 = tf.layers.batch_normalization(conv5)\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(conv5_filter), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with tf.name_scope(\"conv_layer_6\"):\n conv6_filter = tf.get_variable(shape=[3, 3, 128, 128], name=\"conv6_filter\", dtype=tf.float32)\n conv6 = tf.nn.conv2d(conv5, conv6_filter, strides=[1, 1, 1, 1], padding='SAME')\n conv6 = tf.nn.elu(conv6)\n conv6 = tf.layers.batch_normalization(conv6)\n conv6 = tf.nn.max_pool(conv6, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n conv6 = tf.layers.dropout(conv6, rate=0.4, training=is_train)\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(conv6_filter), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with tf.name_scope(\"conv_layer_7\"):\n conv7_filter = tf.get_variable(shape=[3, 3, 128, 256], name=\"conv7_filter\", dtype=tf.float32)\n conv7 = tf.nn.conv2d(conv6, conv7_filter, strides=[1, 1, 1, 1], padding='SAME')\n conv7 = tf.nn.elu(conv7)\n conv7 = tf.layers.batch_normalization(conv7)\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(conv7_filter), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with tf.name_scope(\"conv_layer_8\"):\n conv8_filter = tf.get_variable(shape=[3, 3, 256, 256], name=\"conv8_filter\", dtype=tf.float32)\n conv8 = tf.nn.conv2d(conv7, conv8_filter, strides=[1, 1, 1, 1], padding='SAME')\n conv8 = tf.nn.elu(conv8)\n conv8 = tf.layers.batch_normalization(conv8)\n conv8 = tf.nn.max_pool(conv8, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n conv8 = tf.layers.dropout(conv8, rate=0.5, training=is_train)\n # l2 regularizer\n 
weight_loss = tf.multiply(tf.nn.l2_loss(conv8_filter), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with tf.name_scope(\"reshape_layer\"):\n flat = tf.contrib.layers.flatten(conv8)\n\n with tf.name_scope(\"fc_layer_1\"):\n flat_shape = flat.get_shape().as_list()\n weight = tf.get_variable(shape=[flat_shape[1], 512], name=\"fc1_weight\", dtype=tf.float32)\n variable_summaries(weight, name=\"output_weight\")\n bias = tf.get_variable(shape=[512], dtype=tf.float32, initializer=tf.constant_initializer(0.001),\n name=\"fc1_bias\")\n variable_summaries(bias, name=\"output_bias\")\n fc1_pre = tf.matmul(flat, weight) + bias\n tf.summary.histogram('before_relu', fc1_pre)\n fc1 = tf.nn.relu(fc1_pre)\n tf.summary.histogram('after_relu', fc1)\n fc1 = tf.layers.batch_normalization(fc1)\n fc1 = tf.layers.dropout(fc1, rate=0.0, training=is_train)\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(weight), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n with tf.name_scope(\"fc_layer_2\"):\n weight1 = tf.get_variable(shape=[512, 10], name=\"fc2_weight\", dtype=tf.float32)\n bias1 = tf.get_variable(shape=[10], dtype=tf.float32, initializer=tf.constant_initializer(0.001),\n name=\"fc2_bias\")\n logits = tf.matmul(fc1, weight1) + bias1\n # l2 regularizer\n weight_loss = tf.multiply(tf.nn.l2_loss(weight1), l2_norm_rate)\n tf.add_to_collection('weight_losses', weight_loss)\n\n # cost\n with tf.name_scope(\"cost\"):\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))\n weight_cost = tf.add_n(tf.get_collection('weight_losses'))\n cost += weight_cost\n tf.summary.scalar(\"loss\", cost)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n if grad_clip is not None and grad_clip > 0.0:\n grads, vs = zip(*optimizer.compute_gradients(cost))\n grads, _ = tf.clip_by_global_norm(grads, grad_clip)\n train_op = optimizer.apply_gradients(zip(grads, vs))\n else:\n train_op = optimizer.minimize(cost)\n\n # accuracy\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)), tf.float32),\n name='accuracy')\n tf.summary.scalar(\"accuracy\", accuracy)\n\n summary = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(summary_path + \"train\", sess.graph)\n valid_writer = tf.summary.FileWriter(summary_path + 'valid')\n test_writer = tf.summary.FileWriter(summary_path + \"test\")\n\n print('Training...')\n sess.run(tf.global_variables_initializer())\n # training\n cur_step = 0\n for epoch in range(epochs):\n for batch_features, batch_labels in batch_features_labels(x_train, y_train, batch_size):\n cur_step += 1\n _, loss, summ = sess.run([train_op, cost, summary], feed_dict={x: batch_features, y: batch_labels,\n is_train: True, lr: learning_rate})\n train_writer.add_summary(summ, cur_step)\n if cur_step % 20 == 0:\n summ1 = sess.run(summary, feed_dict={x: x_valid, y: y_valid, is_train: False, lr: None})\n valid_writer.add_summary(summ1, cur_step)\n if cur_step % 100 == 0:\n acc, summ2 = sess.run([accuracy, summary], feed_dict={x: x_test, y: y_test, is_train: False,\n lr: None})\n test_writer.add_summary(summ2, cur_step)\n print(\"Step {}, test acc: {}\".format(cur_step, acc))\n if epoch >= 25:\n learning_rate = init_lr / (1 + epoch * lr_decay_rate)\n\n train_writer.close()\n valid_writer.close()\n test_writer.close()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.get_variable", "tensorflow.nn.max_pool", "tensorflow.layers.dropout", "numpy.concatenate", "tensorflow.nn.l2_loss", "tensorflow.contrib.layers.flatten", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.nn.conv2d", "tensorflow.Graph", "tensorflow.layers.batch_normalization", "tensorflow.get_collection", "tensorflow.summary.image", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.square", "tensorflow.argmax", "numpy.zeros", "tensorflow.matmul", "tensorflow.nn.elu", "sklearn.model_selection.train_test_split", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.add_to_collection", "tensorflow.summary.histogram", "tensorflow.nn.relu", "tensorflow.reduce_max", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "sklearn.utils.shuffle", "tensorflow.constant_initializer", "tensorflow.reduce_min", "tensorflow.clip_by_global_norm", "tensorflow.nn.softmax_cross_entropy_with_logits_v2" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
rahulghangas/cogent3
[ "f00cf822efce5f3141b3c7dafac81cb94a311e22" ]
[ "src/cogent3/evolve/simulate.py" ]
[ "#!/usr/bin/env python\n\"\"\"Random sequences and random evolution of sequences in a tree\"\"\"\n\nimport bisect\n\nimport numpy\n\n\n__author__ = \"Peter Maxwell\"\n__copyright__ = \"Copyright 2007-2019, The Cogent Project\"\n__credits__ = [\"Peter Maxwell\"]\n__license__ = \"BSD-3\"\n__version__ = \"2019.9.13a\"\n__maintainer__ = \"Peter Maxwell\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n\ndef argpicks(freqs, random_series):\n partition = numpy.add.accumulate(freqs)\n assert abs(partition[-1] - 1.0) < 1e-6, (freqs, partition)\n while True:\n x = random_series.uniform(0.0, 1.0)\n i = bisect.bisect_left(partition, x)\n yield i\n\n\ndef argpick(freqs, random_series):\n return next(argpicks(freqs, random_series))\n\n\ndef _randomMotifGenerator(random_series, motif_probs):\n motifs = list(motif_probs.keys())\n freqs = [motif_probs[m] for m in motifs]\n for i in argpicks(freqs, random_series):\n yield motifs[i]\n\n\ndef evolve_sequence(\n random_series, motifs, parent_seq, site_cats, psubs, preserved_sites=()\n):\n \"\"\"Evolve a new sequence derived from parent_seq. Uses psubs[site_cats[i]]\n to pick a new motif derived from parent_seq[i]\"\"\"\n seq = []\n randomMotifSources = {}\n for (i, parent_motif) in enumerate(parent_seq):\n if i in preserved_sites:\n edge_motif = preserved_sites[i]\n else:\n if parent_motif not in randomMotifSources:\n mprobs = {}\n parent_motif_index = motifs.index(parent_motif)\n site_cat = site_cats[i]\n psub = psubs[site_cat]\n for (dest_motif_index, dest_motif) in enumerate(motifs):\n prob = psub[parent_motif_index, dest_motif_index]\n mprobs[dest_motif] = prob\n randomMotifSources[site_cat, parent_motif] = _randomMotifGenerator(\n random_series, mprobs\n )\n edge_motif = next(randomMotifSources[site_cat, parent_motif])\n seq.append(edge_motif)\n return seq\n\n\ndef random_sequence(random_series, motif_probs, sequence_length):\n getRootRandomMotif = _randomMotifGenerator(random_series, motif_probs).__next__\n return [getRootRandomMotif() for i in range(sequence_length)]\n\n\nclass AlignmentEvolver(object):\n # Encapsulates settings that are constant throughout the recursive generation\n # of a synthetic alignment.\n\n def __init__(\n self,\n random_series,\n orig_ambig,\n exclude_internal,\n bin_names,\n site_bins,\n psub_for,\n motifs,\n ):\n self.random_series = random_series\n self.orig_ambig = orig_ambig\n self.exclude_internal = exclude_internal\n self.bin_names = bin_names\n self.site_bins = site_bins\n self.psub_for = psub_for\n self.motifs = motifs\n\n def __call__(self, tree, root_sequence):\n # probsd = dict(enumerate(self.bin_probs))\n # bprobs = _randomMotifGenerator(self.random_series, probsd)\n # site_bins = [bprobs.next() for c in range(len(root_sequence))]\n return self.generate_simulated_seqs(tree, root_sequence)\n\n def generate_simulated_seqs(self, parent, parent_seq):\n \"\"\"recursively generate the descendant sequences by descending the tree\n from root.\n Each child will be set by mutating the parent motif based on the probs\n in the psub matrix of this edge.\n\n random_series - get a random numer 0-1 by calling random_series.random()\n length - the desired alignment length\n parent - the edge structure.\n parent_seq - the corresponding sequence. This will be mutated for each\n of its children, based on their psub matricies.\n \"\"\"\n\n # This depends on parameter names 'mprobs', 'alignment2', 'bprobs' and\n # 'psubs'. 
Might be better to integrate it into likelihood_calculation.\n\n if self.exclude_internal and parent.children:\n simulated_sequences = {}\n else:\n simulated_sequences = {parent.name: \"\".join(parent_seq)}\n\n for edge in parent.children:\n # The result for this edge - a list of motifs\n\n # Keep original ambiguity codes\n if edge.name in self.orig_ambig:\n orig_seq_ambig = self.orig_ambig[edge.name]\n else:\n orig_seq_ambig = {}\n\n # Matrix of substitution probabilities\n psubs = [self.psub_for(edge.name, bin) for bin in self.bin_names]\n\n # Make the semi-random sequence for this edge.\n edge_seq = evolve_sequence(\n self.random_series,\n self.motifs,\n parent_seq,\n self.site_bins,\n psubs,\n orig_seq_ambig,\n )\n\n # Pass this new edge sequence on down the tree\n descendant_sequences = self.generate_simulated_seqs(edge, edge_seq)\n simulated_sequences.update(descendant_sequences)\n\n return simulated_sequences\n" ]
[ [ "numpy.add.accumulate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
minhnhatphan/rnsa21-cnn-lstm
[ "77d566d27cf465df0e222721d8fcadcc483a865d" ]
[ "src/data_retriever.py" ]
[ "from torch.utils.data import Dataset\nimport random\nimport torch\nimport glob\nimport os\n\nfrom utils import load_image, uniform_temporal_subsample, load_dicom\n\nclass DataRetriever(Dataset):\n def __init__(self, patient_path, paths, targets, n_frames, img_size, transform=None):\n self.patient_path = patient_path\n self.paths = paths\n self.targets = targets\n self.n_frames = n_frames\n self.img_size = img_size\n self.transform = transform\n \n def __len__(self):\n return len(self.paths)\n \n def read_video(self, vid_paths):\n video = [load_image(path, (self.img_size, self.img_size)) for path in vid_paths]\n if self.transform:\n seed = random.randint(0,99999)\n for i in range(len(video)):\n random.seed(seed)\n video[i] = self.transform(image=video[i])[\"image\"]\n \n video = [torch.tensor(frame, dtype=torch.float32) for frame in video]\n if len(video)==0:\n video = torch.zeros(self.n_frames, self.img_size, self.img_size)\n else:\n video = torch.stack(video) # T * C * H * W\n return video\n \n def __getitem__(self, index):\n _id = self.paths[index]\n patient_path = os.path.join(self.patient_path, f'{str(_id).zfill(5)}/')\n\n channels = []\n for t in [\"FLAIR\", \"T1w\", \"T1wCE\", \"T2w\"]:\n t_paths = sorted(\n glob.glob(os.path.join(patient_path, t, \"*\")), \n key=lambda x: int(x[:-4].split(\"-\")[-1]),\n )\n num_samples = self.n_frames\n if len(t_paths) < num_samples:\n in_frames_path = t_paths\n else:\n in_frames_path = uniform_temporal_subsample(t_paths, num_samples)\n \n channel = self.read_video(in_frames_path)\n if channel.shape[0] == 0:\n channel = torch.zeros(num_samples, self.img_size, self.img_size)\n channels.append(channel)\n \n channels = torch.stack(channels).transpose(0,1)\n y = torch.tensor(self.targets[index], dtype=torch.float)\n return {\"X\": channels.float(), \"y\": y}\n\nclass TestDataRetriever(Dataset):\n def __init__(self, patient_path, paths, n_frames, img_size, transform=None):\n self.patient_path = patient_path\n self.paths = paths\n self.n_frames = n_frames\n self.img_size = img_size\n self.transform = transform\n \n def __len__(self):\n return len(self.paths)\n \n def read_video(self, vid_paths):\n video = [load_dicom(path, self.img_size) for path in vid_paths]\n if len(video)==0:\n video = torch.zeros(self.n_frames, self.img_size, self.img_size)\n else:\n video = torch.stack(video) # T * C * H * W\n return video\n \n def __getitem__(self, index):\n _id = self.paths[index]\n patient_path = os.path.join(self.patient_path, f'{str(_id).zfill(5)}/')\n channels = []\n for t in [\"FLAIR\",\"T1w\", \"T1wCE\", \"T2w\"]:\n t_paths = sorted(\n glob.glob(os.path.join(patient_path, t, \"*\")), \n key=lambda x: int(x[:-4].split(\"-\")[-1]),\n )\n num_samples = self.n_frames\n if len(t_paths) < num_samples:\n in_frames_path = t_paths\n else:\n in_frames_path = uniform_temporal_subsample(t_paths, num_samples)\n \n channel = self.read_video(in_frames_path)\n if channel.shape[0] == 0:\n print(\"1 channel empty\")\n channel = torch.zeros(num_samples, self.img_size, self.img_size)\n channels.append(channel)\n \n channels = torch.stack(channels).transpose(0,1)\n return {\"X\": channels.float(), \"id\": _id}" ]
[ [ "torch.stack", "torch.zeros", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shivamkejriwal9/algo_ds_101
[ "ba61e729ed99fafd5187cc29c34798d26ab0db17" ]
[ "Maths/Calculus/Runge–Kutta_Method/Runge-Kutta.py" ]
[ "import numpy as np\n\n\ndef dydx(x, y):\n return 2*x + np.sin( x * y )\n\n\nx0 = float(input(\"Enter value of x0:- \"))\ny0 = float(input(\"Enter value oy y0:- \"))\nh = float(input(\"Enter step size:- \"))\nn = int(input(\"Enter number of points.\"))\n\ny_vec = np.zeros((n, 1))\ny_vec[0] = y0\nx_vec = np.zeros((n, 1))\nx_vec[0] = x0\nfor i in range(n-1):\n xi, yi = x_vec[i], y_vec[i]\n k1 = dydx(xi, yi)\n k2 = dydx(xi + h/2, yi + h*k1/2)\n k3 = dydx(xi + h/2, yi + h*k2/2)\n k4 = dydx(xi + h, yi + k3)\n x_vec[i+1] = xi + h\n y_vec[i+1] = yi + h / 6 * (k1 + 2*k2 + 2*k3 + k4)\n\nprint(\"--- Results ---\")\nprint(\"x | y\")\nprint(np.array([x_vec, y_vec]).T)\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dazfuller/example-pyspark-unittesting
[ "aabd0815d5c7f125f87d06a8a9a84a24ec9ef02f" ]
[ "demo/dates.py" ]
[ "\"\"\"Provides methods for working with dates in Python.\"\"\"\n\nfrom datetime import date, datetime, time, timedelta, timezone\nfrom typing import Union\n\nimport pandas as pd\nfrom pyspark.sql import DataFrame, SparkSession\nfrom pyspark.sql.functions import (date_format, dayofmonth, from_unixtime,\n month, pandas_udf, to_date, year)\nfrom pyspark.sql.types import IntegerType\n\n\ndef get_iso_weeknumber(dt: Union[date, datetime]) -> int:\n \"\"\"Return the ISO week number for a given date or datetime.\n\n Parameters\n ----------\n dt : Union[date, datetime]\n Date object to return the ISO week number for\n\n Returns\n -------\n int\n The ISO week number\n \"\"\"\n if type(dt) is not datetime and type(dt) is not date:\n return None\n\n return dt.isocalendar()[1]\n\n\n@pandas_udf(IntegerType())\ndef iso_week_of_year(dt: pd.Series) -> pd.Series:\n \"\"\"ISO week of the year as part of ISO 8601 standard.\n\n Week 1 is the week with the first Thursday of the gregorian calendar in it. Further information\n is available in [Wikipedia](https://en.wikipedia.org/wiki/ISO_week_date).\n\n Parameters\n ----------\n dt : pd.Series\n The series containing the date or datetime values\n\n Returns\n -------\n pd.Series\n A new series with the same number of records containing the week number for the year\n \"\"\"\n return dt.apply(lambda x: get_iso_weeknumber(x) if not pd.isnull(x) else pd.NA)\n\n\ndef create_date_dimension(spark: SparkSession, start: datetime, end: datetime) -> DataFrame:\n \"\"\"Create a date dimension.\n\n Generates a date dimension for dates between the start and end dates provided.\n\n Parameters\n ----------\n spark : SparkSession\n Session instance to use to create the date dimension\n start : Union[date, datetime]\n The first day of the date dimension\n end : Union[date, datetime]\n The last day of the date dimensions\n\n Returns\n -------\n DataFrame\n A new DataFrame containing the date dimension data\n\n Raises\n ------\n ValueError\n If an invalid SparkSession instance is provided\n ValueError\n If the start value is not a valid datetime.datetime\n ValueError\n If the end value is not a valid datetime.datetime\n ValueError\n If the provided start date occurs after the end date\n \"\"\"\n if spark is None or type(spark) is not SparkSession:\n raise ValueError(\"A valid SparkSession instance must be provided\")\n\n if type(start) is not datetime:\n raise ValueError(\"Start date must be a datetime.datetime object\")\n\n if type(end) is not datetime:\n raise ValueError(\"End date must be a datetime.datetime object\")\n\n if start >= end:\n raise ValueError(\"Start date must be before the end date\")\n\n if start.tzinfo is None:\n start = datetime.combine(start.date(), time(0, 0, 0), tzinfo=timezone.utc)\n\n if end.tzinfo is None:\n end = datetime.combine(end.date(), time(0, 0, 0), tzinfo=timezone.utc)\n\n end = end + timedelta(days=1)\n\n return (\n spark.range(start=start.timestamp(), end=end.timestamp(), step=24 * 60 * 60)\n .withColumn(\"date\", to_date(from_unixtime(\"id\")))\n .withColumn(\"date_key\", date_format(\"date\", \"yyyyMMdd\").cast(\"int\"))\n .withColumn(\"day\", dayofmonth(\"date\"))\n .withColumn(\"day_name\", date_format(\"date\", \"EEEE\"))\n .withColumn(\"day_short_name\", date_format(\"date\", \"EEE\"))\n .withColumn(\"month\", month(\"date\"))\n .withColumn(\"month_name\", date_format(\"date\", \"MMMM\"))\n .withColumn(\"month_short_name\", date_format(\"date\", \"MMM\"))\n .withColumn(\"year\", year(\"date\"))\n .withColumn(\"week_number\", 
iso_week_of_year(\"date\"))\n )\n" ]
[ [ "pandas.isnull" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tkeskita/phasepy
[ "d5a5573859258857ab1570568ad171c3c9b86d82" ]
[ "phasepy/mixtures.py" ]
[ "from __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom collections import Counter\nfrom pandas import read_excel\nimport os\nfrom copy import copy\nfrom itertools import combinations\nfrom .saft_forcefield import saft_forcefield\nfrom .constants import kb, R\n\n\nclass component(object):\n '''\n Object class for storing pure component information.\n\n Parameters\n ----------\n name : str\n Name of the component\n Tc : float\n Critical temperature [K]\n Pc : float\n Critical pressure [bar]\n Zc : float\n Critical compressibility factor\n Vc : float\n Critical molar volume [:math:`\\mathrm{cm^3/mol}`]\n w : float\n Acentric factor\n c : float\n Volume translation parameter used in cubic EoS [:math:`\\mathrm{cm^3/mol}`]\n cii : List[float]\n Polynomial coefficients for influence parameter used in SGT model\n ksv : List[float]\n Parameter for alpha for PRSV EoS\n Ant : List[float]\n Antoine correlation parameters\n GC : dict\n Group contribution information used in Modified-UNIFAC\n activity coefficient model. Group definitions can be found `here\n <http://www.ddbst.com/PublishedParametersUNIFACDO.html#ListOfMainGroups>`_.\n Mw : float\n molar weight of the fluid [g/mol]\n '''\n\n def __init__(self, name='None', Tc=0, Pc=0, Zc=0, Vc=0, w=0, c=0,\n cii=0, ksv=[0, 0], Ant=[0, 0, 0], GC=None, Mw=1.,\n ms=1, sigma=0, eps=0, lambda_r=12., lambda_a=6.,\n eAB=0., rcAB=1., rdAB=0.4, sites=[0, 0, 0]):\n\n self.name = name\n self.Tc = Tc # Critical Temperature in K\n self.Pc = Pc # Critical Pressure in bar\n self.Zc = Zc # Critical compressibility factor\n self.Vc = Vc # Critical volume in cm3/mol\n if Vc == 0 and Zc != 0:\n self.Vc = R*Zc*Tc/Pc\n elif Vc != 0 and Zc == 0:\n self.Zc = Pc*Vc/(R*Tc)\n self.w = w # Acentric Factor\n self.Ant = Ant # Antoine coefficeint, base e = 2.71\n self.cii = cii # Influence factor SGT, list or array\n self.ksv = ksv\n self.c = c # volume translation for cubic EoS\n self.GC = GC # Dict, Group contribution info\n self.nc = 1\n self.Mw = Mw # molar weight in g/mol\n # Saft Parameters\n\n self.ms = ms\n self.sigma = sigma * 1e-10\n self.eps = eps * kb\n self.lambda_a = np.asarray(lambda_a)\n self.lambda_r = np.asarray(lambda_r)\n self.lambda_ar = self.lambda_r + self.lambda_a\n\n # Association Parameters\n self.eAB = eAB * kb\n self.rcAB = rcAB * self.sigma\n self.rdAB = rdAB * self.sigma\n self.sites = sites\n\n def psat(self, T):\n \"\"\"\n Returns vapour saturation pressure [bar] at a given temperature\n using Antoine equation. Exponential base is :math:`e`.\n\n Parameters\n ----------\n T : float\n Absolute temperature [K]\n \"\"\"\n\n coef = self.Ant\n return np.exp(coef[0]-coef[1]/(T+coef[2]))\n\n def tsat(self, P):\n \"\"\"\n Returns vapour saturation temperature [K] at a given pressure using\n Antoine equation. 
Exponential base is :math:`e`.\n\n Parameters\n ----------\n P : float\n Saturation pressure [bar]\n \"\"\"\n\n coef = self.Ant\n T = - coef[2] + coef[1] / (coef[0] - np.log(P))\n\n return T\n\n def vlrackett(self, T):\n \"\"\"\n Returns liquid molar volume [:math:`\\mathrm{cm^3/mol}`] at a given\n temperature using the Rackett equation.\n\n Parameters\n ----------\n T : float\n Absolute temperature [K]\n \"\"\"\n\n Tr = T/self.Tc\n V = self.Vc*self.Zc**((1-Tr)**(2/7))\n return V\n\n def ci(self, T):\n \"\"\"\n Returns value of SGT model influence parameter\n [:math:`\\mathrm{J m^5 / mol}`] at a given temperature.\n\n Parameters\n ----------\n T : float\n absolute temperature [K]\n \"\"\"\n\n return np.polyval(self.cii, T)\n\n def saftvrmie(self, ms, rhol07):\n lambda_r, lambda_a, ms, eps, sigma = saft_forcefield(ms, self.Tc,\n self.w, rhol07)\n self.lambda_a = np.asarray(lambda_a)\n self.lambda_r = np.asarray(lambda_r)\n self.lambda_ar = self.lambda_r + self.lambda_a\n self.ms = ms\n self.sigma = sigma\n self.eps = eps\n\n\nclass mixture(object):\n '''\n Object class for info about a mixture.\n\n Parameters\n ----------\n component1 : component\n First mixture component object\n component2 : component\n Second mixture component object\n\n Attributes\n ----------\n name : List[str]\n Names of the components\n Tc : List[float]\n Critical temperatures [K]\n Pc : List[float]\n Critical pressures [bar]\n Zc : List[float]\n critical compressibility factors\n Vc : List[float]\n Critical molar volumes [:math:`\\mathrm{cm^3/mol}`]\n w : List[float]\n Acentric factors\n c : List[float]\n Volume translation parameter used in cubic EoS [:math:`\\mathrm{cm^3/mol}`]\n cii : List[list]\n Polynomial coefficients for influence parameter used in SGT model\n ksv : List[list]\n Parameters for alpha for PRSV EoS, if fitted\n Ant : List[list]\n Antoine correlation parameters\n GC : List[dict]\n Group contribution information used in Modified-UNIFAC\n activity coefficient model. 
Group definitions can be found `here\n <http://www.ddbst.com/PublishedParametersUNIFACDO.html#ListOfMainGroups>`_.\n '''\n\n def __init__(self, component1, component2):\n self.names = [component1.name, component2.name]\n self.Tc = [component1.Tc, component2.Tc]\n self.Pc = [component1.Pc, component2.Pc]\n self.Zc = [component1.Zc, component2.Zc]\n self.w = [component1.w, component2.w]\n self.Ant = [component1.Ant, component2.Ant]\n self.Vc = [component1.Vc, component2.Vc]\n self.cii = [component1.cii, component2.cii]\n self.c = [component1.c, component2.c]\n self.ksv = [component1.ksv, component2.ksv]\n self.nc = 2\n self.GC = [component1.GC, component2.GC]\n self.Mw = [component1.Mw, component2.Mw]\n\n self.lr = [component1.lambda_r, component2.lambda_r]\n self.la = [component1.lambda_a, component2.lambda_a]\n self.sigma = [component1.sigma, component2.sigma]\n self.eps = [component1.eps, component2.eps]\n self.ms = [component1.ms, component2.ms]\n self.eAB = [component1.eAB, component2.eAB]\n self.rc = [component1.rcAB, component2.rcAB]\n self.rd = [component1.rdAB, component2.rdAB]\n self.sitesmix = [component1.sites, component2.sites]\n\n def add_component(self, component):\n \"\"\"\n Adds a component to the mixture\n \"\"\"\n self.names.append(component.name)\n self.Tc.append(component.Tc)\n self.Pc.append(component.Pc)\n self.Zc.append(component.Zc)\n self.Vc.append(component.Vc)\n self.w.append(component.w)\n self.Ant.append(component.Ant)\n self.cii.append(component.cii)\n self.c.append(component.c)\n self.ksv.append(component.ksv)\n self.GC.append(component.GC)\n self.Mw.append(component.Mw)\n\n self.lr.append(component.lambda_r)\n self.la.append(component.lambda_a)\n self.sigma.append(component.sigma)\n self.eps.append(component.eps)\n self.ms.append(component.ms)\n self.eAB.append(component.eAB)\n self.rc.append(component.rcAB)\n self.rd.append(component.rdAB)\n self.sitesmix.append(component.sites)\n\n self.nc += 1\n\n def psat(self, T):\n \"\"\"\n Returns array of vapour saturation pressures [bar] at a given\n temperature using Antoine equation. Exponential base is :math:`e`.\n\n Parameters\n ----------\n T : float\n Absolute temperature [K]\n\n Returns\n -------\n Psat : array_like\n saturation pressure of each component [bar]\n \"\"\"\n\n coef = np.vstack(self.Ant)\n return np.exp(coef[:, 0]-coef[:, 1]/(T+coef[:, 2]))\n\n def tsat(self, P):\n \"\"\"\n Returns array of vapour saturation temperatures [K] at a given pressure\n using Antoine equation. Exponential base is :math:`e`.\n\n Parameters\n ----------\n Psat : float\n Saturation pressure [bar]\n\n Returns\n -------\n Tsat : array_like\n saturation temperature of each component [K]\n \"\"\"\n\n coef = np.vstack(self.Ant)\n T = - coef[:, 2] + coef[:, 1] / (coef[:, 0] - np.log(P))\n return T\n\n def vlrackett(self, T):\n \"\"\"\n Returns array of liquid molar volumes [:math:`\\mathrm{cm^3/mol}`] at a\n given temperature using the Rackett equation.\n\n Parameters\n ----------\n T : float\n Absolute temperature [K]\n\n Returns\n -------\n vl : array_like\n liquid volume of each component [cm3 mol-1]\n \"\"\"\n\n Tc = np.array(self.Tc)\n Vc = np.array(self.Vc)\n Zc = np.array(self.Zc)\n Tr = T/Tc\n V = Vc*Zc**((1-Tr)**(2/7))\n return V\n\n def kij_saft(self, kij):\n '''\n Adds kij binary interaction matrix for SAFT-VR-Mie to the\n mixture. Matrix must be symmetrical and the main diagonal must\n be zero.\n\n .. 
math::\n \\epsilon_{ij} = (1-k_{ij}) \\frac{\\sqrt{\\sigma_i^3 \\sigma_j^3}}{\\sigma_{ij}^3} \\sqrt{\\epsilon_i \\epsilon_j}\n\n Parameters\n ----------\n kij: array_like\n Matrix of interaction parameters\n '''\n nc = self.nc\n KIJ = np.asarray(kij)\n shape = KIJ.shape\n\n isSquare = shape == (nc, nc)\n isSymmetric = np.allclose(KIJ, KIJ.T)\n\n if isSquare and isSymmetric:\n self.KIJsaft = kij\n else:\n raise Exception('kij matrix is not square or symmetric')\n\n def kij_ws(self, kij):\n '''\n Adds kij matrix coefficients for WS mixing rule to the\n mixture. Matrix must be symmetrical and the main diagonal must\n be zero.\n\n Parameters\n ----------\n kij: array_like\n Matrix of interaction parameters\n '''\n nc = self.nc\n KIJ = np.asarray(kij)\n shape = KIJ.shape\n\n isSquare = shape == (nc, nc)\n isSymmetric = np.allclose(KIJ, KIJ.T)\n\n if isSquare and isSymmetric:\n self.Kijws = kij\n else:\n raise Exception('kij matrix is not square or symmetric')\n\n def kij_cubic(self, kij):\n '''\n Adds kij matrix coefficients for QMR mixing rule to the\n mixture. Matrix must be symmetrical and the main diagonal must\n be zero.\n\n Parameters\n ----------\n kij: array_like\n Matrix of interaction parameters\n '''\n nc = self.nc\n KIJ = np.asarray(kij)\n shape = KIJ.shape\n\n isSquare = shape == (nc, nc)\n isSymmetric = np.allclose(KIJ, KIJ.T)\n\n if isSquare and isSymmetric:\n self.kij = kij\n else:\n raise Exception('kij matrix is not square or symmetric')\n\n def NRTL(self, alpha, g, g1=None):\n r'''\n Adds NRTL parameters to the mixture.\n\n Parameters\n ----------\n alpha: array\n Aleatory factor\n g: array\n Matrix of energy interactions [K]\n g1: array, optional\n Matrix of energy interactions [1/K]\n\n Note\n ----\n Parameters are evaluated as a function of temperature:\n :math:`\\tau = g/T + g_1`\n '''\n nc = self.nc\n Alpha = np.asarray(alpha)\n shape = Alpha.shape\n\n isSquare = shape == (nc, nc)\n isSymmetric = np.allclose(Alpha, Alpha.T)\n\n if isSquare and isSymmetric:\n self.alpha = Alpha\n else:\n raise Exception('alpha matrix is not square or symmetric')\n\n self.g = g\n if g1 is None:\n g1 = np.zeros_like(g)\n self.g1 = g1\n self.actmodelp = (self.alpha, self.g, self.g1)\n\n def rkt(self, D):\n '''\n Adds a ternary polynomial modification for NRTL model to the mixture.\n\n Parameters\n ----------\n D: array\n Ternary interaction parameter values\n '''\n\n self.rkternario = D\n self.actmodelp = (self.alpha, self.g, self.g1, self.rkternario)\n\n def wilson(self, A):\n '''\n Adds Wilson model coefficients to the mixture.\n Argument matrix main diagonal must be zero.\n\n Parameters\n ----------\n A: array\n Interaction parameter values [K]\n '''\n\n self.Aij = A\n self.actmodelp = (self.Aij, self.vlrackett)\n\n def rkb(self, c, c1=None):\n '''\n Adds binary Redlich Kister polynomial coefficients for\n excess Gibbs energy to the mixture.\n\n Parameters\n ----------\n c: array\n Polynomial values [Adim]\n c1: array, optional\n Polynomial values [K]\n\n Note\n ----\n Parameters are evaluated as a function of temperature:\n :math:`G = c + c_1/T`\n '''\n\n self.rkb = c\n if c1 is None:\n c1 = np.zeros_like(c)\n self.rkbT = c1\n self.actmodelp = (c, c1)\n\n def rk(self, c, c1=None):\n '''\n Adds Redlich Kister polynomial coefficients for\n excess Gibbs energy to the mixture.\n\n Parameters\n ----------\n c: array\n Polynomial values [Adim]\n c1: array, optional\n Polynomial values [K]\n\n Note\n ----\n Parameters are evaluated as a function of temperature:\n :math:`G = c + c_1/T`\n '''\n\n 
nc = self.nc\n combinatory = np.array(list(combinations(range(nc), 2)), dtype=np.int)\n self.combinatory = combinatory\n c = np.atleast_2d(c)\n self.rkp = c\n if c1 is None:\n c1 = np.zeros_like(c)\n c1 = np.atleast_2d(c1)\n self.rkpT = c1\n self.actmodelp = (c, c1, combinatory)\n\n def unifac(self):\n \"\"\"\n Reads the Dortmund database for Modified-UNIFAC model\n to the mixture for calculation of activity coefficients.\n\n Group definitions can be found `here\n <http://www.ddbst.com/PublishedParametersUNIFACDO.html#ListOfMainGroups>`_.\n \"\"\"\n\n # UNIFAC database reading\n database = os.path.join(os.path.dirname(__file__), 'database')\n database += '/dortmund.xlsx'\n qkrk = read_excel(database, 'RkQk', index_col='Especie', engine='openpyxl')\n a0 = read_excel(database, 'A0', index_col='Grupo', engine='openpyxl')\n a0.fillna(0, inplace=True)\n a1 = read_excel(database, 'A1', index_col='Grupo', engine='openpyxl')\n a1.fillna(0, inplace=True)\n a2 = read_excel(database, 'A2', index_col='Grupo', engine='openpyxl')\n a2.fillna(0, inplace=True)\n\n # Reading pure component and mixture group contribution info\n puregc = self.GC\n mix = Counter()\n for i in puregc:\n mix += Counter(i)\n\n subgroups = list(mix.keys())\n\n # Dicts created for each component\n vk = []\n dics = []\n for i in puregc:\n d = dict.fromkeys(subgroups, 0)\n d.update(i)\n dics.append(d)\n vk.append(list(d.values()))\n Vk = np.array(vk)\n\n groups = qkrk.loc[subgroups, 'Grupo ID'].values\n\n a = a0.loc[groups, groups].values\n b = a1.loc[groups, groups].values\n c = a2.loc[groups, groups].values\n\n # Reading info of present groups\n rq = qkrk.loc[subgroups, ['Rk', 'Qk']].values\n Qk = rq[:, 1]\n\n ri, qi = (Vk@rq).T\n ri34 = ri**(0.75)\n\n Xmi = (Vk.T/Vk.sum(axis=1)).T\n t = Xmi*Qk\n tethai = (t.T/t.sum(axis=1)).T\n\n self.actmodelp = (qi, ri, ri34, Vk, Qk, tethai, a, b, c)\n\n def ci(self, T):\n \"\"\"\n Returns the matrix of cij interaction parameters for SGT model at\n a given temperature.\n\n Parameters\n ----------\n T : float\n Absolute temperature [K]\n \"\"\"\n\n n = len(self.cii)\n ci = np.zeros(n)\n for i in range(n):\n ci[i] = np.polyval(self.cii[i], T)\n self.cij = np.sqrt(np.outer(ci, ci))\n return self.cij\n\n def copy(self):\n \"\"\"\n Returns a copy of the mixture object\n \"\"\"\n return copy(self)\n" ]
[ [ "numpy.log", "pandas.read_excel", "numpy.allclose", "numpy.asarray", "numpy.atleast_2d", "numpy.zeros_like", "numpy.zeros", "numpy.exp", "numpy.outer", "numpy.array", "numpy.polyval", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
rakutentech/iterative_training
[ "4bd9b8f64cdf2766af9b6fda0b4cb20d5d75b3bf" ]
[ "main_imagenet_float.py" ]
[ "\"\"\"\nSource: https://github.com/pytorch/examples/tree/master/imagenet\nLicense: BSD 3-clause\n\"\"\"\nimport argparse\nfrom bisect import bisect\nfrom datetime import datetime\nimport json\nimport os\nimport random\nimport shutil\nimport sys\nimport time\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom mlmt.time_stamp import TimeStamp\n\n#model_names = sorted(name for name in models.__dict__\n# if name.islower() and not name.startswith(\"__\")\n# and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to ImageNet dataset')\n\"\"\"\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\n\"\"\"\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n#parser.add_argument('--pretrained', dest='pretrained', action='store_true',\n# help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. 
This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\nparser.add_argument('--save-checkpoint', action='store_true',\n help='Turn on checkpointing (default: off)')\nparser.add_argument('--checkpoint-interval', default=8, type=int,\n help='Checkpoint interval (default: 8 epochs)')\nparser.add_argument('--dry-run', action='store_true',\n help='Dry run (very short) training (default: off)')\n\nparser.add_argument('logdir', metavar='LOGDIR',\n help='directory to write tensorboard logs (default: ./logs)')\nparser.add_argument('--lr-milestones', type=str, default='[84]',\n help='Reduce learning rate by 10 at beginning of each epoch in this list')\n\n\nbest_acc1 = 0\n\n\ndef save_args(args, writer):\n print(\"argv: {}\".format(sys.argv))\n writer.add_text('argv', str(sys.argv), 0)\n\n print(\"args:\")\n options = vars(args)\n for k, v in options.items():\n #print(\" {} {} {} {}\".format(k, v, type(k), type(v)))\n print(\" {} {}\".format(k, v))\n writer.add_text(\"args/\"+k, str(v), 0)\n\ndef experiment_name_from(args) -> str:\n name = f'lr{args.lr}-ep{args.epochs}'\n if args.dry_run:\n name += '-dryrun'\n return name\n\ndef experiment_id() -> str:\n return datetime.now().strftime('%Y-%m-%d_%H%M%S_%f')\n\n\n\n\n\ndef main():\n args = parser.parse_args()\n\n if args.multiprocessing_distributed:\n raise ValueError(\"--multiprocessing-distributed not supported\")\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. 
This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n print(f'gpu {gpu}')\n print(f'ngpus_per_node {ngpus_per_node}')\n global best_acc1\n args.gpu = gpu\n\n if (args.distributed and gpu==0) or not args.distributed:\n print(\"Creating SummaryWriter ...\")\n experiment_dir = os.path.join(args.logdir, experiment_name_from(args) + '_' + experiment_id())\n if not os.path.exists(experiment_dir):\n os.makedirs(experiment_dir)\n tb_writer = SummaryWriter(experiment_dir)\n save_args(args, tb_writer)\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # create model\n \"\"\"\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch]()\n \"\"\"\n print(\"=> creating model resnet18\")\n from models.resnet_imagenet import resnet18\n model = resnet18()\n\n if not torch.cuda.is_available():\n print('using CPU, this will be slow')\n elif args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n print('model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])')\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n print('model = model.cuda(args.gpu)')\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n \"\"\"\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = 
torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n \"\"\"\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n raise ValueError\n\n cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n return\n\n lr_milestones = json.loads(args.lr_milestones)\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n\n learning_rate = adjust_learning_rate(\n optimizer = optimizer, \n epoch = epoch, \n base_lr = args.lr, \n lr_milestones = lr_milestones,\n )\n if (args.distributed and gpu==0) or not args.distributed:\n print(f'Epoch {epoch} LR {learning_rate}')\n tb_writer.add_scalar('train/learning_rate', learning_rate, epoch)\n\n # train for one epoch\n ts = TimeStamp()\n train_acc1, train_acc5 = train(train_loader, model, criterion, optimizer, epoch, args)\n if (args.distributed and gpu==0) or not args.distributed:\n tb_writer.add_scalar('train/duration', ts.elapsed()/60.0, epoch)\n tb_writer.add_scalar('train/acc1', train_acc1, epoch)\n tb_writer.add_scalar('train/acc5', train_acc5, epoch)\n\n # evaluate on validation set\n ts = TimeStamp()\n acc1, acc5 = validate(val_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n if (args.distributed and gpu==0) or not args.distributed:\n tb_writer.add_scalar('val/duration', 
ts.elapsed()/60.0, epoch)\n tb_writer.add_scalar('val/acc1', acc1, epoch)\n #tb_writer.add_scalar('val/best_acc1', best_acc1, epoch)\n tb_writer.add_scalar('val/acc5', acc5, epoch)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n if args.save_checkpoint:\n save_checkpoint(\n state = {\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer' : optimizer.state_dict(),\n }, \n is_best = is_best,\n experiment_dir = experiment_dir,\n epoch = epoch,\n interval = args.checkpoint_interval,\n )\n\n \n # Flush tensorboard data\n if (args.distributed and gpu==0) or not args.distributed:\n tb_writer.close()\n\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() # parameter update\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n if args.dry_run:\n if i > 1: break\n\n return top1.avg, top5.avg\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n if args.dry_run:\n if i > 1: break\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg\n\n\ndef save_checkpoint(*, state, is_best, experiment_dir, epoch, interval):\n \"\"\"Save checkpoints\n Save the running checkpoint.\n Save checkpoints at given 
interval.\n Save the running best checkpoint.\n \"\"\"\n filename = os.path.join(experiment_dir, 'checkpoint.pth.tar')\n print(f'Saving current: {filename}')\n torch.save(state, filename)\n if is_best:\n best = os.path.join(experiment_dir, 'model_best.pth.tar')\n print(f'Saving best: {best}')\n shutil.copyfile(filename, best)\n if (interval > 0) and ((epoch+1) % interval == 0):\n current = os.path.join(experiment_dir, f'checkpoint{epoch}.pth.tar')\n print(f'Saving interval: {current}')\n shutil.copyfile(filename, current)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, base_lr, lr_milestones, gamma=0.1) -> float:\n \"\"\"Sets the learning rate to decay by gamma (multiplicatively) when reaching each milestone\"\"\"\n factor = gamma ** bisect(lr_milestones, epoch)\n lr = base_lr * factor\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.distributed.init_process_group", "torch.multiprocessing.spawn", "torch.utils.data.distributed.DistributedSampler", "torch.cuda.set_device", "torch.manual_seed", "torch.load", "torch.utils.data.DataLoader", "torch.nn.DataParallel", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.cuda.device_count", "torch.nn.parallel.DistributedDataParallel", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YaGiNA/DLfS2
[ "3dbaba7a62c198b50849de2e3b74d92897a4cae7" ]
[ "ch06/rnn_gradient_graph.py" ]
[ "import numpy as np\r\nimport matplotlib as mpl\r\nmpl.use('Agg')\r\nimport matplotlib.pyplot as plt\r\n\r\nN = 2 # Size of minibatch\r\nH = 3 # Number of dimension of hidden vec\r\nT = 20 # Length of time data\r\n\r\ndh = np.ones((N, H))\r\nnp.random.seed(3) # Set seed of random number due to reproducibility\r\n# Wh = np.random.randn(H, H)\r\nWh = np.random.randn(H, H) * 0.5\r\n\r\n\r\nnorm_list = []\r\nfor t in range(T):\r\n dh = np.dot(dh, Wh.T)\r\n norm = np.sqrt(np.sum(dh**2)) / N\r\n norm_list.append(norm)\r\n\r\nprint(norm_list)\r\n\r\n# グラフの描画\r\nplt.plot(np.arange(len(norm_list)), norm_list)\r\nplt.xlabel('time step')\r\nplt.ylabel('norm')\r\n\r\nplt.savefig('graph.png')\r\n" ]
[ [ "numpy.dot", "numpy.random.seed", "matplotlib.use", "matplotlib.pyplot.savefig", "numpy.ones", "numpy.random.randn", "matplotlib.pyplot.xlabel", "numpy.sum", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xapharius/HadoopML
[ "c0129f298007ca89b538eb1a3800f991141ba361" ]
[ "Engine/src/examples/linear_regression.py" ]
[ "import numpy as np\nfrom algorithms.linearRegression.LinearRegressionFactory import LinearRegressionFactory\nfrom algorithms.linearRegression.scipy_linreg import SciPyLinReg\nfrom algorithms.linearRegression.scipy_linreg_factory import SciPyLinRegFactory\nfrom datahandler.numerical.NumericalDataHandler import NumericalDataHandler\nfrom engine.constants.run_type import *\nfrom engine.engine import Engine\nfrom validator.PredictionValidator import PredictionValidator\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n \n print(\"=== Linear Regression Example ===\")\n \n nr_params = 11\n nr_label_dim = 1\n run_type = LOCAL\n data_file = 'hdfs:///user/linda/ml/data/winequality-red.csv' if run_type == HADOOP else '../data/wine-quality/winequality-red.csv'\n input_scalling = None\n target_scalling = None\n \n print( \"\\n data: \" + data_file\n + \"\\n params: \" + str(nr_params)\n + \"\\n label dim: \" + str(nr_label_dim)\n + \"\\n run type: \" + run_type\n + \"\\n input scalling: \" + str(input_scalling)\n + \"\\n target scalling: \" + str(target_scalling)\n + \"\\n\"\n )\n \n # 1. define algorithm\n# regression = LinearRegressionFactory(nr_params)\n regression = SciPyLinRegFactory(SciPyLinReg.RIDGE)\n \n # 2. set data handler (pre-processing, normalization, data set creation)\n data_handler = NumericalDataHandler(nr_params, nr_label_dim, input_scalling=input_scalling, target_scalling=target_scalling)\n \n # 3. run\n engine = Engine(regression, data_file, data_handler=data_handler, verbose=True)\n trained_alg = engine.start(_run_type=run_type)\n \n # 4. validate result\n validation_stats = engine.validate(trained_alg, PredictionValidator(), _run_type=run_type)\n targets = np.array(validation_stats['targets'])\n pred = np.array(validation_stats['pred'])\n plt.plot(targets, 'go')\n plt.plot(pred, 'r+')\n plt.show()\n" ]
[ [ "matplotlib.pyplot.plot", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MartinBCN/BasicML
[ "7ad7bd075c62d883143dd10b54c80287d06a99b0" ]
[ "src/classification/classification_base.py" ]
[ "from abc import ABC, abstractmethod\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom src.utils.activation import softmax, cross_entropy\nfrom src.utils.scoring import classification_rate\nfrom src.utils.transform import one_hot\n\nnp.set_printoptions(linewidth=10000)\nimport logging\nlogger = logging.getLogger()\n# ===========================================================================\n\n\nclass BaseClassification(ABC):\n\n def __init__(self, epochs: int = 10000, early_stop_epochs: int = 5, batch_size: int = None,\n learning_rate: float = 0.00001, velocity: float = 0,\n decay: float = 0.9):\n\n # Hyper Parameter\n self.epochs = epochs\n self.early_stop_epochs = early_stop_epochs\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.reg = 0.000001\n self.velocity = velocity\n self.decay = decay\n\n # Tracking\n self.batch_losses_train = []\n self.epoch_losses_train = []\n self.batch_classification_rate_train = []\n self.epoch_classification_rate_train = []\n\n self.batch_losses_validation = []\n self.epoch_losses_validation = []\n self.batch_classification_rate_validation = []\n self.epoch_classification_rate_validation = []\n\n @abstractmethod\n def _initialize_weights(self, X: np.array, Y: np.array) -> None:\n return\n\n @abstractmethod\n def _forward(self, X: np.array, *args, **kwargs) -> np.array:\n \"\"\"\n\n Parameters\n ----------\n X\n\n Returns\n -------\n\n \"\"\"\n\n @abstractmethod\n def _backward(self, X: np.array, T: np.array, Y: np.array, *args, **kwargs) -> None:\n \"\"\"\n\n Parameters\n ----------\n X\n T\n Y\n\n Returns\n -------\n\n \"\"\"\n\n def predict(self, feature_vector):\n probabilities = self._forward(feature_vector)\n if type(probabilities) == tuple:\n probabilities = probabilities[0]\n result = np.zeros_like(probabilities)\n result[np.arange(len(probabilities)), probabilities.argmax(1)] = 1\n return result\n\n @abstractmethod\n def step(self, feature_batch: np.array, target_batch: np.array):\n \"\"\"\n\n Parameters\n ----------\n feature_batch\n target_batch\n\n Returns\n -------\n\n \"\"\"\n\n def fit(self, feature_vector: np.array, target: np.array) -> None:\n \"\"\"\n Generic fit routine that can be used for a variety of implementations\n\n Parameters\n ----------\n feature_vector\n target\n\n Returns\n -------\n\n \"\"\"\n\n if len(target.shape) == 1:\n target = one_hot(target)\n\n self._initialize_weights(feature_vector, target)\n n, k = feature_vector.shape\n\n # Split dataset into train and validation\n indices = np.random.permutation(n)\n n_train = int(n * 0.15)\n n_validation = n - n_train\n training_idx, validation_idx = indices[:n_train], indices[n_train:]\n feature_train, feature_validation = feature_vector[training_idx, :], feature_vector[validation_idx, :]\n target_train, target_validation = target[training_idx, :], target[validation_idx, :]\n n_batch_train = n_train // self.batch_size\n n_batch_validation = n_validation // self.batch_size\n\n # Initialise tracked quantities\n self.batch_losses_train = []\n self.epoch_losses_train = []\n self.batch_classification_rate_train = []\n self.epoch_classification_rate_train = []\n\n self.batch_losses_validation = []\n self.epoch_losses_validation = []\n self.batch_classification_rate_validation = []\n self.epoch_classification_rate_validation = []\n\n # Initialise best validation loss. 
Once this quantity has not been improved for the number of epochs\n # defined as the early_stop_number we break the epoch iteration\n best_validation_loss = np.inf\n epochs_no_improvement = 0\n\n # Iterate Epochs\n for i in range(self.epochs):\n\n # --- Iterate training batches for backpropagation ---\n epoch_loss = 0\n epoch_classification = []\n\n for j in range(n_batch_train):\n feature_batch = feature_train[j * self.batch_size: (j + 1) * self.batch_size, :]\n target_batch = target_train[j * self.batch_size: (j + 1) * self.batch_size, :]\n\n prediction, batch_loss = self._backpropagation(feature_batch, target_batch)\n epoch_loss += batch_loss\n self.batch_losses_train.append(batch_loss)\n\n batch_classification_rate = classification_rate(target_batch, prediction)\n epoch_classification.append(batch_classification_rate)\n self.batch_classification_rate_train.append(batch_classification_rate)\n\n # Epoch analysis\n self.epoch_losses_train.append(epoch_loss)\n self.epoch_classification_rate_train.append(np.mean(epoch_classification))\n\n # --- Iterate validation batches for scoring ---\n epoch_loss = 0\n epoch_classification = []\n\n for j in range(n_batch_validation):\n feature_batch = feature_validation[j * self.batch_size: (j + 1) * self.batch_size, :]\n target_batch = target_validation[j * self.batch_size: (j + 1) * self.batch_size, :]\n\n # Depending on the exact structure of the result of the forward result we may find the desired result\n # as the first item of a tuple\n prediction = self._forward(feature_batch)\n if type(prediction) is tuple:\n prediction = prediction[0]\n batch_loss = cross_entropy(prediction, target_batch)\n epoch_loss += batch_loss\n self.batch_losses_validation.append(batch_loss)\n\n batch_classification_rate = classification_rate(target_batch, prediction)\n epoch_classification.append(batch_classification_rate)\n self.batch_classification_rate_validation.append(batch_classification_rate)\n\n # Epoch analysis\n self.epoch_losses_validation.append(epoch_loss)\n self.epoch_classification_rate_validation.append(np.mean(epoch_classification))\n\n # We need improvement of at least 0.0001:\n if epoch_loss / best_validation_loss > (1 - 1e-6):\n epochs_no_improvement = 0\n best_validation_loss = epoch_loss\n else:\n epochs_no_improvement += 1\n\n if epochs_no_improvement >= self.early_stop_epochs:\n print(f'No improvement over {self.early_stop_epochs} epochs: abort training')\n break\n\n def plot(self):\n # Number of entries for running mean\n n = 100\n ax1 = plt.subplot(3, 2, 1)\n plt.plot(self.epoch_classification_rate_train, label='Train')\n plt.plot(self.epoch_classification_rate_validation, label='Validation')\n plt.legend()\n ax1.title.set_text('Epoch Classification Rate')\n\n ax2 = plt.subplot(3, 2, 2)\n plt.plot(self.epoch_losses_train, label='Train')\n plt.plot(self.epoch_losses_validation, label='Validation')\n plt.legend()\n ax2.title.set_text('Epoch Losses')\n\n ax3 = plt.subplot(3, 2, 3)\n y = self.batch_classification_rate_train\n plt.plot(y, label='Train')\n running_mean = np.convolve(y, np.ones((n,)) / n, mode='valid')\n plt.plot(running_mean)\n ax3.title.set_text('Batch Classification Rate Train')\n\n ax4 = plt.subplot(3, 2, 4)\n y = self.batch_classification_rate_validation\n plt.plot(y, label='Train')\n running_mean = np.convolve(y, np.ones((n,)) / n, mode='valid')\n plt.plot(running_mean)\n ax4.title.set_text('Batch Classification Rate Validation')\n\n ax5 = plt.subplot(3, 2, 5)\n y = self.batch_losses_train\n plt.plot(y, label='Train')\n 
running_mean = np.convolve(y, np.ones((n,)) / n, mode='valid')\n plt.plot(running_mean)\n plt.legend()\n ax5.title.set_text('Batch Losses Train')\n\n ax6 = plt.subplot(3, 2, 6)\n y = self.batch_losses_validation\n plt.plot(y, label='Validation')\n running_mean = np.convolve(y, np.ones((n,)) / n, mode='valid')\n plt.plot(running_mean)\n plt.legend()\n ax6.title.set_text('Batch Losses Validation')\n\n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.set_printoptions", "numpy.ones", "matplotlib.pyplot.plot", "numpy.random.permutation", "numpy.zeros_like", "matplotlib.pyplot.subplot", "numpy.mean", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
prasad-madhale/machine-learning
[ "bb611f809c16e1425136052e215ca83bd1148652", "bb611f809c16e1425136052e215ca83bd1148652" ]
[ "SVM/SVM_mnist_HAAR.py", "HAAR/HAAR_feature.py" ]
[ "import tensorflow as tf\nfrom HAAR_feature import generate_rectangles, each_item, verify_rectangle, reduce_train\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import svm\nimport numpy as np\n\n\nmnist = tf.keras.datasets.mnist\n\n# get training and testing mnist set\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n\n# # GENERATE AND STORE HAAR FEATURES\n#\n# # generate 100 rectangles\n# rectangles = generate_rectangles()\n#\n# # verify areas between points\n# correct = verify_rectangle(rectangles)\n#\n# # assert if all the generated rectangles have required areas\n# assert correct, True\n#\n# # only pick 20% of each class data points from training set\n# new_train, new_labels = reduce_train(x_train, y_train, data_percent=0.30)\n#\n# # save HAAR features to files\n# np.savez('./data/train_labels', train_labels=new_labels)\n#\n# # get haar features\n# haar = each_item(new_train, rectangles)\n#\n# np.savez('./data/train_haar_features', train_haar=haar)\n#\n# # get haar features for test data as well\n# test_haar = each_item(x_test, rectangles)\n#\n# np.savez('./data/test_haar_features', test_haar=test_haar)\n\n# LOAD HAAR FEATURES from npz files\n\ntrain_haar = np.load(file='./data/train_haar_features.npz')['train_haar']\ntest_haar = np.load(file='./data/test_haar_features.npz')['test_haar']\ntrain_labels = np.load(file='./data/train_labels.npz')['train_labels']\ntest_labels = y_test\n\n\nwith open('./logs/out_svm_haar', 'w') as file_op:\n print('HAAR features loaded!', file=file_op)\n print('SHAPES:', file=file_op)\n print(train_haar.shape, test_haar.shape, train_labels.shape, test_labels.shape, file=file_op)\n\n clf = svm.LinearSVC(dual=False)\n\n # fit train data with labels\n model = clf.fit(train_haar, train_labels)\n\n # predict for test data\n test_predictions = model.predict(test_haar)\n\n test_acc = accuracy_score(test_labels, test_predictions)\n\n print('Test Accuracy: {}'.format(test_acc), file=file_op)\n\nprint('Done!')\n", "import tensorflow as tf\nimport numpy as np\nfrom Ecoc import ECOC\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef generate_rectangles(min_area=130, max_area=170, num_points=100):\n\n points = []\n\n for first in range(28):\n\n # make half rectangles horizontally aligned\n for second_hor in range(28):\n if len(points) >= (num_points // 2):\n break\n\n lent = abs(second_hor - first)\n area = lent * 28\n\n if min_area <= area <= max_area:\n points.append([(first, 0), (second_hor, 27)])\n\n # and other half vertically aligned\n for second_ver in range(28):\n if len(points) >= num_points:\n break\n\n lent = abs(second_ver - first)\n area = lent * 28\n\n if min_area <= area <= max_area:\n points.append([(0, second_ver), (27, first)])\n\n return np.array(points)\n\n\ndef verify_rectangle(rectangles, min_area=130, max_area=170):\n\n for r in rectangles:\n first = r[0]\n second = r[1]\n\n x_diff = abs(first[0] - second[0])\n y_diff = abs(first[1] - second[1])\n\n area = x_diff * y_diff\n\n if min_area > area or area > max_area:\n return False\n\n return True\n\n\ndef compute_black_vals(image):\n\n black = np.array([[0 for _ in range(28)] for _ in range(28)])\n\n for r in range(28):\n for c in range(28):\n\n if r == 0 and c == 0:\n black[r][c] = image[r][c]\n elif r == 0:\n black[r][c] = black[r][c - 1] + image[r][c]\n elif c == 0:\n black[r][c] = black[r - 1][c] + image[r][c]\n else:\n black[r][c] = black[r][c-1] + black[r-1][c] - black[r-1][c-1] + image[r][c]\n\n return black\n\n\ndef compute_black_rectangle(rec, black):\n first 
= rec[0]\n second = rec[1]\n\n top = black[second[0]][first[1]]\n left = black[first[0]][second[1]]\n entire_img = black[27][27]\n\n rec_black = entire_img - top - left + black[first[0]][first[1]]\n\n return rec_black\n\n\ndef each_item(train, recs):\n\n haar_features = []\n\n for i, image in enumerate(train):\n # black values\n black = compute_black_vals(image)\n\n haar_rec = []\n\n for j, r in enumerate(recs):\n hor = get_hor_half(r)\n ver = get_ver_half(r)\n\n hor_feature = split_black_val(hor, black)\n ver_feature = split_black_val(ver, black)\n\n haar_rec.append(hor_feature)\n haar_rec.append(ver_feature)\n\n haar_features.append(haar_rec)\n\n return np.array(haar_features)\n\n\ndef split_black_val(splitted, black):\n first = splitted[0]\n second = splitted[1]\n\n first_val = compute_black_rectangle(first, black)\n second_val = compute_black_rectangle(second, black)\n\n return first_val - second_val\n\n\ndef get_ver_half(rec):\n\n first = rec[0]\n second = rec[1]\n\n x_len = abs(first[0] - second[0]) // 2\n\n left = [(first[0], first[1]), (second[0] - x_len, second[1])]\n right = [(min(first[0] + x_len, 27), first[1]), (second[0], second[1])]\n\n return [left, right]\n\n\ndef get_hor_half(rec):\n\n first = rec[0]\n second = rec[1]\n\n y_len = abs(first[1] - second[1]) // 2\n\n # top half\n top = [(first[0], first[1]), (second[0], second[1] - y_len)]\n bottom = [(first[0], min(first[1] + y_len, 27)), (second[0], second[1])]\n\n return [top, bottom]\n\n\ndef reduce_train(x_train, y_train):\n new_train = np.empty(shape=(1, 28, 28))\n new_labels = np.array([])\n\n for label in range(10):\n indices = np.where(y_train == label)[0]\n\n # number of values to pick\n new_size = int(0.20 * len(indices))\n\n # 20% values\n picks = np.random.choice(indices, new_size)\n\n label_reduced = x_train[picks]\n\n new_train = np.concatenate([new_train, label_reduced])\n\n r_labels = y_train[picks]\n\n new_labels = np.concatenate([new_labels, r_labels])\n\n return np.array(new_train[1:]), np.array(new_labels)\n\n\ndef generate_code_matrix(num_features = 50):\n rand_mat = np.random.randint(2, size=num_features)\n coding_matrix = []\n coding_matrix.append(rand_mat)\n coding_matrix.append(np.zeros((num_features,)))\n\n for i in range(1, 9):\n rand_mat = np.roll(rand_mat, i)\n coding_matrix.append(rand_mat)\n\n return np.array(coding_matrix)\n\n\ndef normalize(data, train_size):\n # data = minmax_scale(data, feature_range=(0, 1))\n\n std = StandardScaler()\n std.fit(data)\n std.transform(data)\n\n train_data = data[:train_size]\n test_data = data[train_size:]\n\n return train_data, test_data\n\n\nmnist = tf.keras.datasets.mnist\n\n# get training and testing mnist set\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n\n# GENERATE HAAR FEATURES\n\n# # generate 100 rectangles\n# rectangles = generate_rectangles()\n#\n# # verify areas between points\n# correct = verify_rectangle(rectangles)\n#\n# # assert if all the generated rectangles have required areas\n# assert correct, True\n#\n# new_train, new_labels = reduce_train(x_train, y_train)\n#\n# # get har features\n# haar = each_item(new_train, rectangles)\n#\n# test_haar = each_item(x_test, rectangles)\n#\n# all_haar = np.concatenate([haar, test_haar])\n\n\n# LOAD HAAR FEATURES FROM NPZ FILES\ntrain_haar = np.load(file='./data/train_haar_features.npz')['train_haar']\ntest_haar = np.load(file='./data/test_haar_features.npz')['test_haar']\ntrain_labels = np.load(file='./data/train_labels.npz')['train_labels']\ntest_labels = y_test\n\n# full_haar = 
np.concatenate([train_haar, test_haar])\n#\n# # normalize haar features\n# train_haar, test_haar = normalize(full_haar, train_size=len(train_haar))\n\n# coding_matrix = generate_code_matrix(num_features=30)\n\n# generate coding matrix for ECOC procedure\ncoding_matrix = np.array([[1,0,1,0,0,1,1,0,0,1,1,0,1,0,1,1,1,1,1,0],\n [1,0,0,0,0,1,0,1,0,0,1,1,0,0,1,1,0,1,0,1],\n [1,1,0,0,0,0,1,0,1,0,0,1,1,0,0,1,1,0,1,0],\n [1,1,1,0,0,0,0,1,0,1,0,0,1,1,0,0,1,1,0,1],\n [1,1,1,1,0,0,0,0,1,0,1,0,0,1,1,0,0,1,1,0],\n [1,1,1,1,1,0,0,0,0,1,0,1,0,0,1,1,0,0,1,1],\n [0,1,1,1,1,1,0,0,0,0,1,0,1,0,0,1,1,0,0,1],\n [1,0,1,1,1,1,1,0,0,0,0,1,0,1,0,0,1,1,0,0],\n [0,1,0,1,0,0,1,1,0,0,1,1,0,1,0,1,1,1,1,1],\n [0,0,1,0,1,0,0,1,1,0,0,1,1,0,1,0,1,1,1,1]])\n\necoc = ECOC(train_haar, train_labels, test_haar, test_labels, 3000, coding_matrix)\necoc.train()\n" ]
[ [ "numpy.load", "sklearn.svm.LinearSVC", "sklearn.metrics.accuracy_score" ], [ "numpy.random.choice", "numpy.concatenate", "numpy.where", "numpy.load", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.zeros", "numpy.roll", "numpy.empty", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neuromorphs/LTC21-SNN
[ "c0bcde952fbcb340ac78f5d4b6784e4952ed40ac" ]
[ "models/predictive_model_dl.py" ]
[ "import nengo\nimport numpy as np\nimport pandas as pd\nimport scipy.linalg\nfrom scipy.special import legendre\nimport pickle\nimport nengo_dl\n# TODO MAKE THE MODELS AS OBJECT CLASSES\n# TODO ADD DOCUMENTATION\n\n# This code is completely taken from Terry Steward:\nclass DiscreteDelay(nengo.synapses.Synapse):\n def __init__(self, delay, size_in=1):\n self.delay = delay\n super().__init__(default_size_in=size_in, default_size_out=size_in)\n\n def make_state(self, shape_in, shape_out, dt, dtype=None, y0=None):\n return {}\n\n def make_step(self, shape_in, shape_out, dt, rng, state=None):\n steps = int(self.delay/dt)\n if steps == 0:\n def step_delay(t, x):\n return x\n return step_delay\n assert steps > 0\n\n state = np.zeros((steps, shape_in[0]))\n state_index = np.array([0])\n\n def step_delay(t, x, state=state, state_index=state_index):\n result = state[state_index]\n state[state_index] = x\n state_index[:] = (state_index + 1) % state.shape[0]\n return result\n\n return step_delay\n\n# This code is completely taken from Terry Steward:\nclass LDN(nengo.Process):\n def __init__(self, theta, q, size_in=1):\n self.q = q # number of internal state dimensions per input\n self.theta = theta # size of time window (in seconds)\n self.size_in = size_in # number of inputs\n\n # Do Aaron's math to generate the matrices A and B so that\n # dx/dt = Ax + Bu will convert u into a legendre representation over a window theta\n # https://github.com/arvoelke/nengolib/blob/master/nengolib/synapses/analog.py#L536\n A = np.zeros((q, q))\n B = np.zeros((q, 1))\n for i in range(q):\n B[i] = (-1.) ** i * (2 * i + 1)\n for j in range(q):\n A[i, j] = (2 * i + 1) * (-1 if i < j else (-1.) ** (i - j + 1))\n self.A = A / theta\n self.B = B / theta\n\n super().__init__(default_size_in=size_in, default_size_out=q * size_in)\n\n def make_step(self, shape_in, shape_out, dt, rng, state=None):\n state = np.zeros((self.q, self.size_in))\n\n # Handle the fact that we're discretizing the time step\n # https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models\n Ad = scipy.linalg.expm(self.A * dt)\n Bd = np.dot(np.dot(np.linalg.inv(self.A), (Ad - np.eye(self.q))), self.B)\n\n # this code will be called every timestep\n def step_legendre(t, x, state=state):\n state[:] = np.dot(Ad, state) + np.dot(Bd, x[None, :])\n return state.T.flatten()\n\n return step_legendre\n\n def get_weights_for_delays(self, r):\n # compute the weights needed to extract the value at time r\n # from the network (r=0 is right now, r=1 is theta seconds ago)\n r = np.asarray(r)\n m = np.asarray([legendre(i)(2 * r - 1) for i in range(self.q)])\n return m.reshape(self.q, -1).T\n\n\nclass DataParser:\n def __init__(self, data_df, sample_freq, vars=[]):\n self.data_df = data_df\n self.sample_freq = sample_freq\n self.vars = vars\n\n def parse_data(self, t):\n r = [self.data_df[x].iloc[int(t * self.sample_freq)] for x in self.vars]\n return r\n\n def update_data(self, data_df):\n self.data_df = data_df\n\n\nclass PredictiveModelLMU_dl:\n def __init__(self, seed=42, neurons_per_dim=100, sample_freq=50,\n lmu_theta=0.1, lmu_q=20, radius=1.5, dt=0.001,\n t_delays=[0.02], learning_rate=5e-5, action_vars=[\"Q\"],\n state_vars=[\"angle_sin\", \"angle_cos\", \"angleD\", \"position\", \"positionD\"],\n action_df=None, state_df=None, weights=None, scales={}, *args, **kwargs):\n\n self.seed = seed\n self.neurons_per_dim = neurons_per_dim\n self.sample_freq = sample_freq\n self.t_delays = t_delays\n self.learning_rate = learning_rate\n 
self.action_vars = action_vars\n self.action_dim = len(action_vars)\n self.state_vars = state_vars\n self.state_dim = len(state_vars)\n self.radius = radius\n self.dt = dt\n self.lmu_q = lmu_q\n self.lmu_theta = lmu_theta\n self.weights = weights\n self.scales = scales\n\n if action_df is None:\n self.action_df = pd.DataFrame(\n np.zeros((1, len(action_vars) + 1)),\n columns=[\"time\"] + action_vars,\n )\n\n if state_df is None:\n self.state_df = pd.DataFrame(\n np.zeros((1, len(state_vars) + 1)),\n columns=[\"time\"] + state_vars,\n )\n\n self.action_parser = DataParser(\n data_df=self.action_df,\n sample_freq=self.sample_freq,\n vars=self.action_vars\n )\n\n self.state_parser = DataParser(\n data_df=self.state_df,\n sample_freq=self.sample_freq,\n vars=self.state_vars\n )\n\n # this function streams the state signal from file to node\n def action_stim_func(t):\n return self.action_parser.parse_data(t)\n\n # this function streams the state signal from file to node\n def state_stim_func(t):\n return self.state_parser.parse_data(t)\n\n self.action_stim_func = action_stim_func\n self.state_stim_func = state_stim_func\n\n self.model, self.recordings = self.make_model()\n self.sim = nengo_dl.Simulator(self.model, dt=self.dt, progress_bar=False)\n\n def set_inputs(self, action_df, state_df):\n\n for v in self.action_vars + [\"time\"]:\n assert v in action_df.columns\n for v in self.state_vars:\n assert v in state_df.columns\n\n self.action_df = action_df\n self.state_df = state_df\n\n self.action_parser.update_data(self.action_df)\n self.state_parser.update_data(self.state_df)\n\n def reset_sim(self):\n\n self.sim.reset(seed=self.seed)\n\n def set_weights(self, weights):\n\n weights = np.array(weights)\n assert weights.shape == (len(self.t_delays),\n self.state_dim,\n self.neurons_per_dim * (self.state_dim + self.action_dim) * (1 + self.lmu_q))\n\n self.weights = weights\n for i, con in enumerate(self.connections):\n self.sim.data[con].weights = self.weights[i]\n\n def get_weights(self):\n\n weights = []\n for con in self.connections:\n weights.append(self.sim.data[con].weights)\n return np.array(weights)\n\n def process_files(self):\n\n t_max = self.action_df[\"time\"].max() # number of seconds to run\n self.sim.run(t_max)\n\n return self.recordings\n\n def get_state_dict(self):\n\n state_dict = {\n \"seed\": self.seed,\n \"neurons_per_dim\": self.neurons_per_dim,\n \"sample_freq\": self.sample_freq,\n \"t_delays\": self.t_delays,\n \"learning_rate\": self.learning_rate,\n \"action_vars\": self.action_vars,\n \"state_vars\": self.state_vars,\n \"radius\": self.radius,\n \"dt\": self.dt,\n \"lmu_q\": self.lmu_q,\n \"lmu_theta\": self.lmu_theta,\n \"weights\": self.get_weights(),\n \"scales\": self.scales\n }\n\n return state_dict\n\n def save_state_dict(self, path=\"model_state.pkl\"):\n\n state_dict = self.get_state_dict()\n with open(path, \"wb\") as f:\n pickle.dump(state_dict, f)\n\n def make_model(self):\n\n if self.weights is None:\n self.weights = np.zeros((\n len(self.t_delays),\n self.state_dim,\n self.neurons_per_dim * (self.state_dim + self.action_dim) * (1 + self.lmu_q))\n )\n\n model = nengo.Network()\n with model:\n # set the default synapse to None (normal default is 0.005s)\n model.config[nengo.Connection].synapse = None\n\n # initialize input nodes\n a = nengo.Node(self.action_stim_func)\n s = nengo.Node(self.state_stim_func)\n\n # record the input to the network\n p_a = nengo.Probe(a)\n p_s = nengo.Probe(s)\n\n # the value to be predicted (which in this case is just the first 
dimension of the input)\n z = nengo.Node(None, size_in=self.state_dim)\n nengo.Connection(s, z)\n\n # make LMU unit\n ldn = nengo.Node(LDN(theta=self.lmu_theta, q=self.lmu_q, size_in=self.state_dim + self.action_dim))\n nengo.Connection(a, ldn[:self.action_dim])\n nengo.Connection(s, ldn[self.action_dim:])\n\n # make the hidden layer\n ens = nengo.Ensemble(\n n_neurons=self.neurons_per_dim * (self.state_dim + self.action_dim)*(1+self.lmu_q),\n dimensions=(self.state_dim + self.action_dim)*(1+self.lmu_q),\n neuron_type=nengo.LIFRate(),\n seed=self.seed\n )\n nengo.Connection(a, ens[:self.action_dim])\n nengo.Connection(s, ens[self.action_dim:self.action_dim+self.state_dim])\n nengo.Connection(ldn, ens[self.action_dim+self.state_dim:])\n\n z_preds = []\n self.connections = []\n errors = []\n\n recordings = {\n \"states\" : p_s,\n \"actions\": p_a,\n \"predictions\": {}\n }\n\n for i, t_d in enumerate(self.t_delays):\n\n z_preds.append(nengo.Node(None, size_in=self.state_dim))\n\n # make the output weights we can learn\n self.connections.append(\n nengo.Connection(\n ens.neurons,\n z_preds[-1],\n transform=self.weights[i], # change this if you have pre-recorded weights to use\n seed=self.seed,\n learning_rule_type=nengo.PES(\n learning_rate=self.learning_rate,\n pre_synapse=DiscreteDelay(t_d) # delay the activity value when updating weights\n )\n )\n )\n\n # compute the error by subtracting the current measurement from a delayed version of the predicton\n errors.append(nengo.Node(None, size_in=self.state_dim))\n nengo.Connection(z_preds[-1], errors[-1], synapse=DiscreteDelay(t_d))\n nengo.Connection(z, errors[-1], transform=-1)\n # apply the error to the learning rule\n nengo.Connection(errors[-1], self.connections[-1].learning_rule)\n\n prediction = {\n f\"{i}\": {\n \"delay\": t_d,\n # record the prediction\n \"states_pred\": nengo.Probe(z_preds[-1]),\n # record the error\n \"errors\": nengo.Probe(errors[-1]),\n }\n }\n\n recordings[\"predictions\"].update(prediction)\n\n return model, recordings\n\n\"\"\"\ndef make_model(action_df, state_df, weights=None, seed=42, n=100, samp_freq=50,\n t_delay=0.02, learning_rate=5e-5):\n if weights is None:\n weights = np.zeros((4, n * 5))\n\n model = nengo.Network()\n with model:\n model.config[nengo.Connection].synapse = None # set the default synapse to None (normal default is 0.005s)\n\n # the input to the network\n def action_stim_func(t):\n return action_df[\"Q\"].iloc[int(t * samp_freq)]\n\n a = nengo.Node(action_stim_func)\n\n # this function streams the state signal from file to node\n def state_stim_func(t):\n return state_df[\"angle_sin\"].iloc[int(t * samp_freq)], \\\n state_df[\"angleD\"].iloc[int(t * samp_freq)], \\\n state_df[\"position\"].iloc[int(t * samp_freq)], \\\n state_df[\"positionD\"].iloc[int(t * samp_freq)]\n\n s = nengo.Node(state_stim_func)\n\n # the value to be predicted (which in this case is just the first dimension of the input)\n z = nengo.Node(None, size_in=4)\n nengo.Connection(s, z)\n\n z_pred = nengo.Node(None, size_in=4)\n\n # make the hidden layer\n ens = nengo.Ensemble(n_neurons=n * 5, dimensions=5,\n neuron_type=nengo.LIFRate(), seed=seed)\n nengo.Connection(a, ens[0])\n nengo.Connection(s, ens[1:])\n\n # make the output weights we can learn\n conn = nengo.Connection(ens.neurons, z_pred,\n transform=weights, # change this if you have pre-recorded weights to use\n learning_rule_type=nengo.PES(learning_rate=learning_rate,\n pre_synapse=DiscreteDelay(t_delay)\n # delay the activity value when updating weights\n 
))\n\n # compute the error by subtracting the current measurement from a delayed version of the predicton\n error = nengo.Node(None, size_in=4)\n nengo.Connection(z_pred, error, synapse=DiscreteDelay(t_delay))\n nengo.Connection(z, error, transform=-1)\n # apply the error to the learning rule\n nengo.Connection(error, conn.learning_rule)\n\n # record the input to the network\n p_a = nengo.Probe(a)\n p_s = nengo.Probe(s)\n # record the value to be predicted\n p_z = nengo.Probe(z)\n # record the prediction\n p_z_pred = nengo.Probe(z_pred)\n # record the error\n p_e = nengo.Probe(error)\n # record the weights (but only every 0.1 seconds just to save memory)\n p_weights = nengo.Probe(conn, 'weights', sample_every=0.1)\n\n return model, [p_a, p_s, p_z, p_z_pred, p_e, p_weights]\n\n'''\n\nLMU Nengo models and implementation\n\nTo use in network: ldn = nengo.Node(LDN(theta=theta, q=q))\n\n'''\n\n\ndef make_model_LMU(action_df, state_df, weights=None, seed=42, n=100, samp_freq=50, lmu_theta=0.1, lmu_q=20,\n t_delay=0.02, learning_rate=5e-5):\n if weights is None:\n weights = np.zeros((4, n * 5))\n\n model = nengo.Network()\n with model:\n model.config[nengo.Connection].synapse = None # set the default synapse to None (normal default is 0.005s)\n\n # the input to the network\n def action_stim_func(t):\n return action_df[\"Q\"].iloc[int(t * samp_freq)]\n\n a = nengo.Node(action_stim_func)\n\n # this function streams the state signal from file to node\n def state_stim_func(t):\n return state_df[\"angle_sin\"].iloc[int(t * samp_freq)], \\\n state_df[\"angleD\"].iloc[int(t * samp_freq)], \\\n state_df[\"position\"].iloc[int(t * samp_freq)], \\\n state_df[\"positionD\"].iloc[int(t * samp_freq)]\n\n s = nengo.Node(state_stim_func)\n\n # the value to be predicted (which in this case is just the first dimension of the input)\n z = nengo.Node(None, size_in=4)\n nengo.Connection(s, z)\n\n z_pred = nengo.Node(None, size_in=4)\n\n ldn = nengo.Node(LDN(theta=lmu_theta, q=lmu_q, size_in=5))\n\n nengo.Connection(a, ldn[0])\n nengo.Connection(s, ldn[1:])\n\n # make the hidden layer\n ens = nengo.Ensemble(n_neurons=n * 5, dimensions=5*lmu_q,\n neuron_type=nengo.LIFRate(), seed=seed)\n\n #How do I connect each lmu to one dimension of ens?\n nengo.Connection(ldn, ens)\n\n\n # make the output weights we can learn\n conn = nengo.Connection(ens.neurons, z_pred,\n transform=weights, # change this if you have pre-recorded weights to use\n learning_rule_type=nengo.PES(learning_rate=learning_rate,\n pre_synapse=DiscreteDelay(t_delay)\n # delay the activity value when updating weights\n ))\n\n # compute the error by subtracting the current measurement from a delayed version of the predicton\n error = nengo.Node(None, size_in=4)\n nengo.Connection(z_pred, error, synapse=DiscreteDelay(t_delay))\n nengo.Connection(z, error, transform=-1)\n # apply the error to the learning rule\n nengo.Connection(error, conn.learning_rule)\n\n # record the input to the network\n p_a = nengo.Probe(a)\n p_s = nengo.Probe(s)\n # record the value to be predicted\n p_z = nengo.Probe(z)\n # record the prediction\n p_z_pred = nengo.Probe(z_pred)\n # record the error\n p_e = nengo.Probe(error)\n # record the weights (but only every 0.1 seconds just to save memory)\n p_weights = nengo.Probe(conn, 'weights', sample_every=0.1)\n\n return model, [p_a, p_s, p_z, p_z_pred, p_e, p_weights]\n\n\n# The LMU2 model includes both the state and its temporal factorization in the ensemble to generate a prediction.\n\ndef make_model_LMU2(action_df, state_df, 
weights=None, seed=42, n=100, samp_freq=50, lmu_theta=0.1, lmu_q=20,\n t_delay=0.02, learning_rate=5e-5, radius=1.5):\n\n if weights is None:\n weights = np.zeros((5, n*6*(1+lmu_q)))\n\n model = nengo.Network()\n with model:\n model.config[nengo.Connection].synapse = None # set the default synapse to None (normal default is 0.005s)\n\n # the input to the network\n def action_stim_func(t):\n return action_df[\"Q\"].iloc[int(t * samp_freq)]\n\n a = nengo.Node(action_stim_func)\n\n # this function streams the state signal from file to node\n def state_stim_func(t):\n return state_df[\"angle_sin\"].iloc[int(t * samp_freq)], \\\n state_df[\"angle_cos\"].iloc[int(t * samp_freq)], \\\n state_df[\"angleD\"].iloc[int(t * samp_freq)], \\\n state_df[\"position\"].iloc[int(t * samp_freq)], \\\n state_df[\"positionD\"].iloc[int(t * samp_freq)]\n\n s = nengo.Node(state_stim_func)\n\n # the value to be predicted (which in this case is just the first dimension of the input)\n z = nengo.Node(None, size_in=5)\n nengo.Connection(s, z)\n\n z_pred = nengo.Node(None, size_in=5)\n\n ldn = nengo.Node(LDN(theta=lmu_theta, q=lmu_q, size_in=6))\n\n nengo.Connection(a, ldn[0])\n nengo.Connection(s, ldn[1:])\n\n # make the hidden layer\n ens = nengo.Ensemble(n_neurons=n*6*(1+lmu_q), dimensions=6*(1+lmu_q),\n neuron_type=nengo.LIFRate(), seed=seed, radius=radius)\n\n #How do I connect each lmu to one dimension of ens?\n nengo.Connection(a, ens[:1])\n nengo.Connection(s, ens[1:6])\n nengo.Connection(ldn, ens[6:])\n\n\n # make the output weights we can learn\n conn = nengo.Connection(ens.neurons, z_pred,\n transform=weights, # change this if you have pre-recorded weights to use\n learning_rule_type=nengo.PES(learning_rate=learning_rate,\n pre_synapse=DiscreteDelay(t_delay)\n # delay the activity value when updating weights\n ))\n\n # compute the error by subtracting the current measurement from a delayed version of the predicton\n error = nengo.Node(None, size_in=5)\n nengo.Connection(z_pred, error, synapse=DiscreteDelay(t_delay))\n nengo.Connection(z, error, transform=-1)\n # apply the error to the learning rule\n nengo.Connection(error, conn.learning_rule)\n\n # record the input to the network\n p_a = nengo.Probe(a)\n p_s = nengo.Probe(s)\n # record the value to be predicted\n p_z = nengo.Probe(z)\n # record the prediction\n p_z_pred = nengo.Probe(z_pred)\n # record the error\n p_e = nengo.Probe(error)\n # record the weights (but only every 0.1 seconds just to save memory)\n p_weights = nengo.Probe(conn, 'weights', sample_every=0.1)\n\n return model, [p_a, p_s, p_z, p_z_pred, p_e, p_weights]\n\ndef make_model_LMU3(action_df, state_df, weights=None, seed=42, n=100, samp_freq=50, lmu_theta=0.1, lmu_q=20, # LMU2 but with 'angle' state\n t_delay=0.02, learning_rate=5e-5, radius=1.5):\n\n if weights is None:\n weights = np.zeros((6, n * 7 * (1 + lmu_q)))\n\n model = nengo.Network()\n with model:\n model.config[nengo.Connection].synapse = None # set the default synapse to None (normal default is 0.005s)\n\n # the input to the network\n def action_stim_func(t):\n return action_df[\"Q\"].iloc[int(t * samp_freq)]\n\n a = nengo.Node(action_stim_func)\n\n # this function streams the state signal from file to node\n def state_stim_func(t):\n return state_df[\"angle\"].iloc[int(t * samp_freq)], \\\n state_df[\"angleD\"].iloc[int(t * samp_freq)], \\\n state_df[\"angle_sin\"].iloc[int(t * samp_freq)], \\\n state_df[\"angle_cos\"].iloc[int(t * samp_freq)], \\\n state_df[\"position\"].iloc[int(t * samp_freq)], \\\n 
state_df[\"positionD\"].iloc[int(t * samp_freq)]\n\n s = nengo.Node(state_stim_func)\n\n # the value to be predicted (which in this case is just the first dimension of the input)\n z = nengo.Node(None, size_in=6)\n nengo.Connection(s, z)\n\n z_pred = nengo.Node(None, size_in=6)\n\n ldn = nengo.Node(LDN(theta=lmu_theta, q=lmu_q, size_in=7))\n\n nengo.Connection(a, ldn[0])\n nengo.Connection(s, ldn[1:])\n\n # make the hidden layer\n ens = nengo.Ensemble(n_neurons=n * 7 * (1 + lmu_q), dimensions=7 * (1 + lmu_q),\n neuron_type=nengo.LIFRate(), seed=seed, radius=radius)\n\n # How do I connect each lmu to one dimension of ens?\n nengo.Connection(a, ens[:1])\n nengo.Connection(s, ens[1:7])\n nengo.Connection(ldn, ens[7:])\n\n # make the output weights we can learn\n conn = nengo.Connection(ens.neurons, z_pred,\n transform=weights, # change this if you have pre-recorded weights to use\n learning_rule_type=nengo.PES(learning_rate=learning_rate,\n pre_synapse=DiscreteDelay(t_delay)\n # delay the activity value when updating weights\n ))\n\n # compute the error by subtracting the current measurement from a delayed version of the predicton\n error = nengo.Node(None, size_in=6)\n nengo.Connection(z_pred, error, synapse=DiscreteDelay(t_delay))\n nengo.Connection(z, error, transform=-1)\n # apply the error to the learning rule\n nengo.Connection(error, conn.learning_rule)\n\n # record the input to the network\n p_a = nengo.Probe(a)\n p_s = nengo.Probe(s)\n # record the value to be predicted\n p_z = nengo.Probe(z)\n # record the prediction\n p_z_pred = nengo.Probe(z_pred)\n # record the error\n p_e = nengo.Probe(error)\n # record the weights (but only every 0.1 seconds just to save memory)\n p_weights = nengo.Probe(conn, 'weights', sample_every=0.1)\n\n return model, [p_a, p_s, p_z, p_z_pred, p_e, p_weights]\n\n\"\"\"" ]
[ [ "numpy.dot", "numpy.asarray", "numpy.linalg.inv", "numpy.eye", "scipy.special.legendre", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
MATRL19/Single-Vehicle-Routing-Problem
[ "45441e8a66b0efff8884d4078bb91322d6f1063c" ]
[ "LinearApproximation.py" ]
[ "import random\nimport math\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport Tile\nimport os, os.path\nimport numpy as np\nfrom math import pow\nimport pandas as pd\nimport csv\nfrom math import pow, sqrt\nfrom time import sleep\nfrom collections import defaultdict\nfrom scipy import sparse\nfrom numpy import array\n\n\nbinNumber= 10 #number of divisions in each direction of tilings\ntilingNumber = 4 #number of tilings\nexpandRate = 25 #the overlap of tilings\n\n\nclass DataPreProcessing:\n\n def __init__(self, datafile):\n self.datafile = datafile\n\n def _read_text_file(self):\n flist = open(self.datafile).readlines()[:]\n listData=[s.rstrip('\\n') for s in flist]\n return listData\n\n def _numberOfCities(self):\n listdata=self._read_text_file()\n numberOfCities=len(listdata)\n return numberOfCities\n\n def _transform_to_list_ndarray(self):\n listData=self._read_text_file()\n for i in range (len(listData)):\n listData[i]=np.fromstring(listData[i], dtype=float, sep=' ')\n\n return listData\n\n def extract_coordinates(self):\n Coordiantes=[]\n listData=self._transform_to_list_ndarray()\n for i in range(len(listData)):\n Coordiantes.append(np.array(listData[i][1:3]))\n return Coordiantes\n \n def extract_single_x_coordinate(self, customerID):\n coordinate=self.extract_coordinates_single_customer(customerID)\n X_coordiante=coordinate[0]\n return X_coordiante\n\n def extract_single_y_coordinate(self, customerID):\n coordinate=self.extract_coordinates_single_customer(customerID)\n Y_coordiante=coordinate[1]\n return Y_coordiante\n\n def extract_all_x_coordinate(self):\n X_coordiantes=[]\n coordinates=self.shift_coordinates()\n for i in range(len(coordinates)):\n X_coordiantes.append(np.array(coordinates[i][0]))\n X_coordinateslist=[]\n for i in range (len(X_coordiantes)):\n X_coordinateslist.append(float(X_coordiantes[i]))\n return X_coordinateslist\n\n\n def extract_all_x_coordinate_modified(self, ID_depot):\n size=self._numberOfCities()+1\n x_coordiantes=[0]*size\n x_coordiantes[0]=float(self.extract_single_x_coordinate(ID_depot))\n x_coordiantes[1:]=self.extract_all_x_coordinate()\n return x_coordiantes\n\n def extract_all_y_coordinate(self):\n Y_coordiantes=[]\n coordinates=self.shift_coordinates()\n for i in range(len(coordinates)):\n Y_coordiantes.append(np.array(coordinates[i][1]))\n Y_coordinateslist=[]\n for i in range (len(Y_coordiantes)):\n Y_coordinateslist.append(float(Y_coordiantes[i]))\n return Y_coordinateslist\n\n def extract_all_y_coordinate_modified(self, ID_depot):\n size=self._numberOfCities()+1\n y_coordiantes=[0]*size\n y_coordiantes[0]=float(self.extract_single_y_coordinate(ID_depot))\n y_coordiantes[1:]=self.extract_all_y_coordinate()\n return y_coordiantes\n\n\n def extract_coordinates_single_customer(self,customer_ID):\n coordinates=self.shift_coordinates()\n return coordinates[(customer_ID)-1]\n\n def compute_distance_between_2_customers(self,customer_ID1,customer_ID2):\n coordinate_customer_ID1= self.extract_coordinates_single_customer(customer_ID1)\n coordinate_customer_ID2= self.extract_coordinates_single_customer(customer_ID2)\n distance=sqrt((pow((coordinate_customer_ID1[0]-coordinate_customer_ID2[0]),2))+(pow((coordinate_customer_ID1[1]-coordinate_customer_ID2[1]),2)))\n return distance\n\n\n def extract_ID_customer_from_coordinates(self, coordinates):\n all_coordinates=self.shift_coordinates()\n for i in range(len(all_coordinates)):\n if coordinates[0] == all_coordinates[i][0] and coordinates[1] == all_coordinates[i][1]:\n return i+1\n 
return None\n\n def extract_ID_depot(self):\n all_coordinates=self.extract_coordinates()\n ID_depot=len(all_coordinates)\n return ID_depot\n\n\n def shift_coordinates(self):\n ID_depot=self.extract_ID_depot()\n coordinates=self.extract_coordinates()\n coordiantes_depot=coordinates[ID_depot-1]\n x_coordinate_depot=coordiantes_depot[0]\n y_coordinate_depot=coordiantes_depot[1]\n for i in range(len(coordinates)):\n coordinates[i][0]=coordinates[i][0]-(x_coordinate_depot)\n coordinates[i][1]=coordinates[i][1]-(y_coordinate_depot)\n\n return coordinates\n\n\n\n def compute_distance_Matrix(self):\n D = np.zeros(shape=(self._numberOfCities(),self._numberOfCities()))\n for i in range(len(D)):\n for j in range (len(D)):\n if i==0 and j==0:\n D[i][j] = self.compute_distance_between_2_customers(self._numberOfCities(),self._numberOfCities())\n elif i==0 and j!=0:\n D[i][j]=self.compute_distance_between_2_customers(self._numberOfCities(),j)\n elif j==0 and i!=0:\n D[i][j] = self.compute_distance_between_2_customers(i,self._numberOfCities())\n else:\n D[i][j] = self.compute_distance_between_2_customers(i,j)\n return np.negative(D)\n\n\n def print_circuit_broad(self,X_coordinates,Y_coordinates):\n Customers_X_coordinates=X_coordinates[0:len(X_coordinates)-1]\n Customers_Y_coordinates=Y_coordinates[0:len(Y_coordinates)-1]\n Depot_X_coordinate=X_coordinates[len(X_coordinates)-1]\n Depot_Y_coordinate=Y_coordinates[len(Y_coordinates)-1]\n plt.figure(figsize=(20,10))\n plt.scatter(Customers_X_coordinates, Customers_Y_coordinates, c='b')\n plt.scatter(Depot_X_coordinate, Depot_Y_coordinate, c='r')\n\n for i in range (len(X_coordinates)):\n plt.annotate('$%d$' %(i+1), (X_coordinates[i],Y_coordinates[i]), horizontalalignment='left',verticalalignment='bottom')\n\n plt.grid(True)\n plt.xlabel('X_coordinates',fontsize=20)\n plt.ylabel('Y_coordinates',fontsize=20)\n plt.title('Cities Representation, TSP',fontsize=20)\n\n plt.yticks(fontsize=22)\n plt.xticks(fontsize=22)\n\n\n\nclass SemiGradientSarSa:\n\n def __init__(self, alpha, temperature):\n self.alpha = alpha\n self.temperature = temperature\n\n\n def Boltzman_policy( self, state, tempertaure_param, w, dictBinCustomerID, listOfVisitedCustomers):\n listActions = self.PossibleActions(dictBinCustomerID)\n approximatedQ = []\n\n for i in range(len(listActions)):\n approximatedQ.append((np.array(w).reshape(1, 400)@np.array(listActions[i]).reshape(400, 1))[0][0])\n\n p = np.array([ approximatedQ[x]/tempertaure_param for x in range(len(approximatedQ))])\n pi_actions = np.exp(p) / np.sum(np.exp(p))\n max_action = np.argmax(pi_actions)\n tmpcounter = 1\n \n while True:\n if max_action in listOfVisitedCustomers:\n pi_actions[max_action] = 0\n if sum(pi_actions) == 0:\n max_action = random.randint(0, len(pi_actions)-1)\n else:\n max_action = np.argmax(pi_actions)\n tmpcounter += 1\n else:\n return max_action\n\n def Dict(self, dataFile, tilling):\n dict_bin_customerID = dict()\n 
dict_coor_customerID = dict()\n customersCoordsList = dataFile.shift_coordinates()\n\n for i in range(0, 11):\n dict_coor_customerID[i] = customersCoordsList[i]\n dict_bin_customerID[i] = tilling.binCoordinates(customersCoordsList[i][0], customersCoordsList[i][1])\n \n return dict_bin_customerID, dict_coor_customerID\n\n def InitialState(self, dictBinCustomerID):\n state = dictBinCustomerID.get(len(dictBinCustomerID)-1)\n return state, len(dictBinCustomerID)-1\n\n def ReturnKeyOfThisValue(self, state, dictBinCustomerID):\n print(state[1])\n listOfKeys = []\n print(listOfKeys)\n return listOfKeys\n\n def PossibleActions(self, dictBinCustomerID):\n actionList = []\n for i in range(len(dictBinCustomerID)-1):\n actionList.append(dictBinCustomerID.get(i))\n\n return actionList\n\n def reward(self, step, S, Sprime, numberOfCustomers, dataFile):\n if step == 1:\n return dataFile.compute_distance_between_2_customers(numberOfCustomers ,Sprime)\n elif step > numberOfCustomers:\n return dataFile.compute_distance_between_2_customers(S, numberOfCustomers)\n \n return dataFile.compute_distance_between_2_customers(S, Sprime)\n\n\n\ndef main(alpha, gamma, lamda, seedi, epsilon, tempertaure_param):\n random.seed(seedi)\n w = [random.uniform(-0.001, 0.001)] * np.ones([tilingNumber,binNumber*binNumber])*epsilon\n stateSeq = []\n tiling = Tile.Tile(-200, 200, -200, 200, tilingNumber, binNumber, expandRate)\n tiling.generate()\n cwd = os.getcwd()\n path, dirs, files = next(os.walk(cwd+\"/Train/\"))\n comulativeReward = 0\n for epoch in range(100):\n print(\"Epoch:\")\n print(epoch)\n comulativeReward = 0\n \n for k in range(0,len(files)): \n stateSeq = []\n policySarsa = SemiGradientSarSa(0.1, 2)\n fullPath = path+files[k]\n dataFile=DataPreProcessing(fullPath)\n dictBinCustomerID, dictCoorCustomerID = policySarsa.Dict(dataFile, tiling)\n s, stateID = policySarsa.InitialState(dictBinCustomerID)\n stateSeq.append(stateID)\n action = policySarsa.Boltzman_policy(s, tempertaure_param, w, dictBinCustomerID, stateSeq)\n singleFileDistance = 0\n\n for step in range(1, dataFile._numberOfCities()):\n\n reward = policySarsa.reward(step, stateID, action, dataFile._numberOfCities()-1, dataFile)\n sprime_binary=dictBinCustomerID.get(action)\n stateSeq.append(action)\n\n if step != dataFile._numberOfCities()-1:\n actionprime = policySarsa.Boltzman_policy(sprime_binary, tempertaure_param, w, dictBinCustomerID, stateSeq)\n ssecondprime_binary=dictBinCustomerID.get(actionprime)\n qhat = np.array(w).reshape(1,400)@np.array(sprime_binary).reshape(400, 1)\n qhatprime = np.array(w).reshape(1,400)@np.array(ssecondprime_binary).reshape(400, 1)\n w=w+(alpha*(reward+(gamma * qhatprime)-qhat))*sprime_binary\n singleFileDistance += reward\n stateID = action \n action = actionprime\n\n reward = policySarsa.reward(12, stateID, 11, dataFile._numberOfCities()-1, dataFile)\n sBinaryDepot, stateDepotID = policySarsa.InitialState(dictBinCustomerID)\n stateSeq.append(stateDepotID)\n qfinalhat = np.array(w).reshape(1,400)@np.array(sBinaryDepot).reshape(400, 1)\n w=w+(alpha*(reward - qfinalhat))*sBinaryDepot\n singleFileDistance += reward\n comulativeReward += singleFileDistance\n\n print(\"comulative reward is:\")\n print(comulativeReward)\n\n #############END OF TRAIN\n path, dirs, files = next(os.walk(cwd+\"Test/\"))\n \n for k in range(0,len(files)):\n stateSeq = []\n policySarsa = SemiGradientSarSa(0.1, 2)\n fullPath = path+files[k]\n dataFile=DataPreProcessing(fullPath)\n dictBinCustomerID, dictCoorCustomerID = policySarsa.Dict(dataFile, 
tiling)\n s, stateID = policySarsa.InitialState(dictBinCustomerID)\n stateSeq.append(stateID)\n action = policySarsa.Boltzman_policy(s, tempertaure_param, w, dictBinCustomerID, stateSeq)\n singleFileDistance = 0\n \n for step in range(1, dataFile._numberOfCities()):\n reward = policySarsa.reward(step, stateID, action, dataFile._numberOfCities()-1, dataFile)\n sprime_binary=dictBinCustomerID.get(action)\n stateSeq.append(action)\n\n if step != dataFile._numberOfCities()-1:\n actionprime = policySarsa.Boltzman_policy(sprime_binary, tempertaure_param, w, dictBinCustomerID, stateSeq)\n singleFileDistance += reward\n stateID = action \n action = actionprime\n\n reward = policySarsa.reward(12, stateID, 11, dataFile._numberOfCities()-1, dataFile)\n sBinaryDepot, stateDepotID = policySarsa.InitialState(dictBinCustomerID)\n stateSeq.append(stateDepotID)\n qfinalhat = np.array(w).reshape(1,400)@np.array(sBinaryDepot).reshape(400, 1)\n w=w+(alpha*(reward - qfinalhat))*sBinaryDepot\n singleFileDistance += reward\n print(\"fileName:\")\n print(files[k])\n print(\"sequence\")\n print(stateSeq)\n print(\"total distance:\")\n print(singleFileDistance)\n comulativeReward += singleFileDistance\n\n print(\"comulative reward is:\")\n print(comulativeReward)\n ##########END OF TEST\n return stateSeq,comulativeReward\n\nlistreward=[]\nlisttemp=[1]\n\nfor temp in listtemp:\n print(\"temp=\", temp)\n _, comulativereward = main(0.01, 0.8, 1, 2, 0.05, temp)\n listreward.append(comulativereward)\n\nprint(listreward)\n" ]
[ [ "numpy.array", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "matplotlib.pyplot.annotate", "numpy.ones", "matplotlib.pyplot.ylabel", "numpy.argmax", "numpy.fromstring", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "numpy.negative", "matplotlib.pyplot.yticks", "numpy.exp", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JackHanley26/On-the-Edge-Anomaly-Detection
[ "219d323fbaae8c4ed2bb01e8162a182dbf79c227" ]
[ "src/utils/data_manager.py" ]
[ "import datetime\n\nfrom pandas import read_csv\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\n\n\"\"\"\n# backup\ncolours = [\"#C0C0C0\", \"#808080\", \"#000000\", \"#FF0000\", \"#800000\", \"#FFFF00\", \"#808000\", \"#00FF00\", \"#008000\", \"#00FFFF\",\n \"#008080\", \"#0000FF\", \"#000080\", \"#FF00FF\", \"#800080\", \"#CD5C5C\", \"#FA8072\", \"#E9967A\", ]\n\"\"\"\n\n\ndef date_parser(time_in_secs):\n return datetime.datetime.fromtimestamp(float(time_in_secs))\n\n\ndef read_csv_data(path, parse_date=True):\n if parse_date:\n df = read_csv(path, parse_dates=True, date_parser=date_parser, index_col='timestamp')\n else:\n df = read_csv(path)\n print(\"DataFrame size: %s\" % str(len(df.values)))\n return df\n\n\ndef drop_columns(normal_df, abnormal_df):\n normal_columns = normal_df.columns.values\n abnormal_columns = abnormal_df.columns.values\n\n normal_columns_to_drop = [i for i, v in enumerate(normal_columns) if v not in abnormal_columns]\n abnormal_columns_to_drop = [i for i, v in enumerate(abnormal_columns) if v not in normal_columns]\n\n normal_df.drop(normal_df.columns[normal_columns_to_drop], axis=1, inplace=True)\n\n abnormal_df.drop(abnormal_df.columns[abnormal_columns_to_drop], axis=1, inplace=True)\n\n return normal_df, abnormal_df\n\n\ndef get_feature_count(normal, abnormal):\n if len(normal.columns.values) == len(abnormal.columns.values):\n return len(normal.columns.values)\n raise Exception(\"DataFrames must have the same number of features\")\n\n\ndef get_sample(data, index, size):\n sample = data[index: index + size]\n print(\"Sample size: %s, Sample Shape: %s\" % (len(sample), sample.shape))\n return sample\n\n\ndef encode_values(df):\n encoder = LabelEncoder()\n\n for col in df.columns:\n if df[col].dtype == 'object':\n index = df.columns.get_loc(col)\n df.values[:, index] = encoder.fit_transform(df.values[:, index].astype(str))\n return df.values\n\n\ndef scale_data(data):\n return MinMaxScaler(feature_range=(0, 1)).fit_transform(data)\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "pandas.read_csv", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
hiyoung123/Chinese-Text-Matching-Pytorch
[ "a024a18167fa66e6de8bbb64cfe78699a3e59534" ]
[ "src/evaluation.py" ]
[ "#!usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport pickle\nimport random\nfrom tqdm import tqdm\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score, f1_score\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom transformers import (\n BertTokenizer, BertConfig, BertModel,\n # XLNetTokenizer, XLNetConfig, XLNetModel,\n # AlbertTokenizer, AlbertConfig, AlbertModel,\n)\n\nfrom .models import (\n ESIM, EnhancedRCNN\n)\n\nfrom .datasets import EmbeddingDataset\nfrom utils.log import Log\n\n\nMODEL_CLASSES = {\n 'ESIM': (None, None, None, ESIM, EmbeddingDataset),\n 'EnhancedRCNN': (None, None, None, EnhancedRCNN, EmbeddingDataset),\n}\n\n\nclass Evaluator:\n def __init__(self, config, model):\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.model = model.to(self.device)\n self.model = torch.load(config.model_path, map_location=self.device)\n self.logger = Log()\n\n def evaluate(self, data):\n desc = '[Evaluate]'\n batch_iterator = tqdm(data, desc=desc, ncols=100)\n pre_list = []\n label_list = []\n logits_list = []\n prob_list = []\n with torch.no_grad():\n self.model.eval()\n for i, batch in enumerate(batch_iterator):\n batch = {key: value.to(self.device) for key, value in batch.items()}\n logits, prob = self.model(batch)\n _, pre = torch.max(logits, 1)\n pre_list += pre.cpu().numpy().tolist()\n label_list += batch['label'].cpu().numpy().tolist()\n logits_list += logits.cpu().numpy().tolist()\n prob_list += prob.cpu().numpy().tolist()\n result = {\n 'acc': accuracy_score(label_list, pre_list),\n 'f1': f1_score(label_list, pre_list, average='macro')\n }\n self.logger.info('Evaluate', 'evaluation score is %.4f' % result['acc'])\n self.logger.info('Evaluate', 'evaluation f1 is %.4f' % result['f1'])\n return result\n\n\ndef set_seed(seed):\n # seed = 7874\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef build_embedding(config, vocab):\n embedding_matrix = np.zeros((len(vocab) + 1, config.embed_dim))\n embeddings_index = pickle.load(open(config.embedding_path, 'rb'))\n for word, i in vocab.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n return torch.Tensor(embedding_matrix)\n\n\ndef run_eval(config):\n\n dev = pd.read_csv(config.dev_path)\n\n set_seed(config.seed)\n\n bert_tokenizer, bert_config, bert_model, model, dataset = MODEL_CLASSES[config.model]\n if config.get('embedding_path', False):\n tokenizer = pickle.load(open(config.vocab_path, 'rb'))\n config['embedding'] = build_embedding(config, tokenizer)\n model = model(config)\n else:\n tokenizer = bert_tokenizer.from_pretrained(config.pre_trained_model + '/vocab.txt')\n bert_config = bert_config.from_pretrained(config.pre_trained_model + '/bert_config.json')\n bert = bert_model.from_pretrained(config.pre_trained_model + '/pytorch_model.bin', config=bert_config)\n model = model(bert=bert, config=config)\n\n dev = dataset(dev, tokenizer, config.max_seq_len, True)\n dev = DataLoader(dev, batch_size=config.batch_size)\n evaluator = Evaluator(config, model)\n result = evaluator.evaluate(dev)\n # print(acc)\n" ]
[ [ "pandas.read_csv", "torch.max", "torch.Tensor", "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
oxfordeo/sat-extractor
[ "1d6841751e8b2ce65a02f5d3d608f181a31ab917" ]
[ "src/satextractor/preparer/gcp_preparer.py" ]
[ "from datetime import datetime\nfrom typing import Dict\nfrom typing import List\n\nimport numpy as np\nimport zarr\nfrom gcsfs import GCSFileSystem\nfrom joblib import delayed\nfrom joblib import Parallel\nfrom loguru import logger\nfrom satextractor.models import ExtractionTask\nfrom satextractor.models import Tile\nfrom satextractor.models.constellation_info import BAND_INFO\nfrom satextractor.preparer import create_zarr_patch_structure\nfrom satextractor.utils import tqdm_joblib\nfrom tqdm import tqdm\n\n\ndef gcp_prepare_archive(\n credentials: str,\n tasks: List[ExtractionTask],\n tiles: List[Tile],\n constellations: List[str],\n storage_root: str,\n patch_size: int,\n overwrite: bool,\n chunk_size: int,\n n_jobs: int = -1,\n verbose: int = 0,\n **kwargs,\n) -> bool:\n fs = GCSFileSystem(token=credentials)\n # make a dict of tiles and constellations sensing times\n tile_constellation_sensing_times: Dict[str, Dict[str, List[datetime]]] = {\n tt.id: {kk: [] for kk in BAND_INFO.keys() if kk in constellations}\n for tt in tiles\n }\n\n for task in tasks:\n\n # check tiles meet spec\n assert isinstance(\n task,\n ExtractionTask,\n ), \"Task does not match ExtractionTask spec\"\n\n for tile in task.tiles:\n tile_constellation_sensing_times[tile.id][task.constellation].append(\n task.sensing_time,\n )\n\n # get the unique sensing times\n for tt in tiles:\n for kk in constellations:\n tile_constellation_sensing_times[tt.id][kk] = np.array(\n [\n np.datetime64(el)\n for el in sorted(\n list(set(tile_constellation_sensing_times[tt.id][kk])),\n )\n ],\n )\n\n items = tile_constellation_sensing_times.items()\n with tqdm_joblib(\n tqdm(\n desc=f\"parallel building zarr tile roots on {storage_root}\",\n total=len(items),\n ),\n ):\n Parallel(n_jobs=n_jobs, verbose=verbose, prefer=\"threads\")(\n [\n delayed(zarr.open)(fs.get_mapper(f\"{storage_root}/{tile_id}\"), \"a\")\n for tile_id, _ in items\n ],\n )\n\n logger.info(f\"parallel building zarr archives on {storage_root}\")\n jobs = []\n for tile_id, vv in items:\n for constellation, sensing_times in vv.items():\n jobs.append(\n delayed(create_zarr_patch_structure)(\n fs.get_mapper,\n storage_root,\n tile_id,\n patch_size,\n chunk_size,\n sensing_times,\n constellation,\n BAND_INFO[constellation],\n overwrite,\n ),\n )\n\n with tqdm_joblib(\n tqdm(desc=\"Building Archives.\", total=len(tiles) * len(constellations)),\n ):\n Parallel(n_jobs=n_jobs, verbose=verbose, prefer=\"threads\")(jobs)\n\n return True\n" ]
[ [ "numpy.datetime64" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
garywangiam02/vnpy
[ "fbb168bf977d95ae874e92a3655c6c893db16a1f", "fbb168bf977d95ae874e92a3655c6c893db16a1f" ]
[ "vnpy/app/cta_crypto/strategies/strategy144_macd_channel_group_v1_2.py", "vnpy/app/cta_crypto/strategies/strategy144_macd_channel_group_v1_5.py" ]
[ "# encoding: UTF-8\r\n\r\n# 首先写系统内置模块\r\nimport sys\r\nimport os\r\nfrom datetime import datetime, timedelta, time, date\r\nimport copy\r\nimport traceback\r\nfrom collections import OrderedDict\r\nfrom typing import Union\r\nimport numpy as np\r\n\r\n# 然后是自己编写的模块\r\nfrom vnpy.trader.utility import round_to\r\nfrom vnpy.app.cta_crypto.template import CtaFutureTemplate, Direction, get_underlying_symbol, Interval\r\nfrom vnpy.component.cta_policy import CtaPolicy\r\nfrom vnpy.component.cta_position import CtaPosition\r\nfrom vnpy.component.cta_grid_trade import CtaGridTrade, uuid, CtaGrid\r\nfrom vnpy.component.cta_line_bar import get_cta_bar_type, TickData, BarData, CtaMinuteBar, CtaHourBar, CtaDayBar\r\n\r\n\r\n########################################################################\r\nclass StrategyMacdChannelGroup_v1_2(CtaFutureTemplate):\r\n \"\"\"数字货币CTA MACD+通道 组合策略\r\n v1:\r\n 1.使用变量将MACD的快慢均线交叉点记录,然后获取上次交叉到本次交叉之间的周期数。\r\n 2.当MACD出现顶底背离时,开多开空;\r\n 核心计算: 1.MACD交叉状态记录\r\n 2.构建周期内的高低点区间\r\n 3.描述背离状态,同时保存结果;\r\n 多头进场:1.最近一个MACD信号是金叉,突破周期内高点;\r\n 2. 出现底背离时,开多;\r\n 空头进场:1.最近一个MACD信号是死叉,突破周期内低点;\r\n 2.出现顶背离时,开空;\r\n 出场:移动出场\r\n 周期:60~120分钟以上\r\n v1.1:\r\n 优化趋势开仓信号:\r\n 背驰信号,必须是与线段不同向;\r\n 突破信号,必须是与新段同向\r\n v1.2:\r\n 若不存在反向信号时,才激活趋势进场信号。\r\n \"\"\"\r\n author = u'大佳'\r\n # 输入参数 [ macd快均线长度_慢均线长度_信号线长度_K线周期], 可配置多个参数\r\n bar_names = ['f12_s26_n9_M120']\r\n\r\n # 策略在外部设置的参数\r\n parameters = [\"activate_market\",\r\n \"max_invest_pos\",\r\n \"max_invest_margin\",\r\n \"max_invest_rate\",\r\n \"bar_names\",\r\n \"backtesting\"]\r\n\r\n # ----------------------------------------------------------------------\r\n def __init__(self, cta_engine,\r\n strategy_name,\r\n vt_symbol,\r\n setting=None):\r\n \"\"\"Constructor\"\"\"\r\n super().__init__(cta_engine=cta_engine,\r\n strategy_name=strategy_name,\r\n vt_symbol=vt_symbol,\r\n setting=setting)\r\n\r\n # 创建一个策略规则\r\n self.policy = GroupPolicy(strategy=self)\r\n\r\n # 仓位状态\r\n self.position = CtaPosition(strategy=self) # 0 表示没有仓位,1 表示持有多头,-1 表示持有空头\r\n\r\n # 创建网格交易,用来记录\r\n self.gt = CtaGridTrade(strategy=self)\r\n\r\n self.kline_count = len(self.bar_names)\r\n\r\n self.display_bars = False\r\n\r\n if setting:\r\n # 根据配置文件更新参数\r\n self.update_setting(setting)\r\n\r\n # 更新监控的k线总数\r\n self.kline_count = len(self.bar_names)\r\n\r\n for bar_name in self.bar_names:\r\n # 创建K线\r\n kline_setting = {}\r\n para_fast_len, para_slow_len, para_signal_len, name = bar_name.split('_')\r\n kline_class, interval_num = get_cta_bar_type(name)\r\n kline_setting['name'] = bar_name\r\n\r\n para_fast_len = int(para_fast_len.replace('f', ''))\r\n para_slow_len = int(para_slow_len.replace('s', ''))\r\n para_signal_len = int(para_signal_len.replace('n', ''))\r\n\r\n kline_setting['bar_interval'] = interval_num # K线的Bar时长\r\n kline_setting['para_atr1_len'] = 2 * para_fast_len # ATR均值\r\n kline_setting['para_ma1_len'] = para_fast_len # 第1条均线\r\n kline_setting['para_ma2_len'] = para_slow_len # 第2条均线\r\n\r\n kline_setting['para_macd_fast_len'] = para_fast_len\r\n kline_setting['para_macd_slow_len'] = para_slow_len\r\n kline_setting['para_macd_signal_len'] = para_signal_len\r\n\r\n kline_setting['para_active_chanlun'] = True # 激活缠论\r\n\r\n kline_setting['price_tick'] = self.price_tick\r\n kline_setting['underly_symbol'] = get_underlying_symbol(vt_symbol.split('.')[0]).upper()\r\n kline_setting['is_7x24'] = True\r\n self.write_log(f'创建K线:{kline_setting}')\r\n kline = kline_class(self, self.on_bar_k, kline_setting)\r\n self.klines.update({bar_name: kline})\r\n\r\n 
self.export_klines()\r\n\r\n if self.backtesting:\r\n # 回测时,自动初始化\r\n self.on_init()\r\n\r\n def export_klines(self):\r\n \"\"\"输出K线=》csv文件\"\"\"\r\n if not self.backtesting:\r\n return\r\n\r\n for kline_name, kline in self.klines.items():\r\n # 写入文件\r\n import os\r\n kline.export_filename = os.path.abspath(\r\n os.path.join(self.cta_engine.get_logs_path(),\r\n u'{}_{}.csv'.format(self.strategy_name, kline_name)))\r\n\r\n kline.export_fields = [\r\n {'name': 'datetime', 'source': 'bar', 'attr': 'datetime', 'type_': 'datetime'},\r\n {'name': 'open', 'source': 'bar', 'attr': 'open_price', 'type_': 'float'},\r\n {'name': 'high', 'source': 'bar', 'attr': 'high_price', 'type_': 'float'},\r\n {'name': 'low', 'source': 'bar', 'attr': 'low_price', 'type_': 'float'},\r\n {'name': 'close', 'source': 'bar', 'attr': 'close_price', 'type_': 'float'},\r\n {'name': 'turnover', 'source': 'bar', 'attr': 'turnover', 'type_': 'float'},\r\n {'name': 'volume', 'source': 'bar', 'attr': 'volume', 'type_': 'float'},\r\n {'name': 'open_interest', 'source': 'bar', 'attr': 'open_interest', 'type_': 'float'},\r\n {'name': 'dif', 'source': 'line_bar', 'attr': 'line_dif', 'type_': 'list'},\r\n {'name': 'dea', 'source': 'line_bar', 'attr': 'line_dea', 'type_': 'list'},\r\n {'name': 'macd', 'source': 'line_bar', 'attr': 'line_macd', 'type_': 'list'},\r\n {'name': f'ma{kline.para_ma1_len}', 'source': 'line_bar', 'attr': 'line_ma1', 'type_': 'list'},\r\n {'name': f'ma{kline.para_ma2_len}', 'source': 'line_bar', 'attr': 'line_ma2', 'type_': 'list'},\r\n {'name': f'upper', 'source': 'line_bar', 'attr': 'line_macd_chn_upper', 'type_': 'list'},\r\n {'name': f'lower', 'source': 'line_bar', 'attr': 'line_macd_chn_lower', 'type_': 'list'},\r\n ]\r\n\r\n kline.export_bi_filename = os.path.abspath(\r\n os.path.join(self.cta_engine.get_logs_path(),\r\n u'{}_{}_bi.csv'.format(self.strategy_name, kline_name)))\r\n\r\n kline.export_zs_filename = os.path.abspath(\r\n os.path.join(self.cta_engine.get_logs_path(),\r\n u'{}_{}_zs.csv'.format(self.strategy_name, kline_name)))\r\n\r\n kline.export_duan_filename = os.path.abspath(\r\n os.path.join(self.cta_engine.get_logs_path(),\r\n u'{}_{}_duan.csv'.format(self.strategy_name, kline_name)))\r\n\r\n # ----------------------------------------------------------------------\r\n def on_init(self, force=False):\r\n \"\"\"初始化\"\"\"\r\n self.write_log(u'策略初始化')\r\n\r\n if self.inited:\r\n if force:\r\n self.write_log(u'策略强制初始化')\r\n self.inited = False\r\n self.trading = False # 控制是否启动交易\r\n self.position.pos = 0 # 仓差\r\n self.position.long_pos = 0 # 多头持仓\r\n self.position.short_pos = 0 # 空头持仓\r\n self.gt.up_grids = []\r\n self.gt.dn_grids = []\r\n else:\r\n self.write_log(u'策略初始化')\r\n self.write_log(u'已经初始化过,不再执行')\r\n return\r\n\r\n # 得到持久化的Policy中的子事务数据\r\n self.init_policy()\r\n self.display_tns()\r\n\r\n if not self.backtesting:\r\n self.init_position() # 初始持仓数据\r\n\r\n if not self.backtesting:\r\n # 这里是使用gateway历史数据\r\n if not self.init_data():\r\n self.write_error(u'初始数据失败')\r\n\r\n self.inited = True\r\n if not self.backtesting:\r\n self.trading = True # 控制是否启动交易\r\n\r\n self.write_log(u'策略初始化加载历史持仓、策略数据完成')\r\n self.display_grids()\r\n self.display_tns()\r\n\r\n self.put_event()\r\n\r\n def init_data(self):\r\n \"\"\"初始化数据\"\"\"\r\n try:\r\n # 优先从本地缓存文件,获取缓存\r\n last_bar_dt = self.load_klines_from_cache()\r\n dt_now = datetime.now()\r\n # 开始时间\r\n if last_bar_dt:\r\n load_days = max((dt_now - last_bar_dt).days, 1)\r\n else:\r\n load_days = 90\r\n self.display_bars = False\r\n\r\n def 
on_bar_cb(bar, **kwargs):\r\n \"\"\"\"\"\"\r\n if last_bar_dt and bar.datetime < last_bar_dt:\r\n return\r\n self.cur_price = bar.close_price\r\n self.cur_datetime = bar.datetime\r\n if self.cur_datetime > dt_now - timedelta(days=1) and not self.display_bars:\r\n self.display_bars = True\r\n for kline in self.klines.values():\r\n kline.add_bar(bar)\r\n\r\n self.cta_engine.load_bar(vt_symbol=self.vt_symbol,\r\n days=load_days,\r\n interval=Interval.MINUTE,\r\n callback=on_bar_cb)\r\n return True\r\n\r\n except Exception as ex:\r\n self.write_error(u'init_data Exception:{},{}'.format(str(ex), traceback.format_exc()))\r\n return False\r\n\r\n # ----------------------------------------------------------------------\r\n def on_tick(self, tick: TickData):\r\n \"\"\"行情更新\r\n :type tick: object\r\n \"\"\"\r\n # 实盘检查是否初始化数据完毕。如果数据未初始化完毕,则不更新tick,避免影响cur_price\r\n if not self.backtesting:\r\n if not self.inited:\r\n self.write_log(u'数据还没初始化完毕,不更新tick')\r\n return\r\n\r\n # 更新所有tick dict(包括 指数/主力/历史持仓合约)\r\n self.tick_dict.update({tick.vt_symbol: tick})\r\n\r\n if tick.vt_symbol == self.vt_symbol:\r\n self.cur_tick = tick\r\n self.cur_price = tick.last_price\r\n\r\n else:\r\n # 所有非vt_symbol得tick,全部返回\r\n return\r\n\r\n # 更新策略执行的时间(用于回测时记录发生的时间)\r\n self.cur_datetime = tick.datetime\r\n self.cur_price = tick.last_price\r\n\r\n for kline in self.klines.values():\r\n kline.on_tick(copy.copy(tick))\r\n\r\n if not self.inited or not self.trading:\r\n return\r\n\r\n self.account_pos = self.cta_engine.get_position(vt_symbol=self.vt_symbol, direction=Direction.NET)\r\n\r\n # 执行撤单逻辑\r\n self.tns_cancel_logic(tick.datetime, reopen=False)\r\n\r\n # 网格逐一止损/止盈检查\r\n self.grid_check_stop()\r\n\r\n # 实盘这里是每分钟执行\r\n if self.last_minute != tick.datetime.minute:\r\n self.last_minute = tick.datetime.minute\r\n\r\n # 开仓逻辑处理\r\n self.tns_open_logic()\r\n\r\n self.display_grids()\r\n self.display_tns()\r\n\r\n # 事务平衡仓位\r\n self.tns_calcute_net_pos()\r\n\r\n self.put_event()\r\n\r\n # ----------------------------------------------------------------------\r\n def on_bar(self, bar: BarData):\r\n \"\"\"\r\n 分钟K线数据(仅用于回测时,从策略外部调用)\r\n :param bar:\r\n :return:\r\n \"\"\"\r\n\r\n # if '201604082134' in self.cur_datetime.strftime(\"%Y%m%d%H%M\"):\r\n # a = 1\r\n\r\n if self.backtesting:\r\n new_dt = bar.datetime + timedelta(seconds=60)\r\n if self.cur_datetime and new_dt < self.cur_datetime:\r\n return\r\n self.cur_datetime = new_dt\r\n self.cur_price = bar.close_price\r\n\r\n if self.inited:\r\n self.account_pos = self.cta_engine.get_position(vt_symbol=self.vt_symbol, direction=Direction.NET)\r\n\r\n # 执行撤单逻辑\r\n self.tns_cancel_logic(bar.datetime)\r\n\r\n # 网格逐一止损/止盈检查\r\n self.grid_check_stop()\r\n\r\n # 推送tick到大周期K线\r\n try:\r\n for kline_name, kline in self.klines.items():\r\n if kline_name.endswith('M1'):\r\n bar_complete = True\r\n else:\r\n bar_complete = False\r\n kline.add_bar(bar=copy.copy(bar), bar_is_completed=bar_complete)\r\n\r\n except Exception as ex:\r\n self.write_error(u'{},{}'.format(str(ex), traceback.format_exc()))\r\n\r\n if self.inited and self.trading:\r\n # 开仓逻辑处理\r\n self.tns_open_logic()\r\n\r\n # 事务平衡仓位\r\n self.tns_calcute_net_pos()\r\n\r\n # 显示各指标信息\r\n self.display_tns()\r\n\r\n def is_macd_signal(self, kline, direction):\r\n \"\"\"\r\n 条件1:判断是否突破macd的通道上下轨道,突破就开仓\r\n 'start': self.cur_datetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n 'end': self.cur_datetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n 'cross': self.cur_macd_cross,\r\n 'macd_count': self.cur_macd_count,\r\n 'max_price': 
self.high_array[-1],\r\n 'min_price': self.low_array[-1],\r\n 'max_dif': self.line_dif[-1],\r\n 'min_dif': self.line_dif[-1],\r\n 'macd_area': abs(self.line_macd[-1]),\r\n 'max_macd': self.line_macd[-1],\r\n 'min_macd': self.line_macd[-1]\r\n 条件2: 判断是否顶背离、底背离\r\n :param kline: k线\r\n :param direction: 需要开仓的方向\r\n :return: True/False, 信号\r\n \"\"\"\r\n\r\n if len(kline.macd_segment_list) < 3 or len(kline.bi_zs_list) == 0:\r\n return False, \"\"\r\n\r\n # 缠论线段\r\n cur_duan = kline.duan_list[-1]\r\n cur_bi = kline.bi_list[-1]\r\n cur_zs = kline.bi_zs_list[-1]\r\n cur_fx = kline.fenxing_list[-1]\r\n\r\n # 最后三个macd的分段\r\n tre_seg, pre_seg, cur_seg = kline.macd_segment_list[-3:]\r\n\r\n # 是否有原信号\r\n signal = self.policy.signals.get(kline.name, {})\r\n last_signal = signal.get('last_signal', None)\r\n open_price = signal.get('open_price', None)\r\n duan_start = signal.get('duan_start', None)\r\n stop_price = signal.get('stop_price')\r\n\r\n if direction == Direction.LONG:\r\n\r\n # 当前属于零轴上方macd的金叉,判断是否突破上一个金叉周期的最高位\r\n if kline.name not in self.policy.short_klines\\\r\n and cur_duan.direction == 1 \\\r\n and cur_duan.high == cur_bi.high \\\r\n and max([bi.high for bi in cur_duan.bi_list]) == cur_duan.high \\\r\n and cur_seg['macd_count'] > 0 \\\r\n and kline.close_array[-1] > tre_seg['max_close'] \\\r\n and pre_seg['cross'] > cur_seg['cross'] > 0:\r\n\r\n # 排除1:\r\n if cur_zs.end > cur_duan.start and cur_duan.high < cur_zs.high:\r\n if (not open_price) or open_price > kline.cur_price:\r\n return False, ''\r\n # 排除2:\r\n if len(kline.duan_list) > 2:\r\n tre_duan, pre_duan = kline.duan_list[-3:-1]\r\n if pre_duan.height < tre_duan.height* 0.5 and pre_duan.height > cur_duan.height:\r\n return False, \"\"\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'long_break',\r\n \"signal\": f'{kline.name}.long',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'long_break'\r\n\r\n # 出现底背离\r\n if cur_duan.direction == -1 \\\r\n and len(cur_duan.bi_list) >= 5 \\\r\n and cur_duan.low == cur_bi.low \\\r\n and cur_duan.low == np.float32(pre_seg['min_price']) \\\r\n and min([bi.low for bi in cur_duan.bi_list]) == cur_bi.low \\\r\n and kline.cur_macd_count == 1 \\\r\n and abs(tre_seg['macd_count']) > 4 \\\r\n and abs(pre_seg['macd_count']) >= 4 \\\r\n and kline.dif_buttom_divergence:\r\n\r\n if cur_zs.end > cur_duan.start and cur_duan.low > cur_zs.low:\r\n return False, ''\r\n\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'buttom_div',\r\n \"signal\": f'{kline.name}.dif_div',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'buttom_div'\r\n\r\n # 连续递增的五个分笔以上的段,末端形成最后两个分笔的底背离\r\n if cur_duan.direction == -1:\r\n extra_bi_list = [bi for bi in kline.bi_list[-3:] if bi.start >= cur_duan.end]\r\n if len(extra_bi_list) == 1 \\\r\n and len(cur_duan.bi_list) >= 5 \\\r\n and cur_duan.bi_list[-1].high < cur_duan.bi_list[-4].low\\\r\n and extra_bi_list[0].high > cur_duan.bi_list[-2].low\\\r\n and kline.is_fx_macd_divergence(direction=Direction.SHORT, cur_duan=cur_duan):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'buttom_div',\r\n \"signal\": f'{kline.name}.bi_divi',\r\n \"stop_price\": cur_duan.end\r\n }\r\n self.save_dist(d)\r\n return True, 'buttom_div'\r\n\r\n # 做多得走势线段后,出现反向下跌线段,下跌线段内具有一个中枢,形成中枢背驰\r\n if cur_duan.direction == -1 and len(kline.duan_list) >= 2:\r\n # 需要参照前一线段\r\n tre_duan, 
pre_duan = kline.duan_list[-3:-1]\r\n if cur_duan.height < pre_duan.height * 0.618:\r\n # 发生段背驰\r\n if kline.is_zs_beichi_inside_duan(direction=Direction.SHORT, cur_duan=cur_duan) \\\r\n and len(cur_duan.bi_list) >= 5:\r\n if 0 < cur_seg['macd_count'] < 4 \\\r\n and cur_duan.low == np.float32(pre_seg['min_price']):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'bi_beichi',\r\n \"signal\": f'{kline.name}.bi_beichi',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'bi_beichi'\r\n\r\n # 判断三个线段,是否形成背驰\r\n if max(pre_duan.height, cur_duan.height) < tre_duan.height * 0.618:\r\n\r\n if tre_duan.low > cur_duan.low: # and tre_duan.atan > cur_duan.atan:\r\n extra_bi_list = [bi for bi in kline.bi_list[-3:] if bi.start >= cur_duan.end]\r\n if len(extra_bi_list) == 1:\r\n pre_dif = kline.get_dif_by_dt(tre_duan.end)\r\n cur_dif = kline.get_dif_by_dt(cur_duan.end)\r\n if pre_dif < cur_dif < 0:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'duan_beichi',\r\n \"signal\": f'{kline.name}.duan_beichi',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'duan_beichi'\r\n\r\n # 突破或顶背离失败\r\n if open_price and last_signal and duan_start:\r\n # 线段是多,持仓是空单,开仓价低于当前线段得高点(即当前价),反手做多\r\n\r\n # 顶背离失败,原有段出现新高\r\n if cur_duan.direction == 1 \\\r\n and last_signal == 'short' \\\r\n and open_price < cur_duan.high \\\r\n and stop_price and stop_price < cur_duan.high \\\r\n and cur_duan.start < duan_start < cur_duan.end:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'top_div_fail',\r\n \"signal\": f'{kline.name}.long',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'long_break'\r\n\r\n # 原有段突破做多失败,出现反向的做空段。\r\n # 反向线段的高点,高于做空的位置,当前必须是完整的下跌一笔,当前价格高于开空位置\r\n if cur_duan.direction == 1 \\\r\n and last_signal == 'short' \\\r\n and open_price < cur_duan.high \\\r\n and duan_start < cur_duan.bi_list[-1].start \\\r\n and kline.cur_price > open_price:\r\n if cur_zs.end > cur_duan.start and cur_bi.low < cur_zs.high:\r\n return False, ''\r\n\r\n # 要么一根分笔直接打穿,或者三根分笔形成小线段\r\n if len(cur_duan.bi_list) == 1 \\\r\n or (cur_bi.direction == -1 and not cur_fx.is_rt):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'short_rev',\r\n \"signal\": f'{kline.name}.long',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'long_break'\r\n\r\n if direction == Direction.SHORT:\r\n # 当前属于macd的死叉,判断是否突破上一个金叉周期的最低位\r\n if kline.name not in self.policy.long_klines\\\r\n and cur_duan.direction == -1 \\\r\n and cur_duan.low == cur_bi.low \\\r\n and min([bi.low for bi in cur_duan.bi_list]) == cur_duan.low \\\r\n and cur_seg['macd_count'] < 0 \\\r\n and kline.close_array[-1] < tre_seg['min_close'] \\\r\n and pre_seg['cross'] < cur_seg['cross'] < 0:\r\n # 当前线段的结尾,不能落入中枢(可能是来自一个横跨多个线段的笔中枢)\r\n if cur_zs.end > cur_duan.start and cur_duan.low > cur_zs.low:\r\n if (not open_price) or open_price < kline.cur_price:\r\n return False, ''\r\n # 排除2:\r\n if len(kline.duan_list) > 2:\r\n tre_duan,pre_duan = kline.duan_list[-3:-1]\r\n if pre_duan.height < tre_duan.height* 0.5 and pre_duan.height > cur_duan.height:\r\n return False, \"\"\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'short_break',\r\n \"signal\": f'{kline.name}.short',\r\n \"stop_price\": 
float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'short_break'\r\n if kline.cur_macd_count == -1:\r\n a = 1\r\n # # 出现顶背离, pre_seg 存在线段得最高价格\r\n if cur_duan.direction == 1 \\\r\n and len(cur_duan.bi_list) >= 5 \\\r\n and cur_duan.high == cur_bi.high \\\r\n and cur_duan.high == np.float32(pre_seg['max_price']) \\\r\n and max([bi.high for bi in cur_duan.bi_list]) == cur_bi.high \\\r\n and kline.cur_macd_count == -1 \\\r\n and abs(tre_seg['macd_count']) > 4 \\\r\n and abs(pre_seg['macd_count']) >= 4 \\\r\n and kline.dif_top_divergence:\r\n\r\n # 当前线段的结尾,不能落入中枢(可能是来自一个横跨多个线段的笔中枢)\r\n if cur_zs.end > cur_duan.start and cur_duan.high < cur_zs.high:\r\n return False, ''\r\n\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'top_div',\r\n \"signal\": f'{kline.name}.dif_div',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'top_div'\r\n\r\n # 上涨线段最后两个分笔的背离\r\n if cur_duan.direction == 1:\r\n extra_bi_list = [bi for bi in kline.bi_list[-3:] if bi.start >= cur_duan.end]\r\n if len(extra_bi_list) == 1 \\\r\n and len(cur_duan.bi_list) >=5 \\\r\n and cur_duan.bi_list[-1].low > cur_duan.bi_list[-4].high\\\r\n and extra_bi_list[0].low < cur_duan.bi_list[-2].high\\\r\n and kline.is_fx_macd_divergence(direction=Direction.LONG, cur_duan=cur_duan):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'top_div',\r\n \"signal\": f'{kline.name}.bi_dif',\r\n \"stop_price\": cur_duan.end\r\n }\r\n self.save_dist(d)\r\n return True, 'top_div'\r\n\r\n # 做空得走势线段后,出现反向上升线段,线段具有中枢,形成中枢背驰\r\n if cur_duan.direction == 1 and len(kline.duan_list) >= 3:\r\n # 需要参照前一线段\r\n tre_duan, pre_duan = kline.duan_list[-3:-1]\r\n if cur_duan.height < pre_duan.height * 0.618:\r\n # 发生段背驰\r\n if kline.is_zs_beichi_inside_duan(direction=Direction.LONG, cur_duan=cur_duan) \\\r\n and len(cur_duan.bi_list) >= 5:\r\n if -4 < cur_seg['macd_count'] < 0 \\\r\n and cur_duan.high == np.float32(pre_seg['max_price']):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'bi_beichi',\r\n \"signal\": f'{kline.name}.bi_beichi',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'bi_beichi'\r\n\r\n # 判断三个线段,是否形成背驰\r\n if max(pre_duan.height, cur_duan.height) < tre_duan.height * 0.618:\r\n\r\n if tre_duan.high < cur_duan.high: # and tre_duan.atan > cur_duan.atan:\r\n extra_bi_list = [bi for bi in kline.bi_list[-3:] if bi.start >= cur_duan.end]\r\n if len(extra_bi_list) == 1:\r\n pre_dif = kline.get_dif_by_dt(tre_duan.end)\r\n cur_dif = kline.get_dif_by_dt(cur_duan.end)\r\n if pre_dif > cur_dif > 0:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'duan_beichi',\r\n \"signal\": f'{kline.name}.duan_beichi',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'duan_beichi'\r\n\r\n # 突破或底背离失败\r\n if open_price and last_signal and duan_start:\r\n\r\n # 底背离失败,原有段延续出现新低\r\n if cur_duan.direction == -1 \\\r\n and last_signal == 'long' \\\r\n and open_price > cur_duan.low \\\r\n and stop_price and stop_price > cur_duan.low \\\r\n and cur_duan.start < duan_start < cur_duan.end:\r\n # and kline.ma12_count < 0:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'buttom_div_fail',\r\n \"signal\": f'{kline.name}.short',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n 
self.save_dist(d)\r\n return True, 'short_break'\r\n\r\n # 做多突破失败,反转出现做空线段\r\n # 反向线段的底点,低于做多的开仓价,当前必须是完整的上涨一笔,当前价格低于开多价\r\n if cur_duan.direction == -1 \\\r\n and last_signal == 'long' \\\r\n and open_price > cur_duan.low \\\r\n and duan_start <= cur_duan.bi_list[-1].start \\\r\n and kline.cur_price < open_price:\r\n\r\n if cur_zs.end > cur_duan.start and cur_bi.high > cur_zs.low:\r\n return False, ''\r\n if len(cur_duan.bi_list) == 1 \\\r\n or (cur_bi.direction == 1 \\\r\n and not cur_fx.is_rt):\r\n # and kline.ma12_count < 0:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'long_rev',\r\n \"signal\": f'{kline.name}.short',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'short_break'\r\n\r\n return False, \"\"\r\n\r\n def tns_open_logic(self):\r\n \"\"\"\r\n 开仓逻辑\r\n :return:\r\n \"\"\"\r\n if self.entrust != 0:\r\n return\r\n\r\n if self.cur_datetime.strftime(\"%Y-%m-%d %H\") in [ '2019-12-20 10','2020-06-02 16' ]:\r\n a = 1\r\n\r\n for kline_name in list(self.klines.keys()):\r\n kline = self.klines.get(kline_name)\r\n\r\n if len(kline.ma12_cross_list) < 3 or len(kline.duan_list) < 1:\r\n continue\r\n\r\n # 做多事务\r\n if kline_name not in self.policy.long_klines:\r\n\r\n # 判断1:macd金叉+突破通道, 或者是底背离\r\n cond01, signal_name = self.is_macd_signal(kline, direction=Direction.LONG)\r\n\r\n if cond01:\r\n signal = self.policy.signals.get(kline_name, {})\r\n if signal.get('last_signal', '') != 'long':\r\n # 出现多头突破信号\r\n if signal_name == 'long_break':\r\n low = kline.duan_list[-1].low\r\n duan_start = kline.duan_list[-1].start\r\n # 出现底背离信号,或者中枢背驰等反转信号\r\n else:\r\n low = kline.duan_list[-1].low\r\n duan_start = kline.duan_list[-1].end\r\n\r\n signal = {'last_signal': 'long',\r\n 'last_signal_time': self.cur_datetime,\r\n 'stop_price': float(low),\r\n 'open_price': kline.cur_price,\r\n 'duan_start': duan_start}\r\n\r\n self.policy.signals.update({kline_name: signal})\r\n self.policy.save()\r\n\r\n if kline_name in self.policy.short_klines:\r\n self.write_log(u'从做空信号队列中移除:{}'.format(kline_name))\r\n self.policy.short_klines.remove(kline_name)\r\n\r\n self.write_log(u'从做多信号队列中增加:{}'.format(kline_name))\r\n self.policy.long_klines.append(kline_name)\r\n continue\r\n\r\n # 做空事务\r\n if kline_name not in self.policy.short_klines:\r\n\r\n # 判断1:死叉+突破 或者顶背离\r\n cond01, signal_name = self.is_macd_signal(kline, direction=Direction.SHORT)\r\n\r\n if cond01:\r\n signal = self.policy.signals.get(kline_name, {})\r\n if signal.get('last_signal', '') != 'short':\r\n if signal_name == 'short_break':\r\n high = kline.duan_list[-1].high\r\n duan_start = kline.duan_list[-1].start\r\n else:\r\n high = kline.duan_list[-1].high\r\n duan_start = kline.duan_list[-1].end\r\n\r\n signal = {'last_signal': 'short',\r\n 'last_signal_time': self.cur_datetime,\r\n 'stop_price': float(high),\r\n 'open_price': kline.cur_price,\r\n 'duan_start': duan_start\r\n }\r\n self.policy.signals.update({kline_name: signal})\r\n self.policy.save()\r\n\r\n if kline_name in self.policy.long_klines:\r\n self.write_log(u'从做多信号队列中移除:{}'.format(kline_name))\r\n self.policy.long_klines.remove(kline_name)\r\n\r\n self.write_log(u'从做空信号队列中增加:{}'.format(kline_name))\r\n self.policy.short_klines.append(kline_name)\r\n\r\n def on_bar_k(self, bar, **kwargs):\r\n \"\"\"\r\n K线数据\r\n :param bar: 预定的周期Bar\r\n :return:\r\n \"\"\"\r\n pass\r\n\r\n def tns_calcute_net_pos(self):\r\n \"\"\"事务计算仓位轧差\"\"\"\r\n if not self.trading or self.entrust != 0:\r\n 
return\r\n\r\n self.account_pos = self.cta_engine.get_position(self.vt_symbol, direction=Direction.NET)\r\n if not self.account_pos:\r\n self.write_error(f'未能获取{self.vt_symbol}净仓')\r\n\r\n # 校验多一次多空信号\r\n long_klines = [s for s in self.policy.long_klines if s in self.bar_names]\r\n short_klines = [s for s in self.policy.short_klines if s in self.bar_names]\r\n\r\n if len(long_klines) != len(self.policy.long_klines):\r\n self.send_wechat(f'{self.strategy_name}多头信号校验不一致,修正{self.policy.long_klines} => {long_klines}')\r\n self.policy.long_klines = copy.copy(long_klines)\r\n\r\n if len(short_klines) != len(self.policy.short_klines):\r\n self.send_wechat(f'{self.strategy_name}空头信号校验不一致,修正:{self.policy.short_klines} => {short_klines}')\r\n self.policy.short_klines = copy.copy(short_klines)\r\n\r\n # 多/空/净仓分数\r\n long_kline_count = len(self.policy.long_klines)\r\n short_kline_count = len(self.policy.short_klines)\r\n net_kline_count = long_kline_count - short_kline_count\r\n\r\n if net_kline_count != self.policy.last_net_count:\r\n self.write_log(u'信号K线净仓变化 {} =>{}'.format(self.policy.last_net_count, net_kline_count))\r\n self.policy.last_net_count = net_kline_count\r\n\r\n # 计算目标头寸,(正数:多头, 负数:空头,0:持平)\r\n if self.max_invest_pos > 0:\r\n # 采用固定最大仓位时\r\n target_volume = round_to(\r\n value=self.max_invest_pos * net_kline_count / self.kline_count,\r\n target=self.volumn_tick)\r\n single_volume = round_to(\r\n value=float(self.max_invest_pos / self.kline_count),\r\n target=self.volumn_tick)\r\n max_volume = self.max_invest_pos\r\n else:\r\n # 采用资金投入百分比\r\n balance, avaliable, _, _ = self.cta_engine.get_account()\r\n invest_margin = balance * self.max_invest_rate\r\n if invest_margin > self.max_invest_margin > 0:\r\n invest_margin = self.max_invest_margin\r\n max_volume = round_to(\r\n value=invest_margin / (self.cur_price * self.margin_rate),\r\n target=self.volumn_tick)\r\n single_volume = round_to(\r\n value=float(max_volume / self.kline_count),\r\n target=self.volumn_tick)\r\n target_volume = round_to(\r\n value=max_volume * net_kline_count / self.kline_count,\r\n target=self.volumn_tick)\r\n\r\n diff_volume = target_volume - self.position.pos\r\n diff_volume = round(diff_volume, 7)\r\n single_volume = round(single_volume, 7)\r\n\r\n # 排除一些噪音(根据净值百分比出来的偏差)\r\n if abs(diff_volume) < single_volume * 0.8:\r\n return\r\n acc_volume = self.account_pos.volume if self.account_pos else 0\r\n\r\n self.write_log(f\"{self.vt_symbol}, 账号净仓:{acc_volume},\"\r\n f\"策略净仓:{self.position.pos},多单:{self.position.long_pos},空单:{self.position.short_pos}\\n\"\r\n f\"目标仓位:{target_volume},偏差仓位:{diff_volume},\"\r\n f\"最大限仓:{max_volume}, 单次变动:{single_volume}\")\r\n\r\n if diff_volume > 0:\r\n\r\n cover_volume = 0\r\n buy_volume = diff_volume\r\n if self.position.short_pos < 0:\r\n cover_volume = abs(self.position.short_pos)\r\n if cover_volume > diff_volume:\r\n cover_volume = diff_volume\r\n buy_volume = 0\r\n else:\r\n buy_volume = round(round_to(diff_volume - cover_volume, self.volumn_tick), 7)\r\n\r\n self.write_log(f'需要增加{self.vt_symbol}仓位{diff_volume} = [平空:{cover_volume}] + 开多{buy_volume}]')\r\n\r\n if cover_volume > 0:\r\n self.write_log(f'执行 {self.vt_symbol} cover:{cover_volume}')\r\n ret = self.tns_process_cover(cover_volume=cover_volume)\r\n if ret:\r\n self.write_log(f'委托平仓空单成功')\r\n return\r\n else:\r\n self.write_log(u'执行平仓失败,转移买入数量:{} => {}'.format(buy_volume, buy_volume + cover_volume))\r\n buy_volume += cover_volume\r\n buy_volume = round(buy_volume, 7)\r\n\r\n if buy_volume > 0:\r\n self.write_log(f'执行 
{self.vt_symbol} buy:{buy_volume}')\r\n grid = CtaGrid(direction=Direction.LONG,\r\n vt_symbol=self.vt_symbol,\r\n open_price=self.cur_price,\r\n close_price=sys.maxsize,\r\n stop_price=0,\r\n volume=buy_volume)\r\n\r\n ret = self.grid_buy(grid)\r\n if not ret:\r\n self.write_error(u'执行买入仓位事务失败')\r\n\r\n elif diff_volume < 0:\r\n\r\n sell_volume = 0\r\n short_volume = abs(diff_volume)\r\n if self.position.long_pos > 0:\r\n sell_volume = abs(self.position.long_pos)\r\n if sell_volume > abs(diff_volume):\r\n sell_volume = abs(diff_volume)\r\n short_volume = 0\r\n else:\r\n short_volume = abs(diff_volume) - sell_volume\r\n short_volume = round(round_to(short_volume, self.volumn_tick), 7)\r\n self.write_log(f'需要减少{self.vt_symbol}仓位{diff_volume} = [多平:{sell_volume}] + 空开{short_volume}]')\r\n\r\n if sell_volume > 0:\r\n self.write_log(f'执行 {self.vt_symbol}sell:{sell_volume}')\r\n ret = self.tns_process_sell(sell_volume=sell_volume)\r\n if ret:\r\n self.write_log(f'委托平仓多单成功')\r\n return\r\n else:\r\n self.write_log(u'执行平仓失败,转移做空数量:{} => {}'.format(short_volume, short_volume + sell_volume))\r\n short_volume += sell_volume\r\n short_volume = round_to(short_volume, self.volumn_tick)\r\n\r\n if short_volume > 0:\r\n self.write_log(f'执行 {self.vt_symbol} short:{short_volume}')\r\n grid = CtaGrid(direction=Direction.SHORT,\r\n vt_symbol=self.vt_symbol,\r\n open_price=self.cur_price,\r\n close_price=-sys.maxsize,\r\n stop_price=0,\r\n volume=short_volume)\r\n\r\n ret = self.grid_short(grid)\r\n if not ret:\r\n self.write_error(u'执行调整仓位事务失败')\r\n\r\n self.policy.save()\r\n\r\n def tns_process_cover(self, cover_volume):\r\n \"\"\"事务执行平空计划\"\"\"\r\n\r\n # 合约得持仓信息\r\n if self.account_pos is None:\r\n self.write_error(u'当前{}合约得持仓信息获取不到'.format(self.vt_symbol))\r\n return False\r\n\r\n cover_grid = self.tns_get_grid(direction=Direction.SHORT, close_volume=cover_volume)\r\n if cover_grid is None:\r\n self.write_error(u'无法获取合适的平空网格')\r\n return False\r\n\r\n return self.grid_cover(cover_grid)\r\n\r\n def tns_process_sell(self, sell_volume):\r\n \"\"\"事务执行平多计划\"\"\"\r\n\r\n # 合约得持仓信息\r\n if self.account_pos is None:\r\n self.write_error(u'当前{}合约得持仓信息获取不到'.format(self.vt_symbol))\r\n return False\r\n\r\n sell_grid = self.tns_get_grid(direction=Direction.LONG, close_volume=sell_volume)\r\n if sell_grid is None:\r\n self.write_error(u'无法获取合适的平多网格')\r\n return False\r\n\r\n return self.grid_sell(sell_grid)\r\n\r\n def tns_get_grid(self, direction, close_volume):\r\n \"\"\"根据需要平仓的volume,选取/创建出一个grid\"\"\"\r\n\r\n opened_grids = self.gt.get_opened_grids(direction=direction)\r\n if len(opened_grids) == 0:\r\n self.write_error(u'当前没有{}单得网格'.format(direction))\r\n return None\r\n\r\n select_grid = None\r\n remove_gids = []\r\n\r\n for g in opened_grids:\r\n if g.order_status:\r\n self.write_log(f'该网格正在委托中,不选择:{g.__dict__}')\r\n continue\r\n\r\n if select_grid is None:\r\n select_grid = g\r\n # 恰好等于需要close的数量\r\n if round(select_grid.volume, 7) == close_volume:\r\n self.write_log(u'选中首个网格,仓位:{}'.format(close_volume))\r\n break\r\n # volume 大于需要close的数量\r\n if select_grid.volume > close_volume:\r\n remain_volume = select_grid.volume - close_volume\r\n remain_volume = round(remain_volume, 7)\r\n select_grid.volume = close_volume\r\n remain_grid = copy.copy(select_grid)\r\n remain_grid.id = str(uuid.uuid1())\r\n remain_grid.volume = remain_volume\r\n if direction == Direction.SHORT:\r\n self.gt.up_grids.append(remain_grid)\r\n else:\r\n self.gt.dn_grids.append(remain_grid)\r\n 
self.write_log(u'选择首个网格,仓位超出,创建新的剩余网格:{}'.format(remain_volume))\r\n break\r\n else:\r\n # 如果\r\n if select_grid.volume + g.volume <= close_volume:\r\n old_volume = select_grid.volume\r\n select_grid.volume += g.volume\r\n select_grid.volume = round(select_grid.volume, 7)\r\n\r\n g.volume = 0\r\n remove_gids.append(g.id)\r\n self.write_log(u'close_volume: {} => {},需要移除:{}'\r\n .format(old_volume, select_grid.volume, g.__dict__))\r\n if select_grid.volume == close_volume:\r\n break\r\n elif select_grid.volume + g.volume > close_volume:\r\n g.volume -= (close_volume - select_grid.volume)\r\n select_grid.volume = close_volume\r\n self.write_log(u'cover_volume已满足')\r\n break\r\n\r\n if select_grid is None:\r\n self.write_error(u'没有可选择的{}单网格'.format(direction))\r\n return None\r\n\r\n if round(select_grid.volume, 7) != close_volume:\r\n self.write_error(u'没有可满足数量{}的{}单网格'.format(close_volume, direction))\r\n return None\r\n\r\n self.gt.remove_grids_by_ids(direction=direction, ids=remove_gids)\r\n\r\n return select_grid\r\n\r\n def display_tns(self):\r\n if not self.inited:\r\n return\r\n if self.backtesting:\r\n return\r\n\r\n\r\nclass GroupPolicy(CtaPolicy):\r\n \"\"\"组合策略事务\"\"\"\r\n\r\n def __init__(self, strategy):\r\n super().__init__(strategy)\r\n\r\n self.signals = {} # kline_name: { 'last_signal': '', 'last_signal_time': datetime }\r\n\r\n self.long_klines = [] # 做多信号得kline.name list\r\n self.short_klines = [] # 做空信号得kline.name list\r\n\r\n self.last_net_count = 0\r\n self.last_fund_rate = 1\r\n\r\n def to_json(self):\r\n \"\"\"\r\n 将数据转换成dict\r\n :return:\r\n \"\"\"\r\n j = dict()\r\n j['create_time'] = self.create_time.strftime(\r\n '%Y-%m-%d %H:%M:%S') if self.create_time is not None else \"\"\r\n j['save_time'] = self.save_time.strftime('%Y-%m-%d %H:%M:%S') if self.save_time is not None else \"\"\r\n\r\n d = {}\r\n for kline_name, signal in self.signals.items():\r\n save_signal = copy.deepcopy(signal)\r\n\r\n last_signal_time = save_signal.get('last_signal_time', None)\r\n\r\n if isinstance(last_signal_time, datetime):\r\n save_signal.update({\"last_signal_time\": last_signal_time.strftime(\r\n '%Y-%m-%d %H:%M:%S')})\r\n elif last_signal_time is None:\r\n save_signal.update({\"last_signal_time\": \"\"})\r\n\r\n d.update({kline_name: save_signal})\r\n j['signals'] = d\r\n\r\n j['long_klines'] = self.long_klines\r\n j['short_klines'] = self.short_klines\r\n\r\n j['last_net_count'] = self.last_net_count\r\n j['last_fund_rate'] = self.last_fund_rate\r\n return j\r\n\r\n def from_json(self, json_data):\r\n \"\"\"\r\n 将dict转化为属性\r\n :param json_data:\r\n :return:\r\n \"\"\"\r\n if not isinstance(json_data, dict):\r\n return\r\n\r\n if 'create_time' in json_data:\r\n try:\r\n if len(json_data['create_time']) > 0:\r\n self.create_time = datetime.strptime(json_data['create_time'], '%Y-%m-%d %H:%M:%S')\r\n else:\r\n self.create_time = datetime.now()\r\n except Exception as ex:\r\n self.create_time = datetime.now()\r\n\r\n if 'save_time' in json_data:\r\n try:\r\n if len(json_data['save_time']) > 0:\r\n self.save_time = datetime.strptime(json_data['save_time'], '%Y-%m-%d %H:%M:%S')\r\n else:\r\n self.save_time = datetime.now()\r\n except Exception as ex:\r\n self.save_time = datetime.now()\r\n\r\n signals = json_data.get('signals', {})\r\n for kline_name, signal in signals.items():\r\n last_signal = signal.get('last_signal', \"\")\r\n str_last_signal_time = signal.get('last_signal_time', \"\")\r\n last_signal_time = None\r\n try:\r\n if len(str_last_signal_time) > 0:\r\n last_signal_time = 
datetime.strptime(str_last_signal_time, '%Y-%m-%d %H:%M:%S')\r\n else:\r\n last_signal_time = None\r\n except Exception as ex:\r\n last_signal_time = None\r\n signal.update({'last_signal_time': last_signal_time})\r\n self.signals.update({kline_name: signal})\r\n\r\n self.long_klines = json_data.get('long_klines', [])\r\n self.short_klines = json_data.get('short_klines', [])\r\n self.last_net_count = json_data.get('last_net_count', 0)\r\n self.last_fund_rate = json_data.get('last_fund_rate', 1)\r\n\r\n def clean(self):\r\n \"\"\"\r\n 清空数据\r\n :return:\r\n \"\"\"\r\n self.write_log(u'清空policy数据')\r\n self.signals = {}\r\n self.long_klines = []\r\n self.short_klines = []\r\n self.last_net_count = 0\r\n self.last_fund_rate = 1\r\n", "# encoding: UTF-8\r\n\r\n# 首先写系统内置模块\r\nimport sys\r\nimport os\r\nfrom datetime import datetime, timedelta, time, date\r\nimport copy\r\nimport traceback\r\nfrom collections import OrderedDict\r\nfrom typing import Union\r\nimport numpy as np\r\n\r\n# 然后是自己编写的模块\r\nfrom vnpy.trader.utility import round_to\r\nfrom vnpy.app.cta_crypto.template import CtaFutureTemplate, Direction, get_underlying_symbol, Interval\r\nfrom vnpy.component.cta_policy import CtaPolicy\r\nfrom vnpy.component.cta_position import CtaPosition\r\nfrom vnpy.component.cta_grid_trade import CtaGridTrade, uuid, CtaGrid\r\nfrom vnpy.component.cta_line_bar import get_cta_bar_type, TickData, BarData, CtaMinuteBar, CtaHourBar, CtaDayBar\r\n\r\n\r\n########################################################################\r\nclass StrategyMacdChannelGroup_v1_5(CtaFutureTemplate):\r\n \"\"\"数字货币CTA MACD+通道 组合策略\r\n v1:\r\n 1.使用变量将MACD的快慢均线交叉点记录,然后获取上次交叉到本次交叉之间的周期数。\r\n 2.当MACD出现顶底背离时,开多开空;\r\n 核心计算: 1.MACD交叉状态记录\r\n 2.构建周期内的高低点区间\r\n 3.描述背离状态,同时保存结果;\r\n 多头进场:1.最近一个MACD信号是金叉,突破周期内高点;\r\n 2. 
出现底背离时,开多;\r\n 空头进场:1.最近一个MACD信号是死叉,突破周期内低点;\r\n 2.出现顶背离时,开空;\r\n 出场:移动出场\r\n 周期:60~120分钟以上\r\n \"\"\"\r\n author = u'大佳'\r\n # 输入参数 [ macd快均线长度_慢均线长度_信号线长度_K线周期], 可配置多个参数\r\n bar_names = ['f12_s26_n9_M120']\r\n\r\n # 策略在外部设置的参数\r\n parameters = [\"activate_market\",\r\n \"max_invest_pos\",\r\n \"max_invest_margin\",\r\n \"max_invest_rate\",\r\n \"bar_names\",\r\n \"backtesting\"]\r\n\r\n # ----------------------------------------------------------------------\r\n def __init__(self, cta_engine,\r\n strategy_name,\r\n vt_symbol,\r\n setting=None):\r\n \"\"\"Constructor\"\"\"\r\n super().__init__(cta_engine=cta_engine,\r\n strategy_name=strategy_name,\r\n vt_symbol=vt_symbol,\r\n setting=setting)\r\n\r\n # 创建一个策略规则\r\n self.policy = GroupPolicy(strategy=self)\r\n\r\n # 仓位状态\r\n self.position = CtaPosition(strategy=self) # 0 表示没有仓位,1 表示持有多头,-1 表示持有空头\r\n\r\n # 创建网格交易,用来记录\r\n self.gt = CtaGridTrade(strategy=self)\r\n\r\n self.kline_count = len(self.bar_names)\r\n\r\n self.display_bars = False\r\n\r\n if setting:\r\n # 根据配置文件更新参数\r\n self.update_setting(setting)\r\n\r\n # 更新监控的k线总数\r\n self.kline_count = len(self.bar_names)\r\n\r\n for bar_name in self.bar_names:\r\n # 创建K线\r\n kline_setting = {}\r\n para_fast_len, para_slow_len, para_signal_len, name = bar_name.split('_')\r\n kline_class, interval_num = get_cta_bar_type(name)\r\n kline_setting['name'] = bar_name\r\n\r\n para_fast_len = int(para_fast_len.replace('f', ''))\r\n para_slow_len = int(para_slow_len.replace('s', ''))\r\n para_signal_len = int(para_signal_len.replace('n', ''))\r\n\r\n kline_setting['bar_interval'] = interval_num # K线的Bar时长\r\n kline_setting['para_atr1_len'] = 2 * para_fast_len # ATR均值\r\n kline_setting['para_ma1_len'] = 55 # 缠论常用得第1条均线\r\n kline_setting['para_ma2_len'] = 89 # 缠论常用得第2条均线\r\n\r\n kline_setting['para_macd_fast_len'] = para_fast_len\r\n kline_setting['para_macd_slow_len'] = para_slow_len\r\n kline_setting['para_macd_signal_len'] = para_signal_len\r\n\r\n kline_setting['para_active_chanlun'] = True # 激活缠论\r\n\r\n kline_setting['price_tick'] = self.price_tick\r\n kline_setting['underly_symbol'] = get_underlying_symbol(vt_symbol.split('.')[0]).upper()\r\n kline_setting['is_7x24'] = True\r\n self.write_log(f'创建K线:{kline_setting}')\r\n kline = kline_class(self, self.on_bar_k, kline_setting)\r\n self.klines.update({bar_name: kline})\r\n\r\n self.export_klines()\r\n\r\n if self.backtesting:\r\n # 回测时,自动初始化\r\n self.on_init()\r\n\r\n self.debug_dates = ['2020-09-14', '2021-02-06', '2021-02-07', '2021-02-22']\r\n\r\n def export_klines(self):\r\n \"\"\"输出K线=》csv文件\"\"\"\r\n if not self.backtesting:\r\n return\r\n\r\n for kline_name, kline in self.klines.items():\r\n # 写入文件\r\n import os\r\n kline.export_filename = os.path.abspath(\r\n os.path.join(self.cta_engine.get_logs_path(),\r\n u'{}_{}.csv'.format(self.strategy_name, kline_name)))\r\n\r\n kline.export_fields = [\r\n {'name': 'datetime', 'source': 'bar', 'attr': 'datetime', 'type_': 'datetime'},\r\n {'name': 'open', 'source': 'bar', 'attr': 'open_price', 'type_': 'float'},\r\n {'name': 'high', 'source': 'bar', 'attr': 'high_price', 'type_': 'float'},\r\n {'name': 'low', 'source': 'bar', 'attr': 'low_price', 'type_': 'float'},\r\n {'name': 'close', 'source': 'bar', 'attr': 'close_price', 'type_': 'float'},\r\n {'name': 'turnover', 'source': 'bar', 'attr': 'turnover', 'type_': 'float'},\r\n {'name': 'volume', 'source': 'bar', 'attr': 'volume', 'type_': 'float'},\r\n {'name': 'open_interest', 'source': 'bar', 'attr': 'open_interest', 'type_': 'float'},\r\n 
{'name': 'dif', 'source': 'line_bar', 'attr': 'line_dif', 'type_': 'list'},\r\n {'name': 'dea', 'source': 'line_bar', 'attr': 'line_dea', 'type_': 'list'},\r\n {'name': 'macd', 'source': 'line_bar', 'attr': 'line_macd', 'type_': 'list'},\r\n {'name': f'ma{kline.para_ma1_len}', 'source': 'line_bar', 'attr': 'line_ma1', 'type_': 'list'},\r\n {'name': f'ma{kline.para_ma2_len}', 'source': 'line_bar', 'attr': 'line_ma2', 'type_': 'list'},\r\n {'name': f'upper', 'source': 'line_bar', 'attr': 'line_macd_chn_upper', 'type_': 'list'},\r\n {'name': f'lower', 'source': 'line_bar', 'attr': 'line_macd_chn_lower', 'type_': 'list'},\r\n ]\r\n\r\n kline.export_bi_filename = os.path.abspath(\r\n os.path.join(self.cta_engine.get_logs_path(),\r\n u'{}_{}_bi.csv'.format(self.strategy_name, kline_name)))\r\n\r\n kline.export_zs_filename = os.path.abspath(\r\n os.path.join(self.cta_engine.get_logs_path(),\r\n u'{}_{}_zs.csv'.format(self.strategy_name, kline_name)))\r\n\r\n kline.export_duan_filename = os.path.abspath(\r\n os.path.join(self.cta_engine.get_logs_path(),\r\n u'{}_{}_duan.csv'.format(self.strategy_name, kline_name)))\r\n\r\n # ----------------------------------------------------------------------\r\n def on_init(self, force=False):\r\n \"\"\"初始化\"\"\"\r\n self.write_log(u'策略初始化')\r\n\r\n if self.inited:\r\n if force:\r\n self.write_log(u'策略强制初始化')\r\n self.inited = False\r\n self.trading = False # 控制是否启动交易\r\n self.position.pos = 0 # 仓差\r\n self.position.long_pos = 0 # 多头持仓\r\n self.position.short_pos = 0 # 空头持仓\r\n self.gt.up_grids = []\r\n self.gt.dn_grids = []\r\n else:\r\n self.write_log(u'策略初始化')\r\n self.write_log(u'已经初始化过,不再执行')\r\n return\r\n\r\n # 得到持久化的Policy中的子事务数据\r\n self.init_policy()\r\n self.display_tns()\r\n\r\n if not self.backtesting:\r\n self.init_position() # 初始持仓数据\r\n\r\n if not self.backtesting:\r\n # 这里是使用gateway历史数据\r\n if not self.init_data():\r\n self.write_error(u'初始数据失败')\r\n\r\n self.inited = True\r\n if not self.backtesting:\r\n self.trading = True # 控制是否启动交易\r\n\r\n self.write_log(u'策略初始化加载历史持仓、策略数据完成')\r\n self.display_grids()\r\n self.display_tns()\r\n\r\n self.put_event()\r\n\r\n def init_data(self):\r\n \"\"\"初始化数据\"\"\"\r\n try:\r\n # 优先从本地缓存文件,获取缓存\r\n last_bar_dt = self.load_klines_from_cache()\r\n dt_now = datetime.now()\r\n # 开始时间\r\n if last_bar_dt:\r\n load_days = max((dt_now - last_bar_dt).days, 1)\r\n else:\r\n load_days = 90\r\n self.display_bars = False\r\n\r\n def on_bar_cb(bar, **kwargs):\r\n \"\"\"\"\"\"\r\n if last_bar_dt and bar.datetime < last_bar_dt:\r\n return\r\n self.cur_price = bar.close_price\r\n self.cur_datetime = bar.datetime\r\n if self.cur_datetime > dt_now - timedelta(days=1) and not self.display_bars:\r\n self.display_bars = True\r\n for kline in self.klines.values():\r\n kline.add_bar(bar)\r\n\r\n self.cta_engine.load_bar(vt_symbol=self.vt_symbol,\r\n days=load_days,\r\n interval=Interval.MINUTE,\r\n callback=on_bar_cb)\r\n return True\r\n\r\n except Exception as ex:\r\n self.write_error(u'init_data Exception:{},{}'.format(str(ex), traceback.format_exc()))\r\n return False\r\n\r\n # ----------------------------------------------------------------------\r\n def on_tick(self, tick: TickData):\r\n \"\"\"行情更新\r\n :type tick: object\r\n \"\"\"\r\n # 实盘检查是否初始化数据完毕。如果数据未初始化完毕,则不更新tick,避免影响cur_price\r\n if not self.backtesting:\r\n if not self.inited:\r\n self.write_log(u'数据还没初始化完毕,不更新tick')\r\n return\r\n\r\n # 更新所有tick dict(包括 指数/主力/历史持仓合约)\r\n self.tick_dict.update({tick.vt_symbol: tick})\r\n\r\n if tick.vt_symbol == self.vt_symbol:\r\n 
self.cur_tick = tick\r\n self.cur_price = tick.last_price\r\n\r\n else:\r\n # 所有非vt_symbol得tick,全部返回\r\n return\r\n\r\n # 更新策略执行的时间(用于回测时记录发生的时间)\r\n self.cur_datetime = tick.datetime\r\n self.cur_price = tick.last_price\r\n\r\n for kline in self.klines.values():\r\n kline.on_tick(copy.copy(tick))\r\n\r\n if not self.inited or not self.trading:\r\n return\r\n\r\n self.account_pos = self.cta_engine.get_position(vt_symbol=self.vt_symbol, direction=Direction.NET)\r\n\r\n # 执行撤单逻辑\r\n self.tns_cancel_logic(tick.datetime, reopen=False)\r\n\r\n # 网格逐一止损/止盈检查\r\n self.grid_check_stop()\r\n\r\n # 实盘这里是每分钟执行\r\n if self.last_minute != tick.datetime.minute:\r\n self.last_minute = tick.datetime.minute\r\n\r\n # 开仓逻辑处理\r\n self.tns_open_logic()\r\n\r\n self.display_grids()\r\n self.display_tns()\r\n\r\n # 事务平衡仓位\r\n self.tns_calcute_net_pos()\r\n\r\n self.put_event()\r\n\r\n # ----------------------------------------------------------------------\r\n def on_bar(self, bar: BarData):\r\n \"\"\"\r\n 分钟K线数据(仅用于回测时,从策略外部调用)\r\n :param bar:\r\n :return:\r\n \"\"\"\r\n\r\n # if '201604082134' in self.cur_datetime.strftime(\"%Y%m%d%H%M\"):\r\n # a = 1\r\n\r\n if self.backtesting:\r\n new_dt = bar.datetime + timedelta(seconds=60)\r\n if self.cur_datetime and new_dt < self.cur_datetime:\r\n return\r\n self.cur_datetime = new_dt\r\n self.cur_price = bar.close_price\r\n\r\n if self.inited:\r\n self.account_pos = self.cta_engine.get_position(vt_symbol=self.vt_symbol, direction=Direction.NET)\r\n\r\n # 执行撤单逻辑\r\n self.tns_cancel_logic(bar.datetime)\r\n\r\n # 网格逐一止损/止盈检查\r\n self.grid_check_stop()\r\n\r\n # 推送tick到大周期K线\r\n try:\r\n for kline_name, kline in self.klines.items():\r\n if kline_name.endswith('M1'):\r\n bar_complete = True\r\n else:\r\n bar_complete = False\r\n kline.add_bar(bar=copy.copy(bar), bar_is_completed=bar_complete)\r\n\r\n except Exception as ex:\r\n self.write_error(u'{},{}'.format(str(ex), traceback.format_exc()))\r\n\r\n if self.inited and self.trading:\r\n\r\n if '201909262212' in self.cur_datetime.strftime(\"%Y%m%d%H%M\"):\r\n a = 1\r\n\r\n # 开仓逻辑处理\r\n self.tns_open_logic()\r\n\r\n # 事务平衡仓位\r\n self.tns_calcute_net_pos()\r\n\r\n # 显示各指标信息\r\n self.display_tns()\r\n\r\n def is_macd_signal(self, kline, direction):\r\n \"\"\"\r\n 条件1:判断是否突破macd的通道上下轨道,突破就开仓\r\n 'start': self.cur_datetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n 'end': self.cur_datetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n 'cross': self.cur_macd_cross,\r\n 'macd_count': self.cur_macd_count,\r\n 'max_price': self.high_array[-1],\r\n 'min_price': self.low_array[-1],\r\n 'max_dif': self.line_dif[-1],\r\n 'min_dif': self.line_dif[-1],\r\n 'macd_area': abs(self.line_macd[-1]),\r\n 'max_macd': self.line_macd[-1],\r\n 'min_macd': self.line_macd[-1]\r\n 条件2: 判断是否顶背离、底背离\r\n :param kline: k线\r\n :param direction: 需要开仓的方向\r\n :return: True/False, 信号\r\n \"\"\"\r\n\r\n if len(kline.macd_segment_list) < 3 or len(kline.bi_zs_list) == 0:\r\n return False, \"\"\r\n\r\n # 缠论线段\r\n cur_duan = kline.cur_duan\r\n pre_duan = kline.pre_duan\r\n tre_duan = kline.tre_duan\r\n\r\n cur_bi = kline.bi_list[-1]\r\n cur_zs = kline.bi_zs_list[-1]\r\n cur_fx = kline.fenxing_list[-1]\r\n\r\n # 最后三个macd的分段(排除毛刺得分段)\r\n tre_seg, pre_seg, cur_seg = kline.macd_segment_list[-3:]\r\n if abs(pre_seg['macd_count']) == 1 and len(kline.macd_segment_list) > 5:\r\n tre_seg, pre_seg = kline.macd_segment_list[-5:-3]\r\n\r\n # 是否有原信号\r\n signal = self.policy.signals.get(kline.name, {})\r\n last_signal = signal.get('last_signal', None)\r\n last_signal_time = 
signal.get('last_signal_time', None)\r\n if last_signal_time and isinstance(last_signal_time, datetime):\r\n last_signal_time = last_signal_time.strftime('%Y-%m-%d %H:%M:%S')\r\n signal_name = signal.get('signal_name', None)\r\n open_price = signal.get('open_price', None)\r\n duan_start = signal.get('duan_start', None)\r\n bi_start = signal.get('bi_start', None)\r\n stop_price = signal.get('stop_price')\r\n\r\n # 判断是否有做多信号\r\n if direction == Direction.LONG:\r\n # 空单信号演变:首先调整做空信号的开仓位,如果出现同样macd段突破信号,修改开仓时间和价位\r\n if last_signal == 'short' \\\r\n and last_signal_time < cur_seg['start'] \\\r\n and open_price > kline.cur_price \\\r\n and (duan_start < cur_duan.start or (\r\n duan_start == cur_duan.start and last_signal_time < cur_duan.bi_list[0].end)):\r\n if cur_duan.direction == -1 \\\r\n and cur_duan.low == cur_bi.low \\\r\n and len(cur_duan.bi_list) >= 3 \\\r\n and min([bi.low for bi in cur_duan.bi_list]) == cur_duan.low \\\r\n and cur_seg['macd_count'] < 0 \\\r\n and kline.close_array[-1] < tre_seg['min_close']: # and pre_seg['cross'] < cur_seg['cross'] < 0\r\n self.write_log(f'再次出现做空信号,调整开仓时间和开仓价')\r\n signal_name = 'short_break'\r\n open_price = kline.cur_price\r\n duan_start = cur_duan.start\r\n bi_start = cur_bi.start\r\n stop_price = float(cur_duan.high)\r\n last_signal_time = kline.cur_datetime.strftime('%Y-%m-%d %H:%M:%S')\r\n signal.update({\"last_signal_time\": kline.cur_datetime,\r\n \"open_price\": open_price,\r\n \"duan_start\": duan_start,\r\n \"bi_start\": bi_start,\r\n \"stop_price\": stop_price,\r\n \"signal_name\": signal_name})\r\n self.policy.signals.update({kline.name: signal})\r\n self.policy.save()\r\n\r\n # 信号01:当前属于下macd的金叉,判断是否突破上一个死叉周期的最高位\r\n if cur_seg['macd_count'] > 0 \\\r\n and abs(pre_seg['macd_count']) >= 4 \\\r\n and kline.close_array[-1] > tre_seg['max_close'] \\\r\n and pre_seg['cross'] > cur_seg['cross'] > 0:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'long_break',\r\n \"signal\": f'{kline.name}.long',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'long_break'\r\n\r\n # 信号02 出现底背离\r\n if kline.cur_macd_count == 1 \\\r\n and len(cur_duan.bi_list) >= 5 \\\r\n and cur_duan.low == np.float32(pre_seg['min_price']) \\\r\n and min([bi.low for bi in cur_duan.bi_list]) == cur_bi.low \\\r\n and abs(tre_seg['macd_count']) > 4 \\\r\n and abs(pre_seg['macd_count']) >= 4 \\\r\n and kline.dif_buttom_divergence \\\r\n and cur_bi.low < kline.bi_list[-3].low:\r\n\r\n # 判断底背离的前三个macd区间段交叉点的相对位置\r\n four_seg = kline.macd_segment_list[-4]\r\n if (four_seg['cross'] - tre_seg['cross']) * 0.5 > pre_seg['cross'] - tre_seg['cross']:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'buttom_div',\r\n \"signal\": f'{kline.name}.dif_div',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'buttom_div'\r\n\r\n # 出现连续三个逐一降低的底分型,最后两个形成底背离\r\n if kline.cur_macd_count > 0 \\\r\n and len(cur_duan.bi_list) >= 5 \\\r\n and min([bi.low for bi in cur_duan.bi_list]) == cur_bi.low \\\r\n and cur_duan.bi_list[-5].low > cur_duan.bi_list[-3].low \\\r\n and cur_duan.bi_list[-5].low > cur_duan.bi_list[-1].high \\\r\n and kline.is_fx_macd_divergence(direction=Direction.SHORT, cur_duan=cur_duan):\r\n\r\n seg_list = [seg for seg in kline.macd_segment_list if seg['start'] > cur_duan.bi_list[-2].start]\r\n if len(seg_list) == 1:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n 
\"operation\": 'buttom_div2',\r\n \"signal\": f'{kline.name}.dif_div',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'buttom_div'\r\n # 做空突破,或顶背离失败\r\n if open_price and last_signal and duan_start:\r\n # 线段是多,持仓是空单,开仓价低于当前线段得高点(即当前价),反手做多\r\n\r\n # 信号03:顶背离失败,原有段出现新高\r\n if cur_duan.direction == 1 \\\r\n and last_signal == 'short' \\\r\n and open_price < cur_duan.high \\\r\n and stop_price and stop_price < cur_duan.high \\\r\n and cur_duan.start < duan_start < cur_duan.end:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'top_div_fail',\r\n \"signal\": f'{kline.name}.long',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'long_break'\r\n\r\n # 信号04 原有段突破做空失败,出现反向的做多笔,且做多。 #\r\n if cur_duan.direction == -1 \\\r\n and cur_bi.direction == 1 \\\r\n and last_signal == 'short' \\\r\n and signal_name in ['short_break'] \\\r\n and bi_start == kline.bi_list[-2].start \\\r\n and open_price < cur_bi.high:\r\n\r\n kline.rt_count_macd()\r\n is_long_trend = kline.ma12_count > 0 \\\r\n or kline.close_array[-1] > max(kline.line_ma1[-1], kline.line_ma2[-1]) \\\r\n or kline.line_ma1[-1] > kline.line_ma1[-2] \\\r\n or kline.line_ma2[-1] > kline.line_ma2[-2]\r\n if is_long_trend \\\r\n and cur_bi.high > kline.bi_list[-2].high \\\r\n and (kline.rt_macd_count > 0 or kline.cur_macd_count > 0) \\\r\n and kline.cur_price > pre_seg['max_price']:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'break_fail',\r\n \"signal\": f'{kline.name}.long',\r\n \"stop_price\": cur_bi.low\r\n }\r\n self.save_dist(d)\r\n return True, 'fail_div'\r\n\r\n # 原有macd段突破做空失败,出现反向的做多笔,且做多。#\r\n if cur_duan.direction == -1 \\\r\n and cur_bi.direction == 1 \\\r\n and last_signal == 'short' \\\r\n and signal_name in ['short_break'] \\\r\n and pre_seg['start'] <= last_signal_time <= cur_seg['start'] \\\r\n and open_price < cur_bi.high:\r\n\r\n kline.rt_count_macd()\r\n\r\n if (kline.rt_macd_count > 0 or kline.cur_macd_count > 0) \\\r\n and kline.cur_price > min(pre_seg['max_price'],\r\n pre_seg['min_price'] + kline.bi_height_ma()):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'break_fail2',\r\n \"signal\": f'{kline.name}.long',\r\n \"stop_price\": cur_bi.low\r\n }\r\n self.save_dist(d)\r\n return True, 'fail_div'\r\n\r\n # 信号05 原有段突破做空失败,出现反向的做多段。\r\n # 反向线段的高点,高于做空的位置,当前必须是完整的上涨一笔,当前价格高于开空位置\r\n if cur_duan.direction == 1 \\\r\n and last_signal == 'short' \\\r\n and open_price < cur_duan.high \\\r\n and duan_start < cur_duan.bi_list[-1].start \\\r\n and kline.cur_price > open_price \\\r\n and last_signal_time < cur_seg['start']:\r\n if cur_zs.end > cur_duan.start and cur_bi.low < cur_zs.high:\r\n return False, ''\r\n\r\n # 要么一根分笔直接打穿,或者三根分笔形成小线段\r\n if len(cur_duan.bi_list) == 1 \\\r\n or (cur_bi.direction == -1 and not cur_fx.is_rt):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'short_rev',\r\n \"signal\": f'{kline.name}.long',\r\n \"stop_price\": pre_seg[\"min_price\"]\r\n }\r\n self.save_dist(d)\r\n return True, 'long_break'\r\n\r\n if direction == Direction.SHORT:\r\n # 首先调整做多信号的开仓位,如果出现同样macd段突破信号,修改开仓时间和价位\r\n if last_signal == 'long' \\\r\n and last_signal_time < cur_seg['start'] \\\r\n and open_price < kline.cur_price \\\r\n and (duan_start < cur_duan.start or (\r\n duan_start == cur_duan.start and last_signal_time < cur_duan.bi_list[0].end)):\r\n if 
cur_duan.direction == 1 \\\r\n and cur_duan.high == cur_bi.high \\\r\n and len(cur_duan.bi_list) >= 3 \\\r\n and max([bi.high for bi in cur_duan.bi_list]) == cur_duan.high \\\r\n and cur_seg['macd_count'] > 0 \\\r\n and kline.close_array[-1] > tre_seg['max_close']: # pre_seg['cross'] > cur_seg['cross'] > 0\r\n self.write_log(f'再次出现做多信号,调整开仓时间和开仓价')\r\n signal_name = 'long_break'\r\n open_price = kline.cur_price\r\n duan_start = cur_duan.start\r\n bi_start = cur_bi.start\r\n stop_price = float(cur_duan.low)\r\n last_signal_time = kline.cur_datetime.strftime('%Y-%m-%d %H:%M:%S')\r\n signal.update({\"last_signal_time\": kline.cur_datetime,\r\n \"open_price\": open_price,\r\n \"duan_start\": duan_start,\r\n \"bi_start\": bi_start,\r\n \"stop_price\": stop_price,\r\n \"signal_name\": signal_name})\r\n self.policy.signals.update({kline.name: signal})\r\n self.policy.save()\r\n\r\n # 当前属于macd的死叉,判断是否突破上一个金叉周期的最低位\r\n if cur_seg['macd_count'] < 0 \\\r\n and abs(pre_seg['macd_count']) >= 4 \\\r\n and kline.close_array[-1] < tre_seg['min_close'] \\\r\n and pre_seg['cross'] < cur_seg['cross'] < 0:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'short_break',\r\n \"signal\": f'{kline.name}.short',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'short_break'\r\n\r\n # # 出现顶背离\r\n if kline.cur_macd_count == -1 \\\r\n and len(cur_duan.bi_list) >= 5 \\\r\n and cur_duan.high == np.float32(pre_seg['max_price']) \\\r\n and max([bi.high for bi in cur_duan.bi_list]) == cur_bi.high \\\r\n and abs(tre_seg['macd_count']) > 4 \\\r\n and abs(pre_seg['macd_count']) >= 4 \\\r\n and kline.dif_top_divergence \\\r\n and cur_bi.high > kline.bi_list[-3].high:\r\n\r\n # 判断前三个交叉点的相对位置\r\n four_seg = kline.macd_segment_list[-4]\r\n if (tre_seg['cross'] - four_seg['cross']) * 0.5 > tre_seg['cross'] - pre_seg['cross']:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'top_div',\r\n \"signal\": f'{kline.name}.dif_div',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'top_div'\r\n\r\n # 出现连续三个顶分型,最后两个顶背离\r\n if kline.cur_macd_count < 0 \\\r\n and len(cur_duan.bi_list) >= 5 \\\r\n and max([bi.high for bi in cur_duan.bi_list]) == cur_bi.high \\\r\n and cur_duan.bi_list[-5].high < cur_duan.bi_list[-3].high \\\r\n and cur_duan.bi_list[-5].high < cur_duan.bi_list[-1].low \\\r\n and kline.is_fx_macd_divergence(direction=Direction.LONG, cur_duan=cur_duan):\r\n\r\n # 重点是解决top_div中,下跌区间未形成分段\r\n seg_list = [seg for seg in kline.macd_segment_list if seg['start'] > cur_duan.bi_list[-2].start]\r\n if len(seg_list) == 1:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'top_div2',\r\n \"signal\": f'{kline.name}.dif_div',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'top_div2'\r\n\r\n # 突破或底背离失败\r\n if open_price and last_signal and duan_start:\r\n\r\n # 底背离失败,原有段延续出现新低\r\n if cur_duan.direction == -1 \\\r\n and last_signal == 'long' \\\r\n and open_price > cur_duan.low \\\r\n and stop_price and stop_price > cur_duan.low \\\r\n and cur_duan.start < duan_start < cur_duan.end:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'buttom_div_fail',\r\n \"signal\": f'{kline.name}.short',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'short_break'\r\n\r\n # 
原有段突破做空失败,出现反向的做多笔,且做多。 #\r\n if cur_duan.direction == 1 \\\r\n and cur_bi.direction == -1 \\\r\n and last_signal == 'long' \\\r\n and signal_name in ['long_break'] \\\r\n and bi_start == kline.bi_list[-2].start \\\r\n and open_price > cur_bi.low:\r\n kline.rt_count_macd()\r\n is_short_trend = kline.ma12_count < 0 \\\r\n or kline.close_array[-1] < min(kline.line_ma1[-1], kline.line_ma2[-1]) \\\r\n or kline.line_ma1[-1] < kline.line_ma1[-2] \\\r\n or kline.line_ma2[-1] < kline.line_ma2[-2]\r\n if is_short_trend \\\r\n and cur_bi.low < kline.bi_list[-2].low \\\r\n and (kline.rt_macd_count < 0 or kline.cur_macd_count < 0) \\\r\n and kline.cur_price < pre_seg['min_price']:\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'break_fail',\r\n \"signal\": f'{kline.name}.short',\r\n \"stop_price\": cur_bi.high\r\n }\r\n self.save_dist(d)\r\n return True, 'fail_div'\r\n\r\n # 原有macd段突破做多失败,出现反向的做空笔。 #\r\n if cur_duan.direction == 1 \\\r\n and cur_bi.direction == -1 \\\r\n and last_signal == 'long' \\\r\n and signal_name in ['long_break'] \\\r\n and pre_seg['start'] <= last_signal_time <= cur_seg['start'] \\\r\n and open_price > cur_bi.low:\r\n kline.rt_count_macd()\r\n # 实时死叉,或者已经死叉\r\n # 价格反向运动,超过一定的幅度( 前macd 区间段的最低价,或者最高价-平均笔的高度)\r\n if (kline.rt_macd_count < 0 or kline.cur_macd_count < 0) \\\r\n and kline.cur_price < max(pre_seg['min_price'],\r\n pre_seg['max_price'] - kline.bi_height_ma()):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'break_fail2',\r\n \"signal\": f'{kline.name}.short',\r\n \"stop_price\": cur_bi.high\r\n }\r\n self.save_dist(d)\r\n return True, 'fail_div'\r\n\r\n # 做多突破失败,反转出现做空线段\r\n # 反向线段的底点,低于做多的开仓价,当前必须是完整的上涨一笔,当前价格低于开多价\r\n if cur_duan.direction == -1 \\\r\n and last_signal == 'long' \\\r\n and open_price > cur_duan.low \\\r\n and duan_start <= cur_duan.bi_list[-1].start \\\r\n and kline.cur_price < open_price \\\r\n and last_signal_time < cur_seg['start'] \\\r\n and last_signal_time < cur_duan.start:\r\n\r\n if cur_zs.end > cur_duan.start and cur_bi.high > cur_zs.low:\r\n return False, ''\r\n if len(cur_duan.bi_list) == 1 \\\r\n or (cur_bi.direction == 1 \\\r\n and not cur_fx.is_rt):\r\n d = {\r\n \"datetime\": self.cur_datetime,\r\n \"price\": kline.cur_price,\r\n \"operation\": 'long_rev',\r\n \"signal\": f'{kline.name}.short',\r\n \"stop_price\": float(pre_seg[\"max_price\"])\r\n }\r\n self.save_dist(d)\r\n return True, 'short_break'\r\n return False, \"\"\r\n\r\n def tns_open_logic(self):\r\n \"\"\"\r\n 开仓逻辑\r\n :return:\r\n \"\"\"\r\n if self.entrust != 0:\r\n return\r\n\r\n if self.cur_datetime.strftime(\"%Y-%m-%d\") in self.debug_dates:\r\n a = 1\r\n\r\n for kline_name in list(self.klines.keys()):\r\n kline = self.klines.get(kline_name)\r\n\r\n if len(kline.ma12_cross_list) < 3 or len(kline.duan_list) < 1:\r\n continue\r\n\r\n # 做多事务\r\n if kline_name not in self.policy.long_klines:\r\n\r\n # 判断1:macd金叉+突破通道, 或者是底背离\r\n cond01, signal_name = self.is_macd_signal(kline, direction=Direction.LONG)\r\n\r\n if cond01:\r\n signal = self.policy.signals.get(kline_name, {})\r\n if signal.get('last_signal', '') != 'long':\r\n # 出现多头突破信号\r\n if signal_name in ['long_break']:\r\n low = kline.duan_list[-1].low\r\n duan_start = kline.duan_list[-1].start\r\n # 出现底背离信号,或者中枢背驰等反转信号\r\n else:\r\n low = kline.duan_list[-1].low\r\n duan_start = kline.duan_list[-1].end\r\n\r\n signal = {'last_signal': 'long',\r\n 'last_signal_time': self.cur_datetime,\r\n 'signal_name': 
signal_name,\r\n 'stop_price': float(low),\r\n 'open_price': kline.cur_price,\r\n 'duan_start': duan_start,\r\n 'bi_start': kline.bi_list[-1].start}\r\n\r\n self.policy.signals.update({kline_name: signal})\r\n self.policy.save()\r\n\r\n if kline_name in self.policy.short_klines:\r\n self.write_log(u'从做空信号队列中移除:{}'.format(kline_name))\r\n self.policy.short_klines.remove(kline_name)\r\n\r\n self.write_log(u'从做多信号队列中增加:{}'.format(kline_name))\r\n self.policy.long_klines.append(kline_name)\r\n continue\r\n\r\n # 做空事务\r\n if kline_name not in self.policy.short_klines:\r\n\r\n # 判断1:死叉+突破 或者顶背离\r\n cond01, signal_name = self.is_macd_signal(kline, direction=Direction.SHORT)\r\n\r\n if cond01:\r\n signal = self.policy.signals.get(kline_name, {})\r\n if signal.get('last_signal', '') != 'short':\r\n if signal_name in ['short_break']:\r\n high = kline.duan_list[-1].high\r\n duan_start = kline.duan_list[-1].start\r\n else:\r\n high = kline.duan_list[-1].high\r\n duan_start = kline.duan_list[-1].end\r\n\r\n signal = {'last_signal': 'short',\r\n 'last_signal_time': self.cur_datetime,\r\n 'signal_name': signal_name,\r\n 'stop_price': float(high),\r\n 'open_price': kline.cur_price,\r\n 'duan_start': duan_start,\r\n 'bi_start': kline.bi_list[-1].start\r\n }\r\n self.policy.signals.update({kline_name: signal})\r\n self.policy.save()\r\n\r\n if kline_name in self.policy.long_klines:\r\n self.write_log(u'从做多信号队列中移除:{}'.format(kline_name))\r\n self.policy.long_klines.remove(kline_name)\r\n\r\n self.write_log(u'从做空信号队列中增加:{}'.format(kline_name))\r\n self.policy.short_klines.append(kline_name)\r\n\r\n def on_bar_k(self, bar, **kwargs):\r\n \"\"\"\r\n K线数据\r\n :param bar: 预定的周期Bar\r\n :return:\r\n \"\"\"\r\n pass\r\n\r\n def tns_calcute_net_pos(self):\r\n \"\"\"事务计算仓位轧差\"\"\"\r\n if not self.trading or self.entrust != 0:\r\n return\r\n\r\n self.account_pos = self.cta_engine.get_position(self.vt_symbol, direction=Direction.NET)\r\n if not self.account_pos:\r\n self.write_error(f'未能获取{self.vt_symbol}净仓')\r\n\r\n # 校验多一次多空信号\r\n long_klines = [s for s in self.policy.long_klines if s in self.bar_names]\r\n short_klines = [s for s in self.policy.short_klines if s in self.bar_names]\r\n\r\n if len(long_klines) != len(self.policy.long_klines):\r\n self.send_wechat(f'{self.strategy_name}多头信号校验不一致,修正{self.policy.long_klines} => {long_klines}')\r\n self.policy.long_klines = copy.copy(long_klines)\r\n\r\n if len(short_klines) != len(self.policy.short_klines):\r\n self.send_wechat(f'{self.strategy_name}空头信号校验不一致,修正:{self.policy.short_klines} => {short_klines}')\r\n self.policy.short_klines = copy.copy(short_klines)\r\n\r\n # 多/空/净仓分数\r\n long_kline_count = len(self.policy.long_klines)\r\n short_kline_count = len(self.policy.short_klines)\r\n net_kline_count = long_kline_count - short_kline_count\r\n\r\n if net_kline_count != self.policy.last_net_count:\r\n self.write_log(u'信号K线净仓变化 {} =>{}'.format(self.policy.last_net_count, net_kline_count))\r\n self.policy.last_net_count = net_kline_count\r\n\r\n # 计算目标头寸,(正数:多头, 负数:空头,0:持平)\r\n if self.max_invest_pos > 0:\r\n # 采用固定最大仓位时\r\n target_volume = round_to(\r\n value=self.max_invest_pos * net_kline_count / self.kline_count,\r\n target=self.volumn_tick)\r\n single_volume = round_to(\r\n value=float(self.max_invest_pos / self.kline_count),\r\n target=self.volumn_tick)\r\n max_volume = self.max_invest_pos\r\n else:\r\n # 采用资金投入百分比\r\n balance, avaliable, _, _ = self.cta_engine.get_account()\r\n invest_margin = balance * self.max_invest_rate\r\n if invest_margin > 
self.max_invest_margin > 0:\r\n invest_margin = self.max_invest_margin\r\n max_volume = round_to(\r\n value=invest_margin / (self.cur_price * self.margin_rate),\r\n target=self.volumn_tick)\r\n single_volume = round_to(\r\n value=float(max_volume / self.kline_count),\r\n target=self.volumn_tick)\r\n target_volume = round_to(\r\n value=max_volume * net_kline_count / self.kline_count,\r\n target=self.volumn_tick)\r\n\r\n diff_volume = target_volume - self.position.pos\r\n diff_volume = round(diff_volume, 7)\r\n single_volume = round(single_volume, 7)\r\n\r\n # 排除一些噪音(根据净值百分比出来的偏差)\r\n if abs(diff_volume) < single_volume * 0.8:\r\n return\r\n acc_volume = self.account_pos.volume if self.account_pos else 0\r\n\r\n self.write_log(f\"{self.vt_symbol}, 账号净仓:{acc_volume},\"\r\n f\"策略净仓:{self.position.pos},多单:{self.position.long_pos},空单:{self.position.short_pos}\\n\"\r\n f\"目标仓位:{target_volume},偏差仓位:{diff_volume},\"\r\n f\"最大限仓:{max_volume}, 单次变动:{single_volume}\")\r\n\r\n if diff_volume > 0:\r\n\r\n cover_volume = 0\r\n buy_volume = diff_volume\r\n if self.position.short_pos < 0:\r\n cover_volume = abs(self.position.short_pos)\r\n if cover_volume > diff_volume:\r\n cover_volume = diff_volume\r\n buy_volume = 0\r\n else:\r\n buy_volume = round(round_to(diff_volume - cover_volume, self.volumn_tick), 7)\r\n\r\n self.write_log(f'需要增加{self.vt_symbol}仓位{diff_volume} = [平空:{cover_volume}] + 开多{buy_volume}]')\r\n\r\n if cover_volume > 0:\r\n self.write_log(f'执行 {self.vt_symbol} cover:{cover_volume}')\r\n ret = self.tns_process_cover(cover_volume=cover_volume)\r\n if ret:\r\n self.write_log(f'委托平仓空单成功')\r\n return\r\n else:\r\n self.write_log(u'执行平仓失败,转移买入数量:{} => {}'.format(buy_volume, buy_volume + cover_volume))\r\n buy_volume += cover_volume\r\n buy_volume = round(buy_volume, 7)\r\n\r\n if buy_volume > 0:\r\n self.write_log(f'执行 {self.vt_symbol} buy:{buy_volume}')\r\n grid = CtaGrid(direction=Direction.LONG,\r\n vt_symbol=self.vt_symbol,\r\n open_price=self.cur_price,\r\n close_price=sys.maxsize,\r\n stop_price=0,\r\n volume=buy_volume)\r\n\r\n ret = self.grid_buy(grid)\r\n if not ret:\r\n self.write_error(u'执行买入仓位事务失败')\r\n\r\n elif diff_volume < 0:\r\n\r\n sell_volume = 0\r\n short_volume = abs(diff_volume)\r\n if self.position.long_pos > 0:\r\n sell_volume = abs(self.position.long_pos)\r\n if sell_volume > abs(diff_volume):\r\n sell_volume = abs(diff_volume)\r\n short_volume = 0\r\n else:\r\n short_volume = abs(diff_volume) - sell_volume\r\n short_volume = round(round_to(short_volume, self.volumn_tick), 7)\r\n self.write_log(f'需要减少{self.vt_symbol}仓位{diff_volume} = [多平:{sell_volume}] + 空开{short_volume}]')\r\n\r\n if sell_volume > 0:\r\n self.write_log(f'执行 {self.vt_symbol}sell:{sell_volume}')\r\n ret = self.tns_process_sell(sell_volume=sell_volume)\r\n if ret:\r\n self.write_log(f'委托平仓多单成功')\r\n return\r\n else:\r\n self.write_log(u'执行平仓失败,转移做空数量:{} => {}'.format(short_volume, short_volume + sell_volume))\r\n short_volume += sell_volume\r\n short_volume = round_to(short_volume, self.volumn_tick)\r\n\r\n if short_volume > 0:\r\n self.write_log(f'执行 {self.vt_symbol} short:{short_volume}')\r\n grid = CtaGrid(direction=Direction.SHORT,\r\n vt_symbol=self.vt_symbol,\r\n open_price=self.cur_price,\r\n close_price=-sys.maxsize,\r\n stop_price=0,\r\n volume=short_volume)\r\n\r\n ret = self.grid_short(grid)\r\n if not ret:\r\n self.write_error(u'执行调整仓位事务失败')\r\n\r\n self.policy.save()\r\n\r\n def tns_process_cover(self, cover_volume):\r\n \"\"\"事务执行平空计划\"\"\"\r\n\r\n # 合约得持仓信息\r\n if self.account_pos is 
None:\r\n self.write_error(u'当前{}合约得持仓信息获取不到'.format(self.vt_symbol))\r\n return False\r\n\r\n cover_grid = self.tns_get_grid(direction=Direction.SHORT, close_volume=cover_volume)\r\n if cover_grid is None:\r\n self.write_error(u'无法获取合适的平空网格')\r\n return False\r\n\r\n return self.grid_cover(cover_grid)\r\n\r\n def tns_process_sell(self, sell_volume):\r\n \"\"\"事务执行平多计划\"\"\"\r\n\r\n # 合约得持仓信息\r\n if self.account_pos is None:\r\n self.write_error(u'当前{}合约得持仓信息获取不到'.format(self.vt_symbol))\r\n return False\r\n\r\n sell_grid = self.tns_get_grid(direction=Direction.LONG, close_volume=sell_volume)\r\n if sell_grid is None:\r\n self.write_error(u'无法获取合适的平多网格')\r\n return False\r\n\r\n return self.grid_sell(sell_grid)\r\n\r\n def tns_get_grid(self, direction, close_volume):\r\n \"\"\"根据需要平仓的volume,选取/创建出一个grid\"\"\"\r\n\r\n opened_grids = self.gt.get_opened_grids(direction=direction)\r\n if len(opened_grids) == 0:\r\n self.write_error(u'当前没有{}单得网格'.format(direction))\r\n return None\r\n\r\n select_grid = None\r\n remove_gids = []\r\n\r\n for g in opened_grids:\r\n if g.order_status:\r\n self.write_log(f'该网格正在委托中,不选择:{g.__dict__}')\r\n continue\r\n\r\n if select_grid is None:\r\n select_grid = g\r\n # 恰好等于需要close的数量\r\n if round(select_grid.volume, 7) == close_volume:\r\n self.write_log(u'选中首个网格,仓位:{}'.format(close_volume))\r\n break\r\n # volume 大于需要close的数量\r\n if select_grid.volume > close_volume:\r\n remain_volume = select_grid.volume - close_volume\r\n remain_volume = round(remain_volume, 7)\r\n select_grid.volume = close_volume\r\n remain_grid = copy.copy(select_grid)\r\n remain_grid.id = str(uuid.uuid1())\r\n remain_grid.volume = remain_volume\r\n if direction == Direction.SHORT:\r\n self.gt.up_grids.append(remain_grid)\r\n else:\r\n self.gt.dn_grids.append(remain_grid)\r\n self.write_log(u'选择首个网格,仓位超出,创建新的剩余网格:{}'.format(remain_volume))\r\n break\r\n else:\r\n # 如果\r\n if select_grid.volume + g.volume <= close_volume:\r\n old_volume = select_grid.volume\r\n select_grid.volume += g.volume\r\n select_grid.volume = round(select_grid.volume, 7)\r\n\r\n g.volume = 0\r\n remove_gids.append(g.id)\r\n self.write_log(u'close_volume: {} => {},需要移除:{}'\r\n .format(old_volume, select_grid.volume, g.__dict__))\r\n if select_grid.volume == close_volume:\r\n break\r\n elif select_grid.volume + g.volume > close_volume:\r\n g.volume -= (close_volume - select_grid.volume)\r\n select_grid.volume = close_volume\r\n self.write_log(u'cover_volume已满足')\r\n break\r\n\r\n if select_grid is None:\r\n self.write_error(u'没有可选择的{}单网格'.format(direction))\r\n return None\r\n\r\n if round(select_grid.volume, 7) != close_volume:\r\n self.write_error(u'没有可满足数量{}的{}单网格'.format(close_volume, direction))\r\n return None\r\n\r\n self.gt.remove_grids_by_ids(direction=direction, ids=remove_gids)\r\n\r\n return select_grid\r\n\r\n def display_tns(self):\r\n if not self.inited:\r\n return\r\n if self.backtesting:\r\n return\r\n\r\n\r\nclass GroupPolicy(CtaPolicy):\r\n \"\"\"组合策略事务\"\"\"\r\n\r\n def __init__(self, strategy):\r\n super().__init__(strategy)\r\n\r\n self.signals = {} # kline_name: { 'last_signal': '', 'last_signal_time': datetime }\r\n\r\n self.long_klines = [] # 做多信号得kline.name list\r\n self.short_klines = [] # 做空信号得kline.name list\r\n\r\n self.last_net_count = 0\r\n self.last_fund_rate = 1\r\n\r\n def to_json(self):\r\n \"\"\"\r\n 将数据转换成dict\r\n :return:\r\n \"\"\"\r\n j = dict()\r\n j['create_time'] = self.create_time.strftime(\r\n '%Y-%m-%d %H:%M:%S') if self.create_time is not None else \"\"\r\n j['save_time'] = 
self.save_time.strftime('%Y-%m-%d %H:%M:%S') if self.save_time is not None else \"\"\r\n\r\n d = {}\r\n for kline_name, signal in self.signals.items():\r\n save_signal = copy.deepcopy(signal)\r\n\r\n last_signal_time = save_signal.get('last_signal_time', None)\r\n\r\n if isinstance(last_signal_time, datetime):\r\n save_signal.update({\"last_signal_time\": last_signal_time.strftime(\r\n '%Y-%m-%d %H:%M:%S')})\r\n elif last_signal_time is None:\r\n save_signal.update({\"last_signal_time\": \"\"})\r\n\r\n d.update({kline_name: save_signal})\r\n j['signals'] = d\r\n\r\n j['long_klines'] = self.long_klines\r\n j['short_klines'] = self.short_klines\r\n\r\n j['last_net_count'] = self.last_net_count\r\n j['last_fund_rate'] = self.last_fund_rate\r\n return j\r\n\r\n def from_json(self, json_data):\r\n \"\"\"\r\n 将dict转化为属性\r\n :param json_data:\r\n :return:\r\n \"\"\"\r\n if not isinstance(json_data, dict):\r\n return\r\n\r\n if 'create_time' in json_data:\r\n try:\r\n if len(json_data['create_time']) > 0:\r\n self.create_time = datetime.strptime(json_data['create_time'], '%Y-%m-%d %H:%M:%S')\r\n else:\r\n self.create_time = datetime.now()\r\n except Exception as ex:\r\n self.create_time = datetime.now()\r\n\r\n if 'save_time' in json_data:\r\n try:\r\n if len(json_data['save_time']) > 0:\r\n self.save_time = datetime.strptime(json_data['save_time'], '%Y-%m-%d %H:%M:%S')\r\n else:\r\n self.save_time = datetime.now()\r\n except Exception as ex:\r\n self.save_time = datetime.now()\r\n\r\n signals = json_data.get('signals', {})\r\n for kline_name, signal in signals.items():\r\n last_signal = signal.get('last_signal', \"\")\r\n str_last_signal_time = signal.get('last_signal_time', \"\")\r\n last_signal_time = None\r\n try:\r\n if len(str_last_signal_time) > 0:\r\n last_signal_time = datetime.strptime(str_last_signal_time, '%Y-%m-%d %H:%M:%S')\r\n else:\r\n last_signal_time = None\r\n except Exception as ex:\r\n last_signal_time = None\r\n signal.update({'last_signal_time': last_signal_time})\r\n self.signals.update({kline_name: signal})\r\n\r\n self.long_klines = json_data.get('long_klines', [])\r\n self.short_klines = json_data.get('short_klines', [])\r\n self.last_net_count = json_data.get('last_net_count', 0)\r\n self.last_fund_rate = json_data.get('last_fund_rate', 1)\r\n\r\n def clean(self):\r\n \"\"\"\r\n 清空数据\r\n :return:\r\n \"\"\"\r\n self.write_log(u'清空policy数据')\r\n self.signals = {}\r\n self.long_klines = []\r\n self.short_klines = []\r\n self.last_net_count = 0\r\n self.last_fund_rate = 1\r\n" ]
[ [ "numpy.float32" ], [ "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yue-litam/azurelane-auto-scripts
[ "f7aaa93b02d72e4895c28672ba72ee935334067c" ]
[ "detect-test.py" ]
[ "import numpy as np\nimport cv2\nimport wda\n\n\ndef main():\n image_path = './screen.png'\n feature_path = './iphone_se_3.0.0/feature/ambush_encountered_detection.png'\n\n c = wda.Client()\n _ = c.screenshot(image_path)\n screen = cv2.imread(image_path) # 加载图片\n screen_gray = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY) # 灰度转换\n # screen_gray = cv2.imread(image_path, 0)\n\n feature = cv2.imread(feature_path, 0)\n feature_w, feature_h = feature.shape[::-1]\n print('feature size:', feature_w, 'x', feature_h)\n\n # # 查看三组图像(图像标签名称,文件名称)\n # cv2.imshow('screen', screen)\n # cv2.imshow('screen_gray', screen_gray)\n # cv2.imshow('feature', feature)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n res = cv2.matchTemplate(screen_gray, feature, cv2.TM_CCOEFF_NORMED)\n\n # min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n # touch_loc = (max_loc[0], max_loc[1])\n # cv2.rectangle(screen, touch_loc, (touch_loc[0] + feature_w, touch_loc[1] + feature_h), (7, 249, 151), 2)\n\n # 使用灰度图像中的坐标对原始RGB图像进行标记\n loc = np.where(res >= 0.8)\n for pt in zip(*loc[::-1]):\n cv2.rectangle(screen, pt, (pt[0] + feature_w, pt[1] + feature_h), (7, 249, 151), 2)\n\n # 显示图像\n cv2.imshow('Detected', screen)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
makotovnjp/Talent5OpenPose
[ "1ebbbd4f226b6839d7d1627d6c33edd416c137fc" ]
[ "src/pose_estimation/utils/openpose_net.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torchvision\n\n\nclass OpenPoseNet(nn.Module):\n def __init__(self):\n super(OpenPoseNet, self).__init__()\n\n self.model0 = OpenPose_Feature()\n\n self.model1_1 = make_OpenPose_block('block1_1')\n self.model2_1 = make_OpenPose_block('block2_1')\n self.model3_1 = make_OpenPose_block('block3_1')\n self.model4_1 = make_OpenPose_block('block4_1')\n self.model5_1 = make_OpenPose_block('block5_1')\n self.model6_1 = make_OpenPose_block('block6_1')\n\n self.model1_2 = make_OpenPose_block('block1_2')\n self.model2_2 = make_OpenPose_block('block2_2')\n self.model3_2 = make_OpenPose_block('block3_2')\n self.model4_2 = make_OpenPose_block('block4_2')\n self.model5_2 = make_OpenPose_block('block5_2')\n self.model6_2 = make_OpenPose_block('block6_2')\n\n def forward(self, x):\n # Feature\n out1 = self.model0(x)\n\n # Stage1\n out1_1 = self.model1_1(out1) # PAFs側\n out1_2 = self.model1_2(out1) # confidence heatmap側\n\n # CStage2\n out2 = torch.cat([out1_1, out1_2, out1], 1) # 次元1のチャネルで結合\n out2_1 = self.model2_1(out2)\n out2_2 = self.model2_2(out2)\n\n # Stage3\n out3 = torch.cat([out2_1, out2_2, out1], 1)\n out3_1 = self.model3_1(out3)\n out3_2 = self.model3_2(out3)\n\n # Stage4\n out4 = torch.cat([out3_1, out3_2, out1], 1)\n out4_1 = self.model4_1(out4)\n out4_2 = self.model4_2(out4)\n\n # Stage5\n out5 = torch.cat([out4_1, out4_2, out1], 1)\n out5_1 = self.model5_1(out5)\n out5_2 = self.model5_2(out5)\n\n # Stage6\n out6 = torch.cat([out5_1, out5_2, out1], 1)\n out6_1 = self.model6_1(out6)\n out6_2 = self.model6_2(out6)\n\n saved_for_loss = []\n saved_for_loss.append(out1_1) # PAFs側\n saved_for_loss.append(out1_2) # confidence heatmap側\n saved_for_loss.append(out2_1)\n saved_for_loss.append(out2_2)\n saved_for_loss.append(out3_1)\n saved_for_loss.append(out3_2)\n saved_for_loss.append(out4_1)\n saved_for_loss.append(out4_2)\n saved_for_loss.append(out5_1)\n saved_for_loss.append(out5_2)\n saved_for_loss.append(out6_1)\n saved_for_loss.append(out6_2)\n\n return (out6_1, out6_2), saved_for_loss\n\n\nclass OpenPose_Feature(nn.Module):\n def __init__(self):\n super(OpenPose_Feature, self).__init__()\n\n vgg19 = torchvision.models.vgg19(pretrained=True)\n model = {}\n model['block0'] = vgg19.features[0:23]\n\n model['block0'].add_module(\"23\", torch.nn.Conv2d(\n 512, 256, kernel_size=3, stride=1, padding=1))\n model['block0'].add_module(\"24\", torch.nn.ReLU(inplace=True))\n model['block0'].add_module(\"25\", torch.nn.Conv2d(\n 256, 128, kernel_size=3, stride=1, padding=1))\n model['block0'].add_module(\"26\", torch.nn.ReLU(inplace=True))\n\n self.model = model['block0']\n\n def forward(self, x):\n outputs = self.model(x)\n return outputs\n\n\ndef make_OpenPose_block(block_name):\n blocks = {}\n # Stage 1\n blocks['block1_1'] = [{'conv5_1_CPM_L1': [128, 128, 3, 1, 1]},\n {'conv5_2_CPM_L1': [128, 128, 3, 1, 1]},\n {'conv5_3_CPM_L1': [128, 128, 3, 1, 1]},\n {'conv5_4_CPM_L1': [128, 512, 1, 1, 0]},\n {'conv5_5_CPM_L1': [512, 38, 1, 1, 0]}]\n\n blocks['block1_2'] = [{'conv5_1_CPM_L2': [128, 128, 3, 1, 1]},\n {'conv5_2_CPM_L2': [128, 128, 3, 1, 1]},\n {'conv5_3_CPM_L2': [128, 128, 3, 1, 1]},\n {'conv5_4_CPM_L2': [128, 512, 1, 1, 0]},\n {'conv5_5_CPM_L2': [512, 19, 1, 1, 0]}]\n\n # Stages 2 - 6\n for i in range(2, 7):\n blocks['block%d_1' % i] = [\n {'Mconv1_stage%d_L1' % i: [185, 128, 7, 1, 3]},\n {'Mconv2_stage%d_L1' % i: [128, 128, 7, 1, 3]},\n {'Mconv3_stage%d_L1' % i: [128, 128, 7, 1, 3]},\n {'Mconv4_stage%d_L1' % i: [128, 128, 7, 
1, 3]},\n {'Mconv5_stage%d_L1' % i: [128, 128, 7, 1, 3]},\n {'Mconv6_stage%d_L1' % i: [128, 128, 1, 1, 0]},\n {'Mconv7_stage%d_L1' % i: [128, 38, 1, 1, 0]}\n ]\n\n blocks['block%d_2' % i] = [\n {'Mconv1_stage%d_L2' % i: [185, 128, 7, 1, 3]},\n {'Mconv2_stage%d_L2' % i: [128, 128, 7, 1, 3]},\n {'Mconv3_stage%d_L2' % i: [128, 128, 7, 1, 3]},\n {'Mconv4_stage%d_L2' % i: [128, 128, 7, 1, 3]},\n {'Mconv5_stage%d_L2' % i: [128, 128, 7, 1, 3]},\n {'Mconv6_stage%d_L2' % i: [128, 128, 1, 1, 0]},\n {'Mconv7_stage%d_L2' % i: [128, 19, 1, 1, 0]}\n ]\n\n cfg_dict = blocks[block_name]\n\n layers = []\n\n for i in range(len(cfg_dict)):\n for k, v in cfg_dict[i].items():\n if 'pool' in k:\n layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1],\n padding=v[2])]\n else:\n conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],\n kernel_size=v[2], stride=v[3],\n padding=v[4])\n layers += [conv2d, nn.ReLU(inplace=True)]\n\n net = nn.Sequential(*layers[:-1])\n\n def _initialize_weights_norm(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.normal_(m.weight, std=0.01)\n if m.bias is not None:\n init.constant_(m.bias, 0.0)\n\n net.apply(_initialize_weights_norm)\n\n return net\n" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.init.normal_", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
frankzl/deep-rap
[ "f992081b136e02d6ee5f976f0343f7e3220a1f39" ]
[ "tools/training.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport glob\nimport os\n\n\ndef batch_data(num_data, batch_size):\n \"\"\" Yield batches with indices until epoch is over.\n\n Parameters\n ----------\n num_data: int\n The number of samples in the dataset.\n batch_size: int\n The batch size used using training.\n\n Returns\n -------\n batch_ixs: np.array of ints with shape [batch_size,]\n Yields arrays of indices of size of the batch size until the epoch is over.\n \"\"\"\n\n # data_ixs = np.random.permutation(np.arange(num_data))\n data_ixs = np.arange(num_data)\n ix = 0\n while ix + batch_size < num_data:\n batch_ixs = data_ixs[ix:ix+batch_size]\n ix += batch_size\n yield batch_ixs\n\n\ndef sample( seed_text, trainable, encoder, decoder, length=40 ):\n \n \"\"\" prints the sampled string\n \n seed_text: string of the seed, must have minimum length of our timestep size\n \n trainable: object model to sample from\n \n encoder: encoder object to encode the seed_text\n \n decoder: decoder object to decode the output from the trainable\n \n length: how many symbols we want to sample\n \n \"\"\"\n \n seed = encoder.encode_raw( seed_text )\n\n #to print the seed characters\n seed_chars = seed_text\n print( \"------Sampling----------\" )\n print( f\"seed: \\n{seed_text}\\n-\" )\n \n #predict next symbols\n for i in range(length):\n seed = encoder.encode_raw( seed_chars )\n # Take only the last required symbols\n if len(seed.shape) == 3:\n seed = seed[:,-1*trainable.time_steps:,:]\n elif len(seed.shape) == 2:\n seed = seed[:,-1*trainable.time_steps:]\n\n \n # remove_fist_char = seed[:,1:,:]\n # seed = np.append(remove_fist_char, np.reshape(probabilities, [1, 1, trainable.vocab_size]), axis=1)\n \n predicted = trainable.session.run([trainable.final_output], feed_dict = {trainable.X:seed})\n predicted = np.asarray(predicted[0]).astype('float64')[0]\n \n predicted_symbol = decoder.decode( predicted )\n seed_chars += predicted_symbol\n print ('result: \\n'+ seed_chars)\n\n\ndef train_model(trainable, train_data, train_labels, sampler, epochs=20, batch_size=128, log_dir=None, \n embedding_matrix=None, retrain=False):\n train_losses = []\n train_accs = []\n \n trainable.session = tf.Session()\n session = trainable.session\n \n saver = tf.train.Saver(max_to_keep=4, keep_checkpoint_every_n_hours=0.5)\n\n TRAIN = True\n with session.as_default():\n session.run(tf.global_variables_initializer())\n # assign pretrained embedding matrix\n if embedding_matrix is not None:\n session.run(trainable.embedding_init, feed_dict={trainable.embedding_placeholder: embedding_matrix})\n\n if log_dir:\n LOG_DIR = log_dir\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR, exist_ok=True)\n if glob.glob(LOG_DIR + \"/*.meta\"):\n TRAIN = retrain\n saver = tf.train.import_meta_graph(glob.glob(LOG_DIR + '/*.meta')[0])\n saver.restore(session, os.path.join(LOG_DIR, \"model\"))\n print(\"Restoring an old model from '{}'\".format(LOG_DIR))\n if retrain:\n print(\"and training it further..\")\n else:\n print(\"Building model from scratch! \\n Saving into: '{}'\".format(LOG_DIR))\n else:\n LOG_DIR = \"logs/train_model\"\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR, exist_ok=True)\n print(\"Building model from scratch! 
\\n Saving into: '{}'\".format(LOG_DIR))\n\n tr_loss, tr_acc = session.run([trainable.loss, trainable.accuracy],\n feed_dict={trainable.X: train_data,\n trainable.Y: train_labels})\n train_losses.append(tr_loss)\n train_accs.append(tr_acc)\n \n if TRAIN:\n for epoch in range(epochs):\n \n for batch_ixs in batch_data(len(train_data), batch_size):\n _ = session.run(trainable.train_step,\n feed_dict={\n trainable.X: train_data[batch_ixs],\n trainable.Y: train_labels[batch_ixs],\n })\n tr_loss, tr_acc = session.run([trainable.loss, trainable.accuracy],\n feed_dict={trainable.X: train_data,\n trainable.Y: train_labels\n })\n train_losses.append(tr_loss)\n train_accs.append(tr_acc)\n \n if(epoch + 1) % 1 == 0:\n # saving the session into \"model\"\n saver.save(session, os.path.join(LOG_DIR, \"model\"))\n print(f\"\\n\\nEpoch {epoch + 1}/{epochs}\")\n print(f\"Loss: \\t {tr_loss}\")\n print(f\"Accuracy:\\t {tr_acc}\")\n \n \n #get on of training set as seed\n # seed_text = train_data[0]\n # seed_text = train_data[0]\n seed_text = \"\"\"as real as it seems the american dream\n ain't nothing but another calculated schemes\\nto get us locked up shot up back in chains\n to deny us of the future rob our names\\nkept my history of mystery but now i see\n the american dream wasn't meant for me\\ncause lady liberty is a hypocrite she lied to me\\npromised me freedom education equality\n never gave me nothing but slavery\\nand now look at how dangerous you made me\"\"\"\n \n sampler(trainable, seed_text)\n \n \n trainable.hist = {\n 'train_losses': np.array(train_losses),\n 'train_accuracy': np.array(train_accs)\n }\n\n\ndef sample_from_distribution(predicted, temperature=0.5):\n '''\n helper function to sample an index from a probability array\n '''\n exp_predicted = np.exp(predicted/temperature)\n predicted = exp_predicted / np.sum(exp_predicted)\n probabilities = np.random.multinomial(1, predicted, 1)\n return probabilities\n\nclass Encoder:\n def __init__(self, name):\n self.name = name\n def encode(self, seed_chars):\n pass\n\n def encode_raw(self, text):\n pass\n \nclass Decoder:\n def __init__(self, name):\n self.name = name\n def decode(self, predicted):\n pass\n\nclass OneHotEncoder(Encoder):\n \"\"\"\n Encodes sequences of words to sequences of 1-Hot Encoded vectors\n \"\"\"\n \n def __init__(self, name, word2index):\n super(OneHotEncoder, self).__init__(name)\n self.word2index = word2index\n \n def encode(self, sequences):\n encoded_sequences = []\n for seq in sequences:\n encoded = np.zeros( ( len(seq), len(self.word2index) ) )\n \n for idx, symbol in enumerate(seq):\n encoded[idx][ self.word2index[symbol] ] = 1\n \n encoded_sequences.append(encoded)\n \n return np.array(encoded_sequences)\n\n def encode_raw(self, text):\n return self.encode( [text] )\n \n def encode_labels(self, labels):\n \n encoded = []\n \n for label in labels:\n one_hot_vec = np.zeros(len(self.word2index), dtype=int)\n one_hot_vec[ self.word2index[label] ] = 1\n encoded.append( one_hot_vec )\n \n return np.array(encoded)\n \nclass OneHotDecoder(Decoder):\n \"\"\"\n Decodes a 1-Hot Encoded vector (prediction) to a word\n \"\"\"\n def __init__(self, name, index2word, temperature=0.5):\n super(OneHotDecoder, self).__init__(name)\n self.temperature = temperature\n self.index2word = index2word \n \n def decode(self, predicted):\n predicted = sample_from_distribution(predicted, temperature=self.temperature)\n return self.index2word[ np.argmax(predicted) ]\n\nclass OneHotWordEncoder(Encoder):\n \"\"\"\n Encodes sequences of 
words to sequences of 1-Hot Encoded vectors\n \"\"\"\n \n def __init__(self, name, word2index):\n super(OneHotWordEncoder, self).__init__(name)\n self.word2index = word2index\n \n def encode(self, sequences):\n \"\"\"\n Encodes our sequences of words to sequences of 1-Hots\n \"\"\"\n try:\n encoded_sequences = []\n for seq in sequences:\n \n encoded = np.zeros( ( len(seq), len(self.word2index) ) )\n \n for idx, word in enumerate(seq):\n encoded[idx][ self.word2index[word] ] = 1\n \n encoded_sequences.append(encoded)\n \n return np.array(encoded_sequences)\n except Exception as e:\n print(e)\n \n def encode_raw(self, text):\n \"\"\"\n Encodes a text to sequences of 1-Hots (needed for sampling)\n \"\"\"\n text = text.replace(\"\\n\", \" \\\\n \")\n text = text.replace(\" +\", \" \")\n words = text.split(\" \")\n encoded = np.zeros( ( len(words), len(self.word2index) ) )\n \n for idx, word in enumerate(words):\n if word != \"\":\n encoded[idx][ self.word2index[word] ] = 1\n \n return np.array( [encoded] )\n \n \n def encode_labels(self, labels):\n \"\"\"\n Encodes the labels (sequences of one word)\n \"\"\"\n \n encoded = []\n \n for label in labels:\n one_hot_vec = np.zeros(len(self.word2index), dtype=int)\n one_hot_vec[ self.word2index[label] ] = 1\n encoded.append( one_hot_vec )\n \n return np.array(encoded)\n \nclass OneHotWordDecoder(Decoder):\n \"\"\"\n Decodes a 1-Hot Encoded vector (prediction) to a word\n \"\"\"\n def __init__(self, name, index2word, temperature=0.5):\n super(OneHotWordDecoder, self).__init__(name)\n self.temperature = temperature\n self.index2word = index2word \n \n def decode(self, predicted):\n predicted = sample_from_distribution(predicted, temperature=self.temperature)\n return \" \" + self.index2word[ np.argmax(predicted) ].replace(\"\\\\n\",\"\\n\")\n\nclass IndexWordEncoder(Encoder):\n \"\"\"\n Encodes sequences of words to sequences of 1-Hot Encoded vectors\n \"\"\"\n \n def __init__(self, name, word2index):\n super(IndexWordEncoder, self).__init__(name)\n self.word2index = word2index\n \n def encode(self, sequences):\n \"\"\"\n Encodes our sequences of words to sequences of indices\n \"\"\"\n max_len = 0\n for seq in sequences:\n if len(seq) > max_len:\n max_len = len(seq)\n\n encoded_sequences = []\n for seq in sequences:\n \n # encoded = np.zeros( len(seq) )\n encoded = [ self.word2index[word] for word in seq ]\n diff = max_len - len(seq)\n if diff > 0:\n encoded.extend([0] * diff)\n \n encoded_sequences.append(encoded)\n \n return np.array(encoded_sequences)\n \n def encode_raw(self, text):\n \"\"\"\n Encodes a text to sequences of indices (needed for sampling)\n \"\"\"\n text = text.replace(\"\\n\", \" \\\\n \")\n text = text.replace(\" +\", \" \")\n words = text.split(\" \")\n encoded = np.zeros( len(words) )\n \n for idx, word in enumerate(words):\n if word != \"\":\n encoded[idx] = self.word2index[word]\n \n return np.array( [encoded] )\n \n \n def encode_labels(self, labels):\n \"\"\"\n Encodes the labels (sequences of one word)\n \"\"\"\n \n encoded = []\n \n for label in labels:\n one_hot_vec = np.zeros(len(self.word2index), dtype=int)\n one_hot_vec[ self.word2index[label] ] = 1\n encoded.append( one_hot_vec )\n \n return np.array(encoded)\n\ndef train_model_more_embeddings(trainable, train_data, train_data_2, train_labels, sampler, epochs=20, \n batch_size=128, log_dir=None, \n embedding_matrix=None, \n embedding_matrix_phonem=None, retrain=False):\n train_losses = []\n train_accs = []\n \n trainable.session = tf.Session()\n session = 
trainable.session\n \n saver = tf.train.Saver(max_to_keep=4, keep_checkpoint_every_n_hours=0.5)\n\n TRAIN = True\n with session.as_default():\n session.run(tf.global_variables_initializer())\n # assign pretrained embedding matrix\n if embedding_matrix is not None:\n session.run(trainable.embedding_init, feed_dict={trainable.embedding_placeholder: embedding_matrix})\n if embedding_matrix_phonem is not None:\n session.run(trainable.embedding_phonem_init, feed_dict={trainable.embedding_phonem_placeholder: embedding_matrix_phonem})\n\n if log_dir:\n LOG_DIR = log_dir\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR, exist_ok=True)\n if glob.glob(LOG_DIR + \"/*.meta\"):\n TRAIN = retrain\n saver = tf.train.import_meta_graph(glob.glob(LOG_DIR + '/*.meta')[0])\n saver.restore(session, os.path.join(LOG_DIR, \"model\"))\n print(\"Restoring an old model from '{}'\".format(LOG_DIR))\n if retrain:\n print(\"and training it further..\")\n else:\n print(\"Building model from scratch! \\n Saving into: '{}'\".format(LOG_DIR))\n else:\n LOG_DIR = \"logs/train_model\"\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR, exist_ok=True)\n print(\"Building model from scratch! \\n Saving into: '{}'\".format(LOG_DIR))\n\n tr_loss, tr_acc = session.run([trainable.loss, trainable.accuracy],\n feed_dict={trainable.X: train_data,\n trainable.Y: train_labels,\n trainable.X_phonem: train_data_2})\n train_losses.append(tr_loss)\n train_accs.append(tr_acc)\n \n if TRAIN:\n for epoch in range(epochs):\n \n for batch_ixs in batch_data(len(train_data), batch_size):\n _ = session.run(trainable.train_step,\n feed_dict={\n trainable.X: train_data[batch_ixs],\n trainable.Y: train_labels[batch_ixs],\n trainable.X_phonem: train_data_2[batch_ixs]\n })\n tr_loss, tr_acc = session.run([trainable.loss, trainable.accuracy],\n feed_dict={trainable.X: train_data,\n trainable.Y: train_labels,\n trainable.X_phonem: train_data_2\n })\n train_losses.append(tr_loss)\n train_accs.append(tr_acc)\n \n if(epoch + 1) % 1 == 0:\n # saving the session into \"model\"\n saver.save(session, os.path.join(LOG_DIR, \"model\"))\n print(f\"\\n\\nEpoch {epoch + 1}/{epochs}\")\n print(f\"Loss: \\t {tr_loss}\")\n print(f\"Accuracy:\\t {tr_acc}\")\n \n \n #get on of training set as seed\n # seed_text = train_data[0]\n # seed_text = train_data[0]\n seed_text = \"\"\"as real as it seems the american dream\n ain't nothing but another calculated schemes\\nto get us locked up shot up back in chains\n to deny us of the future rob our names\\nkept my history of mystery but now i see\n the american dream wasn't meant for me\\ncause lady liberty is a hypocrite she lied to me\\npromised me freedom education equality\n never gave me nothing but slavery\\nand now look at how dangerous you made me\"\"\"\n \n sampler(trainable, seed_text)\n \n \n trainable.hist = {\n 'train_losses': np.array(train_losses),\n 'train_accuracy': np.array(train_accs)\n }" ]
[ [ "numpy.asarray", "numpy.arange", "tensorflow.global_variables_initializer", "numpy.random.multinomial", "numpy.argmax", "tensorflow.Session", "tensorflow.train.Saver", "numpy.array", "numpy.exp", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
msb002/imped2drt
[ "37247b8e29bc88b053bde8cf9b0981c3cf5c99cc" ]
[ "KKlib.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 16:36:57 2018\nLast Updated: Jan 17 2020\n@author: NicoleCreange\n\nBased on work by Yoed Tsur\n\"\"\"\n#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\nnp.seterr(divide='ignore', invalid='ignore')\n\ndef KKT_i2r(ww,z): \n l = len(z)\n KKTr = []\n ww= ww*2*np.pi\n for ii in range(0,l):\n KKT = []\n for jj in range(0,l):\n if jj!=ii:\n x = (ww[jj]*z[jj].imag - ww[ii]*z[ii].imag)/(ww[jj]**2 - ww[ii]**2)\n if jj==ii and jj!=0 and jj!=l-1:\n x = 0.5*((z[jj].imag/ww[jj]) + ((z[jj+1].imag - z[jj-1].imag)/(ww[jj+1] - ww[jj-1])))\n if jj==ii and jj==0 and jj!=l-1:\n x = 0.5*(z[jj].imag/ww[jj] + ((z[jj+1].imag-z[jj].imag)/(ww[jj+1]-ww[jj])))\n if jj==ii and jj!=0 and jj==l-1:\n x = 0.5*((z[jj].imag/ww[jj]) + ((z[jj].imag-z[jj-1].imag)/(ww[jj]-ww[jj-1])))\n KKT.append(x)\n from scipy.interpolate import CubicSpline as scaps\n cs = scaps(ww,KKT)\n rekk = 0\n for mm in range(l-1):\n trap = (KKT[mm+1] + KKT[mm])/2\n dw = ww[mm+1]-ww[mm]\n rekk = rekk + trap*dw\n KKTr.append((2/np.pi)*rekk + z[-1].real)\n return KKTr\ndef KKT_r2i(ww,z): #what Yoed has\n l = len(z)\n ww= ww*2*np.pi\n KKTi = []\n for ii in range(0,l):\n KKT = []\n for jj in range(0,l):\n if jj!=ii:\n x = (z[jj].real - z[ii].real)/(ww[jj]**2 - ww[ii]**2)\n if jj==ii and jj!=0 and jj!=l-1:\n x = ((z[jj+1].real - z[jj-1].real)/(ww[jj+1] - ww[jj-1]))/(2*ww[jj])\n if jj==ii and jj==0 and jj!=l-1:\n x = ((z[jj+1].real - z[jj].real)/(ww[jj+1]-ww[jj]))/(2*ww[jj])\n if jj==ii and jj!=0 and jj==l-1:\n x = ((z[jj].real - z[jj-1].real)/(ww[jj]-ww[jj-1]))/(2*ww[jj])\n KKT.append(x)\n from scipy.interpolate import CubicSpline as scaps\n cs = scaps(ww,KKT)\n imkk = 0\n for mm in range(l-1):\n trap = (KKT[mm+1] + KKT[mm])/2\n dw = ww[mm+1]-ww[mm]\n imkk = imkk + trap*dw\n KKTi.append((2*ww[ii]/np.pi)*imkk)\n return KKTi\n\n#%%\n\ndef KKT(w,Z,thres=0.2,clean=False):\n import matplotlib.pyplot as plt\n # check for order of data. 
Needs to be low frequency to high frequency\n order = w[0]-w[-1]\n print(order)\n if order > 0:\n w = np.flipud(w)\n Z = np.flipud(Z)\n zr = Z.real\n zi = Z.imag\n z = zr-1j*zi\n KKTimag = KKT_r2i(w,z)\n KKTreal = KKT_i2r(w,z)\n KKw = w\n fig,axs = plt.subplots(nrows=2,ncols=2,figsize=(8,5))\n if clean==True:\n KKTdata = np.asarray(KKTreal)+1j*np.asarray(KKTimag)\n wc,Zdata = KKT_clean(w,Z,KKTdata,thres)\n axs[0,1].semilogx(wc,Zdata.real,'b.',label = \"$Z_{exp}$'\")\n axs[0,1].semilogx(wc,-Zdata.imag,'k.',label = '$Z_{exp}$\"')\n axs[0,1].semilogx(w,Z.real,'b:')\n axs[0,1].semilogx(w,-Z.imag,'k:')\n axs[0,1].semilogx(KKw,np.asarray(KKTreal),'g',label = 'KKT real')\n axs[0,1].semilogx(KKw,-np.asarray(KKTimag),'r',label = 'KKT imag')\n axs[0,1].ticklabel_format(style='sci',axis='y',scilimits=(0,0))\n axs[0,1].set_xlabel('Frequency (Hz)')\n axs[0,1].set_ylabel('|Z|')\n axs[0,1].legend(fontsize=8)\n return fig,axs,wc,Zdata\n else:\n axs[0,1].semilogx(w,Z.real,'b.',label = \"$Z_{exp}$'\")\n axs[0,1].semilogx(w,-Z.imag,'k.',label = '$Z_{exp}$\"')\n axs[0,1].semilogx(KKw,np.asarray(KKTreal),'g',label = 'KKT real')\n axs[0,1].semilogx(KKw,-np.asarray(KKTimag),'r',label = 'KKT imag')\n axs[0,1].ticklabel_format(style='sci',axis='y',scilimits=(0,0))\n axs[0,1].set_xlabel('Frequency (Hz)')\n axs[0,1].set_ylabel('|Z|')\n axs[0,1].legend(fontsize=8)\n return fig,axs,w,Z\n\ndef KKT_clean(w,Z,KKTdata,thres):\n import numpy as np\n \n res_real = (KKTdata.real-Z.real)\n res_imag = (KKTdata.imag-Z.imag)\n if np.sum(res_real) > np.sum(res_imag):\n rem_r = [np.abs(res_real) > np.abs(Z.real)*thres]\n indx_r = np.where(rem_r[0]==True)\n z_clean = np.delete(Z,indx_r)\n wc = np.delete(w,indx_r)\n else:\n rem_i = [np.abs(res_imag) > np.abs(Z.imag)*thres]\n indx_i = np.where(rem_i[0]==True)\n z_clean = np.delete(Z,indx_i)\n wc = np.delete(w,indx_i)\n return wc,z_clean\n\n" ]
[ [ "numpy.abs", "numpy.asarray", "numpy.flipud", "matplotlib.pyplot.subplots", "numpy.seterr", "numpy.delete", "scipy.interpolate.CubicSpline", "numpy.where", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "1.3", "1.8" ], "tensorflow": [] } ]
w-k-jones/tobac
[ "e2af8e0c6f7e78f936c42539660bcb2b20e290c6" ]
[ "tobac/analysis.py" ]
[ "import pandas as pd\nimport numpy as np\nimport logging\nimport os\n\nfrom .utils import mask_cell,mask_cell_surface,mask_cube_cell,get_bounding_box\n\ndef cell_statistics_all(input_cubes,track,mask,aggregators,output_path='./',cell_selection=None,output_name='Profiles',width=10000,z_coord='model_level_number',dimensions=['x','y'],**kwargs):\n if cell_selection is None:\n cell_selection=np.unique(track['cell'])\n for cell in cell_selection :\n cell_statistics(input_cubes=input_cubes,track=track, mask=mask,\n dimensions=dimensions,aggregators=aggregators,cell=cell,\n output_path=output_path,output_name=output_name,\n width=width,z_coord=z_coord,**kwargs)\n\ndef cell_statistics(input_cubes,track,mask,aggregators,cell,output_path='./',output_name='Profiles',width=10000,z_coord='model_level_number',dimensions=['x','y'],**kwargs):\n from iris.cube import Cube,CubeList\n from iris.coords import AuxCoord\n from iris import Constraint,save \n \n # If input is single cube, turn into cubelist\n if type(input_cubes) is Cube:\n input_cubes=CubeList([input_cubes])\n \n logging.debug('Start calculating profiles for cell '+str(cell))\n track_i=track[track['cell']==cell]\n \n cubes_profile={}\n for aggregator in aggregators:\n cubes_profile[aggregator.name()]=CubeList()\n \n for time_i in track_i['time'].values:\n constraint_time = Constraint(time=time_i)\n \n mask_i=mask.extract(constraint_time)\n mask_cell_i=mask_cell(mask_i,cell,track_i,masked=False)\n mask_cell_surface_i=mask_cell_surface(mask_i,cell,track_i,masked=False,z_coord=z_coord)\n\n x_dim=mask_cell_surface_i.coord_dims('projection_x_coordinate')[0]\n y_dim=mask_cell_surface_i.coord_dims('projection_y_coordinate')[0]\n x_coord=mask_cell_surface_i.coord('projection_x_coordinate')\n y_coord=mask_cell_surface_i.coord('projection_y_coordinate')\n \n if (mask_cell_surface_i.core_data()>0).any():\n box_mask_i=get_bounding_box(mask_cell_surface_i.core_data(),buffer=1)\n \n box_mask=[[x_coord.points[box_mask_i[x_dim][0]],x_coord.points[box_mask_i[x_dim][1]]],\n [y_coord.points[box_mask_i[y_dim][0]],y_coord.points[box_mask_i[y_dim][1]]]]\n else:\n box_mask=[[np.nan,np.nan],[np.nan,np.nan]]\n \n x=track_i[track_i['time'].values==time_i]['projection_x_coordinate'].values[0]\n y=track_i[track_i['time'].values==time_i]['projection_y_coordinate'].values[0]\n\n box_slice=[[x-width,x+width],[y-width,y+width]]\n \n x_min=np.nanmin([box_mask[0][0],box_slice[0][0]])\n x_max=np.nanmax([box_mask[0][1],box_slice[0][1]])\n y_min=np.nanmin([box_mask[1][0],box_slice[1][0]])\n y_max=np.nanmax([box_mask[1][1],box_slice[1][1]])\n\n constraint_x=Constraint(projection_x_coordinate=lambda cell: int(x_min) < cell < int(x_max))\n constraint_y=Constraint(projection_y_coordinate=lambda cell: int(y_min) < cell < int(y_max))\n\n constraint=constraint_time & constraint_x & constraint_y\n# Mask_cell_surface_i=mask_cell_surface(Mask_w_i,cell,masked=False,z_coord='model_level_number')\n mask_cell_i=mask_cell_i.extract(constraint)\n mask_cell_surface_i=mask_cell_surface_i.extract(constraint)\n\n input_cubes_i=input_cubes.extract(constraint)\n for cube in input_cubes_i:\n cube_masked=mask_cube_cell(cube,mask_cell_i,cell,track_i)\n coords_remove=[]\n for coordinate in cube_masked.coords(dim_coords=False):\n\n if coordinate.name() not in dimensions:\n for dim in dimensions:\n if set(cube_masked.coord_dims(coordinate)).intersection(set(cube_masked.coord_dims(dim))):\n coords_remove.append(coordinate.name())\n for coordinate in set(coords_remove):\n cube_masked.remove_coord(coordinate) 
\n \n for aggregator in aggregators:\n cube_collapsed=cube_masked.collapsed(dimensions,aggregator,**kwargs)\n #remove all collapsed coordinates (x and y dim, scalar now) and keep only time as all these coordinates are useless\n for coordinate in cube_collapsed.coords():\n if not cube_collapsed.coord_dims(coordinate):\n if coordinate.name() is not 'time':\n cube_collapsed.remove_coord(coordinate)\n logging.debug(str(cube_collapsed))\n cubes_profile[aggregator.name()].append(cube_collapsed)\n\n\n minutes=(track_i['time_cell']/pd.Timedelta(minutes=1)).values\n latitude=track_i['latitude'].values\n longitude=track_i['longitude'].values\n minutes_coord=AuxCoord(minutes,long_name='cell_time',units='min')\n latitude_coord=AuxCoord(latitude,long_name='latitude',units='degrees')\n longitude_coord=AuxCoord(longitude,long_name='longitude',units='degrees')\n \n for aggregator in aggregators:\n cubes_profile[aggregator.name()]=cubes_profile[aggregator.name()].merge()\n for cube in cubes_profile[aggregator.name()]:\n cube.add_aux_coord(minutes_coord,data_dims=cube.coord_dims('time'))\n cube.add_aux_coord(latitude_coord,data_dims=cube.coord_dims('time'))\n cube.add_aux_coord(longitude_coord,data_dims=cube.coord_dims('time'))\n os.makedirs(os.path.join(output_path,output_name,aggregator.name()),exist_ok=True)\n savefile=os.path.join(output_path,output_name,aggregator.name(),output_name+'_'+ aggregator.name()+'_'+str(int(cell))+'.nc')\n save(cubes_profile[aggregator.name()],savefile)\n\n\ndef cog_cell(cell,Tracks=None,M_total=None,M_liquid=None,\n M_frozen=None,\n Mask=None,\n savedir=None):\n \n \n from iris import Constraint\n logging.debug('Start calculating COG for '+str(cell))\n Track=Tracks[Tracks['cell']==cell]\n constraint_time=Constraint(time=lambda cell: Track.head(1)['time'].values[0] <= cell <= Track.tail(1)['time'].values[0])\n M_total_i=M_total.extract(constraint_time)\n M_liquid_i=M_liquid.extract(constraint_time)\n M_frozen_i=M_frozen.extract(constraint_time)\n Mask_i=Mask.extract(constraint_time)\n\n savedir_cell=os.path.join(savedir,'cells',str(int(cell)))\n os.makedirs(savedir_cell,exist_ok=True)\n savefile_COG_total_i=os.path.join(savedir_cell,'COG_total'+'_'+str(int(cell))+'.h5')\n savefile_COG_liquid_i=os.path.join(savedir_cell,'COG_liquid'+'_'+str(int(cell))+'.h5')\n savefile_COG_frozen_i=os.path.join(savedir_cell,'COG_frozen'+'_'+str(int(cell))+'.h5')\n \n Tracks_COG_total_i=calculate_cog(Track,M_total_i,Mask_i)\n# Tracks_COG_total_list.append(Tracks_COG_total_i)\n logging.debug('COG total loaded for ' +str(cell))\n \n Tracks_COG_liquid_i=calculate_cog(Track,M_liquid_i,Mask_i)\n# Tracks_COG_liquid_list.append(Tracks_COG_liquid_i)\n logging.debug('COG liquid loaded for ' +str(cell))\n Tracks_COG_frozen_i=calculate_cog(Track,M_frozen_i,Mask_i)\n# Tracks_COG_frozen_list.append(Tracks_COG_frozen_i)\n logging.debug('COG frozen loaded for ' +str(cell))\n \n Tracks_COG_total_i.to_hdf(savefile_COG_total_i,'table')\n Tracks_COG_liquid_i.to_hdf(savefile_COG_liquid_i,'table')\n Tracks_COG_frozen_i.to_hdf(savefile_COG_frozen_i,'table')\n logging.debug('individual COG calculated and saved to '+ savedir_cell)\n\n\ndef lifetime_histogram(Track,bin_edges=np.arange(0,200,20),density=False,return_values=False):\n Track_cell=Track.groupby('cell')\n minutes=(Track_cell['time_cell'].max()/pd.Timedelta(minutes=1)).values\n hist, bin_edges = np.histogram(minutes, bin_edges,density=density)\n bin_centers=bin_edges[:-1]+0.5*np.diff(bin_edges)\n if return_values:\n return hist,bin_edges,bin_centers,minutes\n 
else:\n return hist,bin_edges,bin_centers\n \ndef haversine(lat1,lon1,lat2,lon2):\n \"\"\"Computes the Haversine distance in kilometres between two points (based on implementation CIS https://github.com/cedadev/cis)\n :param lat1: first point or points as array, each as array of latitude in degrees\n :param lon1: first point or points as array, each as array of longitude in degrees\n :param lat2: second point or points as array, each as array of latitude in degrees\n :param lon2: second point or points as array, each as array of longitude in degrees\n :return: distance between the two points in kilometres\n \"\"\"\n RADIUS_EARTH = 6378.0\n lat1 = np.radians(lat1)\n lat2 = np.radians(lat2)\n lon1 = np.radians(lon1)\n lon2 = np.radians(lon2)\n #print(lat1,lat2,lon1,lon2)\n arclen = 2 * np.arcsin(np.sqrt((np.sin((lat2 - lat1) / 2)) ** 2 + np.cos(lat1) * np.cos(lat2) * (np.sin((lon2 - lon1) / 2)) ** 2))\n return arclen * RADIUS_EARTH\n\ndef calculate_distance(feature_1,feature_2,method_distance=None):\n \"\"\"Computes distance between two features based on either lat/lon coordinates or x/y coordinates\n :param feature_1: first feature or points as array, each as array of latitude, longitude in degrees\n :param feature_2: second feature or points as array, each as array of latitude, longitude in degrees\n :return: distance between the two features in metres\n \"\"\"\n if method_distance is None:\n if ('projection_x_coordinate' in feature_1) and ('projection_y_coordinate' in feature_1) and ('projection_x_coordinate' in feature_2) and ('projection_y_coordinate' in feature_2) :\n method_distance='xy'\n elif ('latitude' in feature_1) and ('longitude' in feature_1) and ('latitude' in feature_2) and ('longitude' in feature_2):\n method_distance='latlon'\n else:\n raise ValueError('either latitude/longitude or projection_x_coordinate/projection_y_coordinate have to be present to calculate distances')\n\n if method_distance=='xy':\n distance=np.sqrt((feature_1['projection_x_coordinate']-feature_2['projection_x_coordinate'])**2\n +(feature_1['projection_y_coordinate']-feature_2['projection_y_coordinate'])**2)\n elif method_distance=='latlon':\n distance=1000*haversine(feature_1['latitude'],feature_1['longitude'],feature_2['latitude'],feature_2['longitude'])\n else:\n raise ValueError('method undefined')\n return distance\n\ndef calculate_velocity_individual(feature_old,feature_new,method_distance=None):\n distance=calculate_distance(feature_old,feature_new,method_distance=method_distance)\n diff_time=((feature_new['time']-feature_old['time']).total_seconds())\n velocity=distance/diff_time\n return velocity\n\ndef calculate_velocity(track,method_distance=None):\n for cell_i,track_i in track.groupby('cell'):\n index=track_i.index.values\n for i,index_i in enumerate(index[:-1]):\n velocity=calculate_velocity_individual(track_i.loc[index[i]],track_i.loc[index[i+1]],method_distance=method_distance)\n track.at[index_i,'v']=velocity\n return track\n\ndef velocity_histogram(track,bin_edges=np.arange(0,30,1),density=False,method_distance=None,return_values=False):\n if 'v' not in track.columns:\n logging.info('calculate velocities')\n track=calculate_velocity(track)\n velocities=track['v'].values\n hist, bin_edges = np.histogram(velocities[~np.isnan(velocities)], bin_edges,density=density)\n if return_values:\n return hist,bin_edges,velocities\n else:\n return hist,bin_edges\n\ndef calculate_nearestneighbordistance(features,method_distance=None):\n from itertools import combinations\n 
features['min_distance']=np.nan\n for time_i,features_i in features.groupby('time'):\n logging.debug(str(time_i))\n indeces=combinations(features_i.index.values,2)\n #Loop over combinations to remove features that are closer together than min_distance and keep larger one (either higher threshold or larger area)\n distances=[]\n for index_1,index_2 in indeces: \n if index_1 is not index_2: \n distance=calculate_distance(features_i.loc[index_1],features_i.loc[index_2],method_distance=method_distance)\n distances.append(pd.DataFrame({'index_1':index_1,'index_2':index_2,'distance': distance}, index=[0]))\n if any([x is not None for x in distances]):\n distances=pd.concat(distances, ignore_index=True) \n for i in features_i.index:\n min_distance=distances.loc[(distances['index_1']==i) | (distances['index_2']==i),'distance'].min()\n features.at[i,'min_distance']=min_distance\n return features\n\ndef nearestneighbordistance_histogram(features,bin_edges=np.arange(0,30000,500),density=False,method_distance=None,return_values=False):\n if 'min_distance' not in features.columns:\n logging.debug('calculate nearest neighbor distances')\n features=calculate_nearestneighbordistance(features,method_distance=method_distance)\n distances=features['min_distance'].values\n hist, bin_edges = np.histogram(distances[~np.isnan(distances)], bin_edges,density=density)\n if return_values:\n return hist,bin_edges,distances\n else:\n return hist,bin_edges\n \n# Treatment of 2D lat/lon coordinates to be added:\n# def calculate_areas_2Dlatlon(latitude_coord,longitude_coord):\n# lat=latitude_coord.core_data()\n# lon=longitude_coord.core_data()\n# area=np.zeros(lat.shape)\n# dx=np.zeros(lat.shape)\n# dy=np.zeros(lat.shape)\n \n# return area\n\ndef calculate_area(features,mask,method_area=None):\n from tobac.utils import mask_features_surface,mask_features\n from iris import Constraint\n from iris.analysis.cartography import area_weights\n \n features['area']=np.nan\n \n mask_coords=[coord.name() for coord in mask.coords()]\n if method_area is None:\n if ('projection_x_coordinate' in mask_coords) and ('projection_y_coordinate' in mask_coords):\n method_area='xy'\n elif ('latitude' in mask_coords) and ('longitude' in mask_coords):\n method_area='latlon'\n else:\n raise ValueError('either latitude/longitude or projection_x_coordinate/projection_y_coordinate have to be present to calculate distances')\n logging.debug('calculating area using method '+ method_area)\n if method_area=='xy':\n if not (mask.coord('projection_x_coordinate').has_bounds() and mask.coord('projection_y_coordinate').has_bounds()):\n mask.coord('projection_x_coordinate').guess_bounds()\n mask.coord('projection_y_coordinate').guess_bounds()\n area=np.outer(np.diff(mask.coord('projection_x_coordinate').bounds,axis=1),np.diff(mask.coord('projection_y_coordinate').bounds,axis=1))\n elif method_area=='latlon':\n if (mask.coord('latitude').ndim==1) and (mask.coord('latitude').ndim==1):\n if not (mask.coord('latitude').has_bounds() and mask.coord('longitude').has_bounds()):\n mask.coord('latitude').guess_bounds()\n mask.coord('longitude').guess_bounds()\n area=area_weights(mask,normalize=False)\n elif mask.coord('latitude').ndim==2 and mask.coord('longitude').ndim==2:\n raise ValueError('2D latitude/longitude coordinates not supported yet')\n # area=calculate_areas_2Dlatlon(mask.coord('latitude'),mask.coord('longitude'))\n else:\n raise ValueError('latitude/longitude coordinate shape not supported')\n else:\n raise ValueError('method undefined')\n \n for 
time_i,features_i in features.groupby('time'):\n logging.debug('timestep:'+ str(time_i))\n constraint_time = Constraint(time=time_i)\n mask_i=mask.extract(constraint_time)\n for i in features_i.index:\n if len(mask_i.shape)==3:\n mask_i_surface = mask_features_surface(mask_i, features_i.loc[i,'feature'], z_coord='model_level_number')\n elif len(mask_i.shape)==2: \n mask_i_surface=mask_features(mask_i,features_i.loc[i,'feature'])\n area_feature=np.sum(area*(mask_i_surface.data>0))\n features.at[i,'area']=area_feature\n return features\n\ndef area_histogram(features,mask,bin_edges=np.arange(0,30000,500),\n density=False,method_area=None,\n return_values=False,representative_area=False):\n if 'area' not in features.columns:\n logging.info('calculate area')\n features=calculate_area(features, mask, method_area)\n areas=features['area'].values\n # restrict to non NaN values:\n areas=areas[~np.isnan(areas)]\n if representative_area:\n weights=areas\n else:\n weights=None\n hist, bin_edges = np.histogram(areas, bin_edges,density=density,weights=weights) \n bin_centers=bin_edges[:-1]+0.5*np.diff(bin_edges)\n\n if return_values:\n return hist,bin_edges,bin_centers,areas\n else:\n return hist,bin_edges,bin_centers\n \ndef histogram_cellwise(Track,variable=None,bin_edges=None,quantity='max',density=False):\n Track_cell=Track.groupby('cell')\n if quantity=='max':\n variable_cell=Track_cell[variable].max().values\n elif quantity=='min':\n variable_cell=Track_cell[variable].min().values\n elif quantity=='mean':\n variable_cell=Track_cell[variable].mean().values\n else:\n raise ValueError('quantity unknown, must be max, min or mean')\n hist, bin_edges = np.histogram(variable_cell, bin_edges,density=density)\n bin_centers=bin_edges[:-1]+0.5*np.diff(bin_edges)\n\n return hist,bin_edges, bin_centers\n\ndef histogram_featurewise(Track,variable=None,bin_edges=None,density=False):\n hist, bin_edges = np.histogram(Track[variable].values, bin_edges,density=density)\n bin_centers=bin_edges[:-1]+0.5*np.diff(bin_edges)\n\n return hist,bin_edges, bin_centers\n\ndef calculate_overlap(track_1,track_2,min_sum_inv_distance=None,min_mean_inv_distance=None):\n cells_1=track_1['cell'].unique()\n# n_cells_1_tot=len(cells_1)\n cells_2=track_2['cell'].unique()\n overlap=pd.DataFrame()\n for i_cell_1,cell_1 in enumerate(cells_1):\n for cell_2 in cells_2:\n track_1_i=track_1[track_1['cell']==cell_1]\n track_2_i=track_2[track_2['cell']==cell_2]\n track_1_i=track_1_i[track_1_i['time'].isin(track_2_i['time'])]\n track_2_i=track_2_i[track_2_i['time'].isin(track_1_i['time'])]\n if not track_1_i.empty:\n n_overlap=len(track_1_i)\n distances=[]\n for i in range(len(track_1_i)):\n distance=calculate_distance(track_1_i.iloc[[i]],track_2_i.iloc[[i]],method_distance='xy')\n distances.append(distance)\n# mean_distance=np.mean(distances)\n mean_inv_distance=np.mean(1/(1+np.array(distances)/1000))\n# mean_inv_squaredistance=np.mean(1/(1+(np.array(distances)/1000)**2))\n sum_inv_distance=np.sum(1/(1+np.array(distances)/1000))\n# sum_inv_squaredistance=np.sum(1/(1+(np.array(distances)/1000)**2))\n overlap=overlap.append({'cell_1':cell_1,\n 'cell_2':cell_2,\n 'n_overlap':n_overlap,\n# 'mean_distance':mean_distance,\n 'mean_inv_distance':mean_inv_distance,\n# 'mean_inv_squaredistance':mean_inv_squaredistance,\n 'sum_inv_distance':sum_inv_distance,\n# 'sum_inv_squaredistance':sum_inv_squaredistance\n },ignore_index=True)\n if min_sum_inv_distance:\n overlap=overlap[(overlap['sum_inv_distance']>=min_sum_inv_distance)] \n if 
min_mean_inv_distance:\n overlap=overlap[(overlap['mean_inv_distance']>=min_mean_inv_distance)] \n\n return overlap" ]
[ [ "numpy.nanmax", "pandas.concat", "numpy.radians", "numpy.sqrt", "numpy.unique", "numpy.isnan", "numpy.arange", "numpy.nanmin", "numpy.cos", "pandas.DataFrame", "pandas.Timedelta", "numpy.sin", "numpy.diff", "numpy.array", "numpy.histogram", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BrianBartling/MusicSymbolClassifier
[ "85042c305a2d0123149bc2e9ffea7f4dc5b4b839" ]
[ "ModelTester/ClassifyAndLocalizeImages.py" ]
[ "#!/usr/bin/python\nimport sys\nfrom argparse import ArgumentParser\nfrom typing import List\n\nimport imageio\nimport numpy\nfrom PIL import ImageDraw, Image\nfrom tensorflow.keras.models import load_model\nimport os\n\n\nclass_names = []\n\n\ndef test_model(model_path: str, image_paths: List[str]):\n # model_path = \"C:\\\\Users\\\\Alex\\\\Repositories\\\\MusicSymbolClassifier\\\\HomusTrainer\\\\results\\\\2017-06-05_vgg4.h5\"\n # model_name = \"vgg4\"\n # image_path = \"C:\\\\Users\\\\Alex\\\\Repositories\\\\MusicSymbolClassifier\\\\HomusTrainer\\\\Quarter1.png\"\n\n print(\"Weights loaded from : \", model_path)\n\n print(\"Loading classifier...\")\n classifier = load_model(model_path)\n classifier.summary()\n\n input_shape = classifier.input_shape [1:3] # For some reason, input-shape has the form (None, 1, 2, 3)\n print(\" Input shape: {0}, Output: {1} classes\".format(input_shape, classifier.output_shape[1]))\n\n for image_path in image_paths:\n input_image = imageio.imread(image_path, as_gray=False, pilmode=\"RGB\")\n print(\"\\nLoading image {0} of shape {1}\".format(image_path, input_image.shape))\n\n print(\"Preprocessing image ...\")\n print(\" Resizing to \" + str(input_shape))\n normalized_input_image = Image.fromarray(input_image).resize(input_shape)\n normalized_input_image_array = numpy.asarray(normalized_input_image, dtype=numpy.float32)\n input_image = normalized_input_image_array\n \n print(\" Result: shape: {0}, dtype: {1}, mean: {2:.3f}, std: {3:.3f}\".format(normalized_input_image_array.shape,\n normalized_input_image_array.dtype,\n numpy.mean(normalized_input_image_array),\n numpy.std(normalized_input_image_array)))\n \n # Image.fromarray(normalized_input_image_array.astype(numpy.uint8), mode=\"RGB\").save(\"normalized_input.png\")\n\n\n # plot_model(classifier, to_file='classifier.png')\n\n print(\"Classifying image ...\")\n # print(\"1/1 [==============================] - 0s\")\n\n result = classifier.predict(numpy.array([input_image]))\n print('result: ', result)\n scores = result[0].flatten()\n if len(result) > 1:\n bounding_box = result[1].flatten()\n print(\"Bounding-Box: {0}\".format(bounding_box))\n else:\n bounding_box = None\n class_with_highest_probability = numpy.where(scores == scores.max())[0][0]\n\n if len(image_paths) == 1:\n print(\"Class scores:\")\n for i in range(len(scores)):\n print(\"{0:<18s} {1:.5f}\".format(class_names[i], scores[i]))\n\n most_likely_class = class_names[class_with_highest_probability]\n print(\" Image is most likely: {0} (certainty: {1:0.2f})\".format(most_likely_class,\n scores[class_with_highest_probability]))\n\n red = (255, 0, 0)\n image_with_bounding_box = normalized_input_image\n draw = ImageDraw.Draw(image_with_bounding_box)\n if bounding_box:\n rectangle = (bounding_box[0], bounding_box[1], bounding_box[0] + bounding_box[2],\n bounding_box[1] + bounding_box[3])\n draw.rectangle(rectangle, fill=None, outline=red)\n path = os.path.dirname(image_path)\n file_name, extension = os.path.splitext(os.path.basename(image_path))\n image_with_bounding_box.save(\n os.path.join(path, \"{0}_{1}_localization{2}\".format(file_name, most_likely_class, extension)))\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"Classify an RGB-image with a pre-trained classifier\")\n parser.add_argument(\"-c\", \"--classifier\", dest=\"model_path\",\n help=\"path to the classifier that contains the weights (*.h5)\",\n default=\"2017-08-17_vgg4_with_localization.h5\")\n parser.add_argument(\"-i\", \"--images\", dest=\"image_paths\", 
nargs=\"+\",\n help=\"path(s) to the rgb image(s) to classify\",\n # default=\"C:\\\\Users\\\\Alex\\\\Repositories\\\\MusicSymbolClassifier\\\\ModelTrainer\\\\data\\\\images\\\\3-4-Time\\\\1-13_3.png abc\",\n default=\"\"\n )\n parser.add_argument(\"-d\", \"--image_directory\", dest=\"image_directory\",\n help=\"path to a folder that contains all images that should be classified\",\n # default=\"C:\\\\Users\\\\Alex\\\\Repositories\\\\MusicSymbolClassifier\\\\ModelTester\\\\test-data\\\\\",\n default=\"\"\n )\n\n args = parser.parse_args()\n\n if len(args.image_paths) == 0 and len(args.image_directory) == 0:\n print(\"No data for classification provided. Aborting\")\n parser.print_help()\n sys.exit(-1)\n\n if args.image_directory:\n class_names = sorted(os.listdir(args.image_directory))\n else:\n class_names = ['12-8-Time',\n '2-2-Time',\n '2-4-Time',\n '3-4-Time',\n '3-8-Time',\n '4-4-Time',\n '6-8-Time',\n '9-8-Time',\n 'Barline',\n 'C-Clef',\n 'Common-Time',\n 'Cut-Time',\n 'Dot',\n 'Double-Sharp',\n 'Eighth-Note',\n 'Eighth-Rest',\n 'F-Clef',\n 'Flat',\n 'G-Clef',\n 'Half-Note',\n 'Natural',\n 'Quarter-Note',\n 'Quarter-Rest',\n 'Sharp',\n 'Sixteenth-Note',\n 'Sixteenth-Rest',\n 'Sixty-Four-Note',\n 'Sixty-Four-Rest',\n 'Thirty-Two-Note',\n 'Thirty-Two-Rest',\n 'Whole-Half-Rest',\n 'Whole-Note']\n\n files = []\n\n if len(args.image_paths) > 0:\n if type(args.image_paths) == str:\n files.append(args.image_paths)\n else:\n files += args.image_paths\n\n if len(args.image_directory) > 0 and not args.image_paths:\n files_in_directory = os.listdir(args.image_directory)\n images_in_directory = [os.path.join(args.image_directory, i) for i in files_in_directory if\n i.endswith(\"png\") or i.endswith(\"jpg\")]\n files += images_in_directory\n\n test_model(args.model_path, files)\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.asarray", "numpy.std", "numpy.mean", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
ALPA-Industry-and-Technology/stable-baselines
[ "256b5c3d2e6dba692851211ec24261bbada98af2" ]
[ "stable_baselines/gail/dataset/dataset.py" ]
[ "import queue\nimport time\nfrom multiprocessing import Queue, Process\n\nimport cv2\nimport numpy as np\nfrom joblib import Parallel, delayed\n\nfrom stable_baselines import logger\n\n\nclass ExpertDataset(object):\n \"\"\"\n Dataset for using behavior cloning or GAIL.\n\n Data structure of the expert dataset, an \".npz\" archive:\n the data is saved in python dictionary format with keys: 'actions', 'episode_returns',\n 'rewards', 'obs', 'episode_starts'\n In case of images, 'obs' contains the relative path to the images.\n\n :param expert_path: (str) the path to trajectory data (.npz file)\n :param train_fraction: (float) the train validation split (0 to 1)\n for pre-training using behavior cloning (BC)\n :param batch_size: (int) the minibatch size for behavior cloning\n :param traj_limitation: (int) the number of trajectory to use (if -1, load all)\n :param randomize: (bool) if the dataset should be shuffled\n :param verbose: (int) Verbosity\n :param sequential_preprocessing: (bool) Do not use subprocess to preprocess\n the data (slower but use less memory for the CI)\n \"\"\"\n\n def __init__(self, expert_path, train_fraction=0.7, batch_size=64,\n traj_limitation=-1, randomize=True, verbose=1,\n sequential_preprocessing=False):\n traj_data = np.load(expert_path)\n\n if verbose > 0:\n for key, val in traj_data.items():\n print(key, val.shape)\n\n # Array of bool where episode_starts[i] = True for each new episode\n episode_starts = traj_data['episode_starts']\n\n traj_limit_idx = len(traj_data['obs'])\n\n if traj_limitation > 0:\n n_episodes = 0\n # Retrieve the index corresponding\n # to the traj_limitation trajectory\n for idx, episode_start in enumerate(episode_starts):\n n_episodes += int(episode_start)\n if n_episodes == (traj_limitation + 1):\n traj_limit_idx = idx - 1\n\n observations = traj_data['obs'][:traj_limit_idx]\n actions = traj_data['actions'][:traj_limit_idx]\n\n # obs, actions: shape (N * L, ) + S\n # where N = # episodes, L = episode length\n # and S is the environment observation/action space.\n # S = (1, ) for discrete space\n # Flatten to (N * L, prod(S))\n if len(observations.shape) > 2:\n observations = np.reshape(observations, [-1, np.prod(observations.shape[1:])])\n if len(actions.shape) > 2:\n actions = np.reshape(actions, [-1, np.prod(actions.shape[1:])])\n\n indices = np.random.permutation(len(observations)).astype(np.int64)\n\n # Train/Validation split when using behavior cloning\n train_indices = indices[:int(train_fraction * len(indices))]\n val_indices = indices[int(train_fraction * len(indices)):]\n\n assert len(train_indices) > 0, \"No sample for the training set\"\n assert len(val_indices) > 0, \"No sample for the validation set\"\n\n self.observations = observations\n self.actions = actions\n\n self.returns = traj_data['episode_returns'][:traj_limit_idx]\n self.avg_ret = sum(self.returns) / len(self.returns)\n self.std_ret = np.std(np.array(self.returns))\n self.verbose = verbose\n\n assert len(self.observations) == len(self.actions), \"The number of actions and observations differ \" \\\n \"please check your expert dataset\"\n self.num_traj = min(traj_limitation, np.sum(episode_starts))\n self.num_transition = len(self.observations)\n self.randomize = randomize\n self.sequential_preprocessing = sequential_preprocessing\n\n self.dataloader = None\n self.train_loader = DataLoader(train_indices, self.observations, self.actions, batch_size,\n shuffle=self.randomize, start_process=False,\n sequential=sequential_preprocessing)\n self.val_loader = 
DataLoader(val_indices, self.observations, self.actions, batch_size,\n shuffle=self.randomize, start_process=False,\n sequential=sequential_preprocessing)\n\n if self.verbose >= 1:\n self.log_info()\n\n def init_dataloader(self, batch_size):\n \"\"\"\n Initialize the dataloader used by GAIL.\n\n :param batch_size: (int)\n \"\"\"\n indices = np.random.permutation(len(self.observations)).astype(np.int64)\n self.dataloader = DataLoader(indices, self.observations, self.actions, batch_size,\n shuffle=self.randomize, start_process=False,\n sequential=self.sequential_preprocessing)\n\n def __del__(self):\n del self.dataloader, self.train_loader, self.val_loader\n\n def prepare_pickling(self):\n \"\"\"\n Exit processes in order to pickle the dataset.\n \"\"\"\n self.dataloader, self.train_loader, self.val_loader = None, None, None\n\n def log_info(self):\n \"\"\"\n Log the information of the dataset.\n \"\"\"\n logger.log(\"Total trajectories: {}\".format(self.num_traj))\n logger.log(\"Total transitions: {}\".format(self.num_transition))\n logger.log(\"Average returns: {}\".format(self.avg_ret))\n logger.log(\"Std for returns: {}\".format(self.std_ret))\n\n def get_next_batch(self, split=None):\n \"\"\"\n Get the batch from the dataset.\n\n :param split: (str) the type of data split (can be None, 'train', 'val')\n :return: (np.ndarray, np.ndarray) inputs and labels\n \"\"\"\n dataloader = {\n None: self.dataloader,\n 'train': self.train_loader,\n 'val': self.val_loader\n }[split]\n\n if dataloader.process is None:\n dataloader.start_process()\n try:\n return next(dataloader)\n except StopIteration:\n dataloader = iter(dataloader)\n return next(dataloader)\n\n def plot(self):\n \"\"\"\n Show histogram plotting of the episode returns\n \"\"\"\n # Isolate dependency since it is only used for plotting and also since\n # different matplotlib backends have further dependencies themselves.\n import matplotlib.pyplot as plt\n plt.hist(self.returns)\n plt.show()\n\n\nclass DataLoader(object):\n \"\"\"\n A custom dataloader to preprocessing observations (including images)\n and feed them to the network.\n\n Original code for the dataloader from https://github.com/araffin/robotics-rl-srl\n (MIT licence)\n Authors: Antonin Raffin, René Traoré, Ashley Hill\n\n :param indices: ([int]) list of observations indices\n :param observations: (np.ndarray) observations or images path\n :param actions: (np.ndarray) actions\n :param batch_size: (int) Number of samples per minibatch\n :param n_workers: (int) number of preprocessing worker (for loading the images)\n :param infinite_loop: (bool) whether to have an iterator that can be resetted\n :param max_queue_len: (int) Max number of minibatches that can be preprocessed at the same time\n :param shuffle: (bool) Shuffle the minibatch after each epoch\n :param start_process: (bool) Start the preprocessing process (default: True)\n :param backend: (str) joblib backend (one of 'multiprocessing', 'sequential', 'threading'\n or 'loky' in newest versions)\n :param sequential: (bool) Do not use subprocess to preprocess the data\n (slower but use less memory for the CI)\n :param partial_minibatch: (bool) Allow partial minibatches (minibatches with a number of element\n lesser than the batch_size)\n \"\"\"\n\n def __init__(self, indices, observations, actions, batch_size, n_workers=1,\n infinite_loop=True, max_queue_len=1, shuffle=False,\n start_process=True, backend='threading', sequential=False, partial_minibatch=True):\n super(DataLoader, self).__init__()\n self.n_workers = 
n_workers\n self.infinite_loop = infinite_loop\n self.indices = indices\n self.original_indices = indices.copy()\n self.n_minibatches = len(indices) // batch_size\n # Add a partial minibatch, for instance\n # when there is not enough samples\n if partial_minibatch and len(indices) / batch_size > 0:\n self.n_minibatches += 1\n self.batch_size = batch_size\n self.observations = observations\n self.actions = actions\n self.shuffle = shuffle\n self.queue = Queue(max_queue_len)\n self.process = None\n self.load_images = isinstance(observations[0], str)\n self.backend = backend\n self.sequential = sequential\n self.start_idx = 0\n if start_process:\n self.start_process()\n\n def start_process(self):\n \"\"\"Start preprocessing process\"\"\"\n # Skip if in sequential mode\n if self.sequential:\n return\n self.process = Process(target=self._run)\n # Make it a deamon, so it will be deleted at the same time\n # of the main process\n self.process.daemon = True\n self.process.start()\n\n @property\n def _minibatch_indices(self):\n \"\"\"\n Current minibatch indices given the current pointer\n (start_idx) and the minibatch size\n :return: (np.ndarray) 1D array of indices\n \"\"\"\n return self.indices[self.start_idx:self.start_idx + self.batch_size]\n\n def sequential_next(self):\n \"\"\"\n Sequential version of the pre-processing.\n \"\"\"\n if self.start_idx > len(self.indices):\n raise StopIteration\n\n if self.start_idx == 0:\n if self.shuffle:\n # Shuffle indices\n np.random.shuffle(self.indices)\n\n obs = self.observations[self._minibatch_indices]\n if self.load_images:\n obs = np.concatenate([self._make_batch_element(image_path) for image_path in obs],\n axis=0)\n\n actions = self.actions[self._minibatch_indices]\n self.start_idx += self.batch_size\n return obs, actions\n\n def _run(self):\n start = True\n with Parallel(n_jobs=self.n_workers, batch_size=\"auto\", backend=self.backend) as parallel:\n while start or self.infinite_loop:\n start = False\n\n if self.shuffle:\n np.random.shuffle(self.indices)\n\n for minibatch_idx in range(self.n_minibatches):\n\n self.start_idx = minibatch_idx * self.batch_size\n\n obs = self.observations[self._minibatch_indices]\n if self.load_images:\n if self.n_workers <= 1:\n obs = [self._make_batch_element(image_path)\n for image_path in obs]\n\n else:\n obs = parallel(delayed(self._make_batch_element)(image_path)\n for image_path in obs)\n\n obs = np.concatenate(obs, axis=0)\n\n actions = self.actions[self._minibatch_indices]\n\n self.queue.put((obs, actions))\n\n # Free memory\n del obs\n\n self.queue.put(None)\n\n @classmethod\n def _make_batch_element(cls, image_path):\n \"\"\"\n Process one element.\n\n :param image_path: (str) path to an image\n :return: (np.ndarray)\n \"\"\"\n # cv2.IMREAD_UNCHANGED is needed to load\n # grey and RGBa images\n image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)\n # Grey image\n if len(image.shape) == 2:\n image = image[:, :, np.newaxis]\n\n if image is None:\n raise ValueError(\"Tried to load {}, but it was not found\".format(image_path))\n # Convert from BGR to RGB\n if image.shape[-1] == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.reshape((1,) + image.shape)\n return image\n\n def __len__(self):\n return self.n_minibatches\n\n def __iter__(self):\n self.start_idx = 0\n self.indices = self.original_indices.copy()\n return self\n\n def __next__(self):\n if self.sequential:\n return self.sequential_next()\n\n if self.process is None:\n raise ValueError(\"You must call .start_process() before using 
the dataloader\")\n while True:\n try:\n val = self.queue.get_nowait()\n break\n except queue.Empty:\n time.sleep(0.001)\n continue\n if val is None:\n raise StopIteration\n return val\n\n def __del__(self):\n if self.process is not None:\n self.process.terminate()\n" ]
[ [ "numpy.random.shuffle", "numpy.concatenate", "numpy.prod", "matplotlib.pyplot.hist", "numpy.load", "numpy.array", "numpy.sum", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
imhgchoi/Neural-Architecture-Search
[ "87b3d0ef3435e67a364615ff76388b87f47aaf66" ]
[ "models/worker/macro_cnn_worker.py" ]
[ "\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom util.neural_blocks import SeparableConv\n\n\n\ndef get_class_num(dataType):\n\tif dataType in ['mnist','cifar10'] :\n\t\treturn 10\n\telse :\n\t\traise ValueError('invalid dataset type {}'.format(dataType))\n\ndef get_in_channel_num(dataType):\n\tif dataType in ['mnist'] :\n\t\treturn 1\n\telif dataType in ['cifar10'] :\n\t\treturn 3\n\telse :\n\t\traise ValueError('invalid dataset type {}'.format(dataType))\n\n\nclass FactorizedReduction(nn.Module):\n\t'''\n\treference\n\thttps://github.com/melodyguan/enas/blob/master/src/cifar10/general_child.py#L129\n\thttps://github.com/TDeVries/enas_pytorch/blob/master/models/shared_cnn.py\n\t'''\n\tdef __init__(self, indim, outdim, stride=2) :\n\t\tsuper(FactorizedReduction, self).__init__()\n\n\t\tassert outdim % 2 == 0, (\"outdim of factorized reduction should be even\")\n\n\t\tself.stride = stride\n\n\t\tif stride == 1 :\n\t\t\tself.facRed = nn.Sequential(\n\t\t\t\tnn.Conv2d(indim, outdim, kernel_size=1, bias=False),\n\t\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False)\n\t\t\t)\n\t\telse :\n\t\t\tself.path1 = nn.Sequential(\n\t\t\t\tnn.AvgPool2d(1, stride=stride),\n\t\t\t\tnn.Conv2d(indim, outdim//2, kernel_size=1, bias=False)\n\t\t\t)\n\t\t\tself.path2 = nn.Sequential(\n\t\t\t\tnn.AvgPool2d(1, stride=stride),\n\t\t\t\tnn.Conv2d(indim, outdim//2, kernel_size=1, bias=False)\n\t\t\t)\n\t\t\tself.batch_norm = nn.BatchNorm2d(outdim, track_running_stats=False)\n\n\tdef forward(self, x):\n\t\tif self.stride == 1 :\n\t\t\treturn self.facRed(x)\n\t\telse :\n\t\t\tx1 = self.path1(x)\n\t\t\t# pad the right and the bottom, then crop to include those pixels\n\t\t\tx2 = F.pad(x, pad=(0, 1, 0, 1), mode='constant', value=0.)\n\t\t\tx2 = x2[:, :, 1:, 1:]\n\t\t\tx2 = self.path2(x2)\n\n\t\t\tx = torch.cat([x1, x2], dim=1)\n\t\t\tx = self.batch_norm(x)\n\t\t\treturn x\n\nclass FixedMacroLayer(nn.Module):\n\tdef __init__(self, lid, indim, outdim, layer_info):\n\t\tsuper(FixedMacroLayer, self).__init__()\n\t\tself.id = lid\n\n\t\tif lid > 0 :\n\t\t\tself.skipIdx = layer_info[1]\n\t\telse :\n\t\t\tself.skipIdx = torch.zeros(1)\n\n\t\tif layer_info[0] == 0 :\n\t\t\tself.node = nn.Sequential(\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.Conv2d(indim, outdim, kernel_size=3, padding=1, bias=False),\n\t\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False)\n\t\t\t)\n\t\telif layer_info[0] == 1 :\n\t\t\tself.node = nn.Sequential(\n\t\t\t\tnn.ReLU(),\n\t\t\t\tSeparableConv(indim, outdim, 3, False),\n\t\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False),\n\t\t\t)\n\t\telif layer_info[0] == 2 :\n\t\t\tself.node = nn.Sequential(\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.Conv2d(indim, outdim, kernel_size=5, padding=2, bias=False),\n\t\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False)\n\t\t\t)\n\t\telif layer_info[0] == 3 :\n\t\t\tself.node = nn.Sequential(\n\t\t\t\tnn.ReLU(),\n\t\t\t\tSeparableConv(indim, outdim, 5, False),\n\t\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False)\n\t\t\t)\n\t\telif layer_info[0] == 4 :\n\t\t\tself.node = nn.Sequential(\n\t\t\t\tnn.AvgPool2d(kernel_size=3, stride=1, padding=1) \n\t\t\t)\n\t\telif layer_info[0] == 5 :\n\t\t\tself.node = nn.Sequential(\n\t\t\t\tnn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n\t\t\t)\n\n\t\tstab_in = int((torch.sum(self.skipIdx).item() + 1)*indim)\n\t\tself.stabilizer = nn.Sequential(\n\t\t\tnn.Conv2d(stab_in, outdim, kernel_size=1, bias=False),\n\t\t\tnn.BatchNorm2d(outdim, 
track_running_stats=False),\n\t\t\tnn.ReLU()\n\t\t)\n\n\t\tself.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\tdef forward(self, x, prevLayers, arch):\n\t\tx = self.node(x)\n\n\t\tskip_out = []\n\t\tfor i, skip in enumerate(self.skipIdx) :\n\t\t\tif skip == 1 :\n\t\t\t\tskip_out.append(prevLayers[i])\n\t\tx = torch.cat([x] + skip_out, dim=1).to(self.device)\n\n\t\tx = self.stabilizer(x)\n\t\treturn x\n\n\nclass MacroLayer(nn.Module):\n\tdef __init__(self, lid, indim, outdim):\n\t\tsuper(MacroLayer, self).__init__()\n\t\tself.id = lid\n\n\t\tself.node1 = nn.Sequential(\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(indim, outdim, kernel_size=3, padding=1, bias=False),\n\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False)\n\t\t)\n\t\tself.node2 = nn.Sequential(\n\t\t\tnn.ReLU(),\n\t\t\tSeparableConv(indim, outdim, 3, False),\n\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False),\n\t\t)\n\t\tself.node3 = nn.Sequential(\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(indim, outdim, kernel_size=5, padding=2, bias=False),\n\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False)\n\t\t)\n\t\tself.node4 = nn.Sequential(\n\t\t\tnn.ReLU(),\n\t\t\tSeparableConv(indim, outdim, 5, False),\n\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False)\n\t\t)\n\t\tself.node5 = nn.Sequential(\n\t\t\tnn.AvgPool2d(kernel_size=3, stride=1, padding=1) \n\t\t)\n\t\tself.node6 = nn.Sequential(\n\t\t\tnn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n\t\t)\n\n\t\tself.stabilizer = nn.Sequential(\n\t\t\tnn.Conv2d(outdim, outdim, kernel_size=1, bias=False),\n\t\t\tnn.BatchNorm2d(outdim, track_running_stats=False),\n\t\t\tnn.ReLU(),\n\t\t)\n\t\tself.batch_norm = nn.BatchNorm2d(outdim, track_running_stats=False)\n\n\tdef forward(self, x, prevLayers, arch):\n\t\tnodeIdx = arch[0]\n\t\tif self.id > 0 :\n\t\t\tskipIdx = arch[1]\n\t\telse :\n\t\t\tskipIdx = []\n\n\t\tif nodeIdx == 0 :\n\t\t\tx = self.node1(x)\n\t\telif nodeIdx == 1 :\n\t\t\tx = self.node2(x)\n\t\telif nodeIdx == 2 :\n\t\t\tx = self.node3(x)\n\t\telif nodeIdx == 3 :\n\t\t\tx = self.node4(x)\n\t\telif nodeIdx == 4 :\n\t\t\tx = self.node5(x)\n\t\telif nodeIdx == 5 :\n\t\t\tx = self.node6(x)\n\t\telse :\n\t\t\traise ValueError('invalid node index {}'.format(nodeIdx))\n\n\t\tfor i, skip in enumerate(skipIdx):\n\t\t\tif skip == 1 :\n\t\t\t\tx = x + prevLayers[i]\n\t\tx = self.stabilizer(x)\n\n\t\treturn self.batch_norm(x)\n\n\nclass MacroCNN(nn.Module):\n\tdef __init__(self, args, fixed_arch=None):\n\t\tsuper(MacroCNN, self).__init__()\n\t\tself.args = args\n\t\tself.classNum = get_class_num(args.dataset)\n\n\t\tself.first_layer = nn.Sequential(\n\t\t\tnn.Conv2d(get_in_channel_num(args.dataset), args.cnn_first_layer_outdim, \n\t\t\t\t\t kernel_size=args.cnn_first_layer_kernel,\n\t\t\t\t\t stride=1,\n\t\t\t\t\t padding=args.cnn_first_layer_pad,\n\t\t\t\t\t bias=False),\n\t\t\tnn.BatchNorm2d(args.cnn_first_layer_outdim, track_running_stats=False)\n\t\t)\n\n\t\tself.layers = nn.ModuleList([])\n\t\tself.pooled_layers = nn.ModuleList([])\n\t\tself.pool_indices = list(range(self.args.macro_num_layers-5, 0, -4))\n\n\t\toutdim = args.cnn_first_layer_outdim\n\t\tfor lid in range(self.args.macro_num_layers) :\n\t\t\tif args.mode.upper() == 'FIX' :\n\t\t\t\tlayer = FixedMacroLayer(lid, outdim, outdim, fixed_arch[str(lid)])\n\t\t\telse :\n\t\t\t\tlayer = MacroLayer(lid, outdim, outdim)\n\t\t\tself.layers.append(layer)\n\n\t\t\tif lid in self.pool_indices :\n\t\t\t\tfor _ in range(len(self.layers)):\n\t\t\t\t\tif args.mode.upper() == 'FIX' 
:\n\t\t\t\t\t\tself.pooled_layers.append(FactorizedReduction(outdim, outdim * 2))\n\t\t\t\t\telse :\t\n\t\t\t\t\t\tself.pooled_layers.append(FactorizedReduction(outdim, outdim))\n\t\t\t\tif args.mode.upper() == 'FIX' :\n\t\t\t\t\toutdim = outdim * 2\n\n\t\tself.global_avg_pool = nn.AdaptiveAvgPool2d((1,1))\n\t\tself.final_layer = nn.Linear(outdim, self.classNum)\n\n\t\tself.initialize()\n\n\t\tself.loss = nn.CrossEntropyLoss()\n\t\tself.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\t\tself.to(self.device)\n\n\n\tdef initialize(self):\n\t\tfor mod in self.modules():\n\t\t\tif isinstance(mod, nn.Conv2d):\n\t\t\t\tnn.init.kaiming_uniform_(mod.weight, nonlinearity='relu')\n\n\tdef forward(self, x, arch):\n\t\tx = self.first_layer(x)\n\n\t\tprevLayers = []\n\t\tpool_count = 0\n\t\tfor lid in range(self.args.macro_num_layers) :\n\t\t\tx = self.layers[lid](x, prevLayers, arch[str(lid)])\n\t\t\tprevLayers.append(x)\n\t\t\tif lid in self.pool_indices :\n\t\t\t\tfor i, prev in enumerate(prevLayers) :\n\t\t\t\t\tprevLayers[i] = self.pooled_layers[pool_count](prev)\n\t\t\t\t\tpool_count+=1\n\t\t\t\tx = prevLayers[-1]\n\n\t\tx = self.global_avg_pool(x)\n\t\tx = x.view(x.shape[0], -1)\n\t\tx = F.dropout(x, p=self.args.dropout)\n\n\t\treturn self.final_layer(x)\n\n\n\n\n\nclass MacroCNNlight(nn.Module):\n\tdef __init__(self, args, fixed_arch=None):\n\t\tsuper(MacroCNNlight, self).__init__()\n\t\tself.args = args\n\t\tself.classNum = get_class_num(args.dataset)\n\n\t\tself.first_layer = nn.Sequential(\n\t\t\tnn.Conv2d(get_in_channel_num(args.dataset), args.cnn_first_layer_outdim, \n\t\t\t\t\t kernel_size=args.cnn_first_layer_kernel,\n\t\t\t\t\t stride=1,\n\t\t\t\t\t padding=args.cnn_first_layer_pad,\n\t\t\t\t\t bias=False),\n\t\t\tnn.BatchNorm2d(args.cnn_first_layer_outdim, track_running_stats=False)\n\t\t)\n\n\t\tself.layers = nn.ModuleList([])\n\n\t\toutdim = args.cnn_first_layer_outdim\n\t\tfor lid in range(self.args.macro_num_layers) :\n\t\t\tif args.mode == 'fix' :\n\t\t\t\tlayer = FixedMacroLayer(lid, outdim, outdim, fixed_arch[str(lid)])\n\t\t\telse :\n\t\t\t\tlayer = MacroLayer(lid, outdim, outdim)\n\t\t\tself.layers.append(layer)\n\n\t\tself.global_avg_pool = nn.AdaptiveAvgPool2d((1,1))\n\t\tself.final_layer = nn.Linear(outdim, self.classNum)\n\n\t\tself.initialize()\n\n\t\tself.loss = nn.CrossEntropyLoss()\n\t\tself.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\t\tself.to(self.device)\n\n\n\tdef initialize(self):\n\t\tfor mod in self.modules():\n\t\t\tif isinstance(mod, nn.Conv2d):\n\t\t\t\tnn.init.kaiming_uniform_(mod.weight, nonlinearity='relu')\n\n\tdef forward(self, x, arch):\n\t\tx = self.first_layer(x)\n\n\t\tprevLayers = []\n\t\tfor lid in range(self.args.macro_num_layers) :\n\t\t\tx = self.layers[lid](x, prevLayers, arch[str(lid)])\n\t\t\tprevLayers.append(x)\n\n\t\tx = self.global_avg_pool(x)\n\t\tx = x.view(x.shape[0], -1)\n\t\tx = F.dropout(x, p=self.args.dropout)\n\n\t\treturn self.final_layer(x)\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.dropout", "torch.cat", "torch.zeros", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.sum", "torch.nn.init.kaiming_uniform_", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mlindauer/EPM_DNN
[ "11022235382e10ed6e8c960d3320c81b3c34d4a7" ]
[ "scripts/evaluate_matlab.py" ]
[ "######################################################################\nimport logging\nimport sys\nimport os\nimport inspect\ncmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))\ncmd_folder = os.path.realpath(os.path.join(cmd_folder, \"..\"))\nif cmd_folder not in sys.path:\n sys.path.insert(0,cmd_folder)\n \n######################################################################\n\nimport os\nimport random\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\nfrom ConfigSpace.io import pcs\nfrom ConfigSpace.util import fix_types, deactivate_inactive_hyperparameters\nfrom ConfigSpace.configuration_space import Configuration, ConfigurationSpace\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter\n\nfrom smac.configspace.util import convert_configurations_to_array\n\nfrom epm_dnn.dnn import DNN\nfrom epm_dnn.rf import RF\n\nfrom plottingscripts.plotting.scatter import plot_scatter_plot\n\ndef read_feature_file(fn:str):\n \n df = pd.read_csv(fn, header=0, index_col=0)\n # rm constant features\n df = df.loc[:, (df != df.ix[0]).any()]\n \n # replace missing values by median\n df[df == -512] = np.nan\n df = df.fillna(df.median())\n return df\n\ndef read_perf_file(fn:str):\n \n perf_pd = pd.read_csv(fn, header=None, index_col=0)\n perf_pd.replace(0.0, 0.0005, inplace=True)\n return perf_pd\n\ndef read_config_file(fn:str, cs:ConfigurationSpace):\n \n config_pd = pd.read_csv(fn, header=0, index_col=0, dtype=object) \n \n configs = []\n \n for param_name in list(config_pd):\n if param_name.startswith(\"dummy_non_parameter\"):\n del config_pd[param_name]\n \n for config in config_pd.iterrows():\n config = fix_types(configuration=config[1:][0].to_dict(), configuration_space=cs)\n config = deactivate_inactive_hyperparameters(configuration=config, configuration_space=cs)\n configs.append(config)\n \n return configs\n\ndef read_cs(fn:str):\n\n with open(fn) as fp:\n pcs_str = fp.readlines()\n cs = pcs.read(pcs_str)\n \n return cs\n\ndef build_matrix(feature_pd:pd.DataFrame, perf_pd:pd.DataFrame, \n configs:list, cs:ConfigurationSpace,\n n_insts:int=None):\n \n insts = list(feature_pd.index)\n \n if n_insts is not None and n_insts < len(insts):\n insts = random.sample(insts, n_insts)\n \n config_matrix = convert_configurations_to_array(configs)\n \n # one hot encode categorical parameters\n n_values = []\n mask_array = []\n parameters = cs.get_hyperparameters()\n \n for param in parameters:\n if isinstance(param, (CategoricalHyperparameter)):\n n_values.append(len(param.choices))\n mask_array.append(True)\n else:\n mask_array.append(False)\n \n n_values = np.array(n_values)\n mask_array = np.array(mask_array) \n \n ohe = OneHotEncoder(n_values=n_values, categorical_features=mask_array, sparse=False)\n config_matrix = ohe.fit_transform(config_matrix)\n \n train_config_indices = random.sample(range(len(configs)), int(len(configs)/2))\n valid_config_indices = random.sample(train_config_indices, int(len(train_config_indices)/2))\n \n train_inst_indices = random.sample(range(len(insts)), int(len(insts)/2))\n valid_inst_indices = random.sample(train_inst_indices, int(len(train_inst_indices)/2))\n \n # convert in X matrix and y vector\n X_I, X_II, X_III, 
X_IV = [[],[],[],[]], [], [], []\n y_I, y_II, y_III, y_IV = [[],[],[],[]], [], [], []\n for i_idx, inst in enumerate(insts):\n feat_vector = feature_pd.loc[inst].values\n perf_vector = perf_pd.loc[inst].values\n for c_idx in range(len(configs)):\n config_vec = config_matrix[c_idx,:]\n perf = perf_vector[c_idx]\n \n if i_idx in train_inst_indices and c_idx in train_config_indices:\n if i_idx in valid_inst_indices and c_idx in valid_config_indices:\n X_I[3].append(np.concatenate((config_vec, feat_vector)))\n y_I[3].append(perf)\n elif i_idx not in valid_inst_indices and c_idx in valid_config_indices:\n X_I[2].append(np.concatenate((config_vec, feat_vector)))\n y_I[2].append(perf)\n elif i_idx in valid_inst_indices and c_idx not in valid_config_indices:\n X_I[1].append(np.concatenate((config_vec, feat_vector)))\n y_I[1].append(perf)\n else:\n X_I[0].append(np.concatenate((config_vec, feat_vector)))\n y_I[0].append(perf)\n elif i_idx not in train_inst_indices and c_idx in train_config_indices:\n X_II.append(np.concatenate((config_vec, feat_vector)))\n y_II.append(perf)\n elif i_idx in train_inst_indices and c_idx not in train_config_indices:\n X_III.append(np.concatenate((config_vec, feat_vector)))\n y_III.append(perf)\n else:\n X_IV.append(np.concatenate((config_vec, feat_vector)))\n y_IV.append(perf)\n \n X_II, X_III, X_IV = np.array(X_II), np.array(X_III), np.array(X_IV)\n y_II, y_III, y_IV = np.array(y_II), np.array(y_III), np.array(y_IV)\n X_I = np.array([np.array(X_I[0]),np.array(X_I[1]),np.array(X_I[2]),np.array(X_I[3])]) \n y_I = np.array([np.array(y_I[0]),np.array(y_I[1]),np.array(y_I[2]),np.array(y_I[3])]) \n \n print(X_I.shape, X_II.shape, X_III.shape, X_IV.shape)\n print(y_I.shape, y_II.shape, y_III.shape, y_IV.shape)\n \n return X_I, X_II, X_III, X_IV, y_I, y_II, y_III, y_IV\n\n\ndef validate(X, y_true, quadrant:str):\n \n y_pred = model.predict(X)\n rmse = np.sqrt(mean_squared_error(y_true=y_true, y_pred=y_pred))\n print(\"RMSE (%s): %f\" %(quadrant, rmse))\n rmse = np.sqrt(mean_squared_error(y_true=np.log10(y_true), y_pred=np.log10(y_pred)))\n print(\"RMSLE (%s): %f\" %(quadrant, rmse))\n \n out_fn = \"%s_%s_b%d_r%s_s%d_%s\" %(args.scenario, args.model, args.budget, args.regularize, args.seed, quadrant)\n \n fig = plot_scatter_plot(x_data=y_true, y_data=y_pred, labels=[\"y(true)\", \"y(pred)\"], max_val=cutoff)\n fig.tight_layout()\n fig.savefig(\"scatter_%s.png\" %(out_fn))\n plt.close(fig)\n \n np.savetxt(fname=\"y_pred_%s.csv\" %(out_fn), X=y_pred)\n \nif __name__ == \"__main__\":\n \n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n \n parser.add_argument(\"--scenario\", required=True)\n parser.add_argument(\"--src_dir\", required=True)\n parser.add_argument(\"--n_insts\", type=int, default=None, \n help=\"subsample to x instances\")\n parser.add_argument(\"--force_reading\", default=True,\n action=\"store_true\")\n parser.add_argument(\"--model\", choices=[\"RF\",\"DNN\"], default=\"DNN\")\n parser.add_argument(\"--regularize\", default=False, action=\"store_true\",\n help=\"Enables regularization (L2 and dropout) for DNNs\")\n parser.add_argument(\"--start_from\", default=None, nargs=\"*\")\n \n parser.add_argument(\"--budget\", type=int, default=1, help=\n \"number of function evaluations for SMAC; if 1, using the default config\")\n parser.add_argument(\"--wc_budget\", type=int, default=60, help=\n \"wallclock time budget for SMAC\")\n parser.add_argument(\"--max_layers\", type=int, default=10, help=\n \"maximal number of layers (only applicable 
if --model DNN)\")\n parser.add_argument(\"--seed\", type=int, default=12345, help=\n \"random seed\")\n \n parser.add_argument(\"--verbose\", choices=[\"INFO\",\"DEBUG\"], default=\"INFO\")\n \n args = parser.parse_args()\n \n print(args)\n \n random.seed(args.seed)\n np.random.seed(args.seed)\n \n if args.start_from is not None:\n d = dict([k.split(\":\") for k in args.start_from])\n \n \n logging.basicConfig(level=args.verbose)\n \n if args.scenario == 'SPEAR-SWV':\n performance_file = os.path.join(args.src_dir,'SAT','1000samples-SPEAR-SWV-all604inst-results.txt')\n config_file = os.path.join(args.src_dir,'SAT','1000samples-algospear1.2.1.1-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'SAT','spear-params.txt')\n feature_file = os.path.join(args.src_dir,'SAT','SWV-feat.csv')\n cutoff = 300\n elif args.scenario == 'SPEAR-IBM':\n performance_file = os.path.join(args.src_dir,'SAT','1000samples-SPEAR-IBM-all765inst-results.txt')\n config_file = os.path.join(args.src_dir,'SAT','1000samples-algospear1.2.1.1-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'SAT','spear-params.txt')\n feature_file = os.path.join(args.src_dir,'SAT','IBM-ALL-feat.csv')\n cutoff = 300\n elif args.scenario == 'SPEAR-SWV-IBM':\n performance_file = os.path.join(args.src_dir,'SAT','1000samples-SPEAR-IBM-SWV-results.txt')\n config_file = os.path.join(args.src_dir,'SAT','1000samples-algospear1.2.1.1-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'SAT','spear-params.txt')\n feature_file = os.path.join(args.src_dir,'SAT','IBM-SWV-feat.csv')\n cutoff = 300\n elif args.scenario == 'CPLEX-CRR':\n performance_file = os.path.join(args.src_dir,'MIP','1000samples-CPLEX-CORLAT-REG-RCW-results.txt')\n config_file = os.path.join(args.src_dir,'MIP','1000samples-algocplex12-milp-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'MIP','cplex12-params-CPAIOR-space.txt')\n feature_file = os.path.join(args.src_dir,'MIP','CORLAT-REG-RCW-features.csv')\n cutoff = 300\n elif args.scenario == 'CPLEX-CR':\n performance_file = os.path.join(args.src_dir,'MIP','1000samples-CPLEX-CORLAT-REG-results.txt')\n config_file = os.path.join(args.src_dir,'MIP','1000samples-algocplex12-milp-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'MIP','cplex12-params-CPAIOR-space.txt')\n feature_file = os.path.join(args.src_dir,'MIP','CORLAT-REG-features.csv')\n cutoff = 300\n elif args.scenario == 'CPLEX-RCW':\n performance_file = os.path.join(args.src_dir,'MIP','1000samples-CPLEX-RCW-990train-990test-results.txt')\n config_file = os.path.join(args.src_dir,'MIP','1000samples-algocplex12-milp-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'MIP','cplex12-params-CPAIOR-space.txt')\n feature_file = os.path.join(args.src_dir,'MIP','RCW-train_test-features-withfilename.csv')\n cutoff = 300\n elif args.scenario == 'CPLEX-REG':\n performance_file = os.path.join(args.src_dir,'MIP','1000samples-CPLEX-CATS_REG-1000train-1000test-results.txt')\n config_file = os.path.join(args.src_dir,'MIP','1000samples-algocplex12-milp-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'MIP','cplex12-params-CPAIOR-space.txt')\n 
feature_file = os.path.join(args.src_dir,'MIP','REG-train_test-features-withfilename.csv')\n cutoff = 300\n elif args.scenario == 'CPLEX-CORLAT':\n performance_file = os.path.join(args.src_dir,'MIP','1000samples-CPLEX-CORLAT-train_test_inst-results.txt')\n config_file = os.path.join(args.src_dir,'MIP','1000samples-algocplex12-milp-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'MIP','cplex12-params-CPAIOR-space.txt')\n feature_file = os.path.join(args.src_dir,'MIP','CORLAT-train_test-features-withfilename.csv')\n cutoff = 300\n elif args.scenario == 'CPLEX-BIGMIX':\n performance_file = os.path.join(args.src_dir,'MIP','1000samples-CPLEX-BIGMIX-all1510inst-results.txt')\n config_file = os.path.join(args.src_dir,'MIP','1000samples-algocplex12-milp-runobjruntime-overallobjmean10-runs1000-time300.0-length2147483647_0.txt')\n pcs_file = os.path.join(args.src_dir,'MIP','cplex12-params-CPAIOR-space.txt')\n feature_file = os.path.join(args.src_dir,'MIP','BIGMIX-train_test-features-withfilename.csv')\n cutoff = 300\n \n if os.path.isfile(\"converted_data/%s/X.npy\" %(args.scenario)) and not args.force_reading:\n \n logging.info(\"Reading data from disk\")\n \n X_I, X_II, X_III, X_IV = np.load(\"converted_data/%s/X.npy\" %(args.scenario))\n y_I, y_II, y_III, y_IV = np.load(\"converted_data/%s/y.npy\" %(args.scenario))\n \n else:\n feature_pd = read_feature_file(fn=feature_file)\n cs = read_cs(fn=pcs_file)\n perf_pd = read_perf_file(fn=performance_file)\n configs = read_config_file(fn=config_file, cs=cs)\n \n X_I, X_II, X_III, X_IV, y_I, y_II, y_III, y_IV = build_matrix(feature_pd=feature_pd, \n perf_pd=perf_pd, \n configs=configs, \n cs=cs, \n n_insts=args.n_insts)\n \n try:\n os.makedirs(\"converted_data/%s/\"%(args.scenario))\n except OSError:\n pass\n \n #np.save(file=\"converted_data/%s/X.npy\" %(args.scenario), \n # arr=np.array([X_I, X_II, X_III, X_IV]))\n #np.save(file=\"converted_data/%s/y.npy\" %(args.scenario), \n # arr=np.array([y_I, y_II, y_III, y_IV]))\n \n print(X_I.shape)\n print(\"min(y_I): %f\" %(np.min(y_I)))\n print(\"max(y_I): %f\" %(np.max(y_I)))\n timeouts = np.sum(y_II>=cutoff) + np.sum(y_III>=cutoff) + np.sum(y_IV>=cutoff) + \\\n np.sum(y_I[0]>=cutoff) + np.sum(y_I[1]>=cutoff) + np.sum(y_I[2]>=cutoff) + np.sum(y_I[3]>=cutoff)\n print(\"#timeouts: %d\" %(timeouts))\n \n if args.model == \"DNN\":\n \n model = DNN(num_layers_range=[1,4,args.max_layers], \n use_dropout=args.regularize, \n use_l2_regularization=args.regularize)\n \n model.fit(X=X_I, \n y=y_I,\n max_epochs=10,\n wc_limit=args.wc_budget,\n runcount_limit=args.budget,\n seed=args.seed,\n config=args.start_from)\n \n elif args.model == \"RF\":\n \n model = RF()\n \n model.fit(X=X_I, \n y=y_I,\n wc_limit=args.wc_budget,\n runcount_limit=args.budget,\n seed=args.seed,\n config=args.start_from)\n \n X_all = None\n y_all = None\n for idx, (X_q, y_q) in enumerate(zip(X_I,y_I)):\n if idx == 0:\n X_all = X_q\n y_all = y_q\n else:\n X_all = np.vstack([X_all, X_q])\n y_all = np.hstack([y_all, y_q]) \n X_I = X_all\n y_I = y_all\n\n\n validate(X_I, y_I, quadrant=\"I\")\n validate(X_II, y_II, quadrant=\"II\")\n validate(X_III, y_III, quadrant=\"III\")\n validate(X_IV, y_IV, quadrant=\"IV\")\n \n\n\n" ]
[ [ "numpy.hstack", "pandas.read_csv", "numpy.random.seed", "numpy.min", "matplotlib.use", "sklearn.preprocessing.OneHotEncoder", "sklearn.metrics.mean_squared_error", "numpy.concatenate", "numpy.max", "numpy.log10", "matplotlib.pyplot.close", "numpy.savetxt", "numpy.load", "numpy.array", "numpy.sum", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
nevermore3/PlateRecognition
[ "2d4a7604b0d85be4368542ad9fbd3553c99caa6a" ]
[ "models/easypr/dataset.py" ]
[ "import os\nimport random\nimport cv2\nimport numpy as np\nfrom queue import Queue\nfrom threading import Thread\nfrom six.moves import cPickle as pickle\n\n\nclass DataSet(object):\n def __init__(self, dataset_params, phase):\n\n # process params\n self.data_path = str(dataset_params['path'])\n self.label_path = os.path.join(self.data_path, phase + '.pickle')\n self.batch_size = int(dataset_params['batch_size'])\n self.thread_num = int(dataset_params['thread_num'])\n self.gray = False\n\n if 'gray' in dataset_params:\n self.gray = dataset_params['gray']\n\n # record and image_label queue\n self.record_queue = Queue(maxsize=10000)\n self.image_label_queue = Queue(maxsize=512)\n\n with open(self.label_path, 'rb') as f:\n result = pickle.load(f)\n\n self.record_list = result # {'name', 'label', 'subdir'}\n self.record_point = 0\n self.record_number = len(self.record_list)\n if self.batch_size == -1:\n self.batch_size = self.record_number\n\n self.num_batch_per_epoch = int(self.record_number / self.batch_size)\n\n t_record_producer = Thread(target=self.record_producer)\n t_record_producer.daemon = True\n t_record_producer.start()\n\n for i in range(self.thread_num):\n t = Thread(target=self.record_customer)\n t.daemon = True\n t.start()\n\n def record_producer(self):\n \"\"\"record_queue's processor\n \"\"\"\n while True:\n if self.record_point % self.record_number == 0:\n random.shuffle(self.record_list)\n self.record_point = 0\n\n self.record_queue.put([os.path.join(self.data_path, self.record_list[self.record_point]['subdir'],\n self.record_list[self.record_point]['name']),\n self.record_list[self.record_point]['label']])\n self.record_point += 1\n\n def record_process(self, record, gray=False):\n \"\"\"record process\n Args: record\n Returns:\n image: 3-D ndarray\n labels: 2-D list\n \"\"\"\n if gray:\n image = cv2.imdecode(np.fromfile(record[0], dtype=np.uint8), cv2.IMREAD_GRAYSCALE)[..., None]\n else:\n image = cv2.imdecode(np.fromfile(record[0], dtype=np.uint8), cv2.IMREAD_COLOR)\n return [image, record[1]]\n\n def record_customer(self):\n \"\"\"record queue's customer \n \"\"\"\n while True:\n item = self.record_queue.get()\n out = self.record_process(item, self.gray)\n self.image_label_queue.put(out)\n\n def batch(self):\n \"\"\"get batch\n Returns:\n images: 4-D ndarray [batch_size, height, width, 1]\n labels: 1-D ndarray [batch_size, ]\n \"\"\"\n images = []\n labels = []\n\n for i in range(self.batch_size):\n image, label = self.image_label_queue.get()\n images.append(image)\n labels.append(label)\n\n images = np.asarray(images, dtype=np.float32)\n images = images / 255 * 2 - 1\n\n return images, labels\n" ]
[ [ "numpy.asarray", "numpy.fromfile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ishine/neural-lexicon-reader
[ "66f4c464a7a442812e79458759ac913ce51d1c6e" ]
[ "dataloader.py" ]
[ "import io\nimport logging\nimport threading\nimport queue\nimport traceback\nimport zipfile\nfrom collections import defaultdict\nimport time\nimport os\nimport numpy as np\nimport torch\nimport pickle\nimport json\nimport glob\nfrom utils.text import text_to_byte_sequence, text_to_char_sequence, text_to_word_sequence, text_to_phone_sequence, \\\n sos_id, eos_id, get_aligned_words\n\nnp.random.seed(0)\nzip_cache = {}\n\nclass PseudoZipfile:\n def __init__(self, dir):\n assert os.path.isdir(dir), \"%s is not a directory\" % dir\n dir = dir.strip(os.path.sep)\n self.dir = dir\n self.filename_set = set([l[len(dir) + 1:] for l in glob.glob(os.path.join(dir, '*'))])\n\n def open(self, filename, mode):\n filename = filename.replace('/', os.path.sep)\n if mode[-1] != 'b':\n mode += 'b'\n return open(os.path.join(self.dir, filename), mode)\n\ndef load_zip(filename):\n if os.path.isdir(filename):\n return PseudoZipfile(filename)\n if filename not in zip_cache:\n zip_cache[filename] = zipfile.ZipFile(filename)\n zip_cache[filename].filename_set = set(x.filename for x in zip_cache[filename].filelist)\n return zip_cache[filename]\n\n\nclass Feeder(threading.Thread):\n def __init__(self, zip_filename, metadata_file_path, hparams, spk_to_id=None, lang_to_id=None,\n rank=0, world_size=1, adapt_lang=None, adapt_spk=None, train_lang=None, train_spk=None,\n exclude_spk=None, downsample_lang=None, adapt_samples=None, warmup_lang=None, warmup_spk=None,\n vocab=None, embed=None):\n super(Feeder, self).__init__()\n self._offset = 0\n self._epoch = 0\n self._spk_to_id = spk_to_id\n self._lang_to_id = lang_to_id\n self._hparams = hparams\n self.global_step = 1\n self.proto = get_input_proto(hparams)\n self.queue = queue.Queue(maxsize=2)\n self.rand = np.random.RandomState(rank)\n self._rank = rank\n self._world_size = world_size\n self._lock = threading.Lock()\n\n self.zfile = load_zip(zip_filename)\n logging.info(\"Found %d spectrograms\" % len(self.zfile.filename_set))\n self.embed = None\n if embed:\n if hparams.use_external_embed:\n self.embed = load_zip(embed)\n logging.info(\"Found %d external embeddings\" % len(self.embed.filename_set))\n else: # lexicon texts\n self.embed = json.load(open(embed, encoding='utf-8'))\n self.vocab = vocab\n\n # Load metadata\n with open(metadata_file_path, encoding='utf-8') as f:\n self._metadata = _read_meta(f, self._hparams.data_format, inc_lang=train_lang, inc_spk=train_spk)\n logging.info('%d samples read' % (len(self._metadata)))\n if exclude_spk:\n self._metadata = [m for m in self._metadata if m['n'].split('_')[0] not in exclude_spk]\n logging.info('%d samples after speakers excluded' % (len(self._metadata)))\n if downsample_lang:\n self._metadata = downsample_language(self._metadata, downsample_lang)\n logging.info('%d samples after language downsampling' % (len(self._metadata)))\n self._warmup_lang = warmup_lang\n self._warmup_spk = warmup_spk\n self._adapt_samples = adapt_samples\n\n hours = sum([int(x['l']) for x in self._metadata]) * hparams.frame_shift_ms / (3600 * 1000)\n logging.info('Loaded metadata for %d examples (%.2f hours)' % (len(self._metadata), hours))\n\n if self._world_size > 1:\n self._metadata = self._metadata[self._rank::self._world_size]\n logging.info(\"%d samples after sharding\" % len(self._metadata))\n\n if self._hparams.shuffle_training_data:\n self.rand.shuffle(self._metadata)\n\n if hparams.balanced_training:\n logging.info('Using balanced data in training')\n self.grouped_meta = _group_meta(self._metadata, self._hparams)\n\n 
self._adapt_lang = adapt_lang\n self._adapt_spk = adapt_spk\n if self._adapt_lang or self._adapt_spk:\n with open(metadata_file_path, encoding='utf-8') as f:\n self._adapt_metadata = _read_meta(f, self._hparams.data_format,\n inc_lang=adapt_lang, inc_spk=adapt_spk)\n logging.info('%d adapt samples read' % (len(self._adapt_metadata)))\n if exclude_spk:\n self._adapt_metadata = [m for m in self._adapt_metadata if m['n'].split('_')[0] not in exclude_spk]\n logging.info('%d adapt samples after speakers excluded' % (len(self._adapt_metadata)))\n if adapt_samples:\n self._adapt_metadata = [m for m in self._adapt_metadata if m['n'] in adapt_samples]\n elif downsample_lang:\n self._adapt_metadata = downsample_language(self._adapt_metadata, downsample_lang)\n logging.info('%d adapt samples after language downsampling' % (len(self._adapt_metadata)))\n spk_cnt = defaultdict(int)\n spk_time = defaultdict(int)\n for m in self._adapt_metadata:\n spk = m['n'].split('_')[0]\n spk_cnt[spk] += 1\n spk_time[spk] += int(m['l']) * hparams.frame_shift_ms / (60 * 1000)\n logging.info('Adapt samples by speakers: ' + ' '.join(\n ['%s (%d, %.3f min)' % (k, v, spk_time[k]) for k, v in spk_cnt.items()]))\n if self._world_size > 1:\n self._adapt_metadata = self._adapt_metadata[self._rank::self._world_size]\n logging.info('%d samples after language sharding' % (len(self._adapt_metadata)))\n if len(self._adapt_metadata) <= 30:\n logging.info('\\n\\t'.join(['Samples:'] + [m['n'] for m in self._adapt_metadata]))\n self._adapt_offset = 0\n self.rand.shuffle(self._adapt_metadata)\n else:\n self._adapt_metadata = None\n\n def run(self):\n try:\n while True:\n self._enqueue_next_group()\n except Exception:\n logging.error(traceback.format_exc())\n\n def state_dict(self):\n with self._lock:\n state = {'rand': self.rand.get_state()}\n if self._hparams.balanced_training:\n state['offset'] = self.grouped_meta['offsets']\n state['epoch'] = self.grouped_meta['epoch']\n else:\n state['offset'] = self._offset\n state['epoch'] = self._epoch\n\n if hasattr(self, '_adapt_offset'):\n state['adapt_offset'] = self._adapt_offset\n logging.info(\"Dumped feeder state: \" + str(state['offset']))\n return state\n\n def load_state_dict(self, state):\n logging.info(\"Loaded feeder state: \" + str(state['offset']))\n self.rand.set_state(state['rand'])\n if self._hparams.balanced_training:\n self.grouped_meta['offsets'] = state['offset']\n self.grouped_meta['epoch'] = state['epoch']\n else:\n self._offset = state['offset']\n self._epoch = state['epoch']\n if hasattr(self, '_adapt_offset'):\n state['adapt_offset'] = self._adapt_offset\n\n\n def get_examples(self, bucket_size):\n examples = []\n with self._lock:\n for i in range(bucket_size):\n examples.append(self._get_next_example())\n return examples\n\n def get_batch(self):\n return self.queue.get()\n\n def _cast_tensor(self, batch):\n batch = dict([(name, batch[name]) for name in self.proto])\n if self._world_size > 1 and torch.cuda.is_available(): # Reduce memory cost; support DDP only\n ctx = torch.cuda.device(self._rank)\n else:\n ctx = memoryview(b'') # no-op\n with ctx:\n for key in batch:\n batch[key] = self.proto[key](batch[key])\n if isinstance(batch[key], torch.Tensor) and torch.cuda.is_available():\n batch[key] = batch[key].pin_memory()\n return batch\n\n def _enqueue_next_group(self):\n tic = time.time()\n examples = self.get_examples(self._hparams.bucket_size)\n examples.sort(key=lambda x: len(x['mel_target']))\n batches = _pack_into_batches(examples, hparams=self._hparams)\n 
self.rand.shuffle(batches)\n\n for batch in batches:\n batch = _prepare_batch(batch, hparams=self._hparams, ex_embed=self.embed, vocab=self.vocab)\n batch = self._cast_tensor(batch)\n self.queue.put(batch)\n logging.info(\"Packed %d batches with %d samples in %.2f sec\" % (len(batches), len(examples), time.time() - tic))\n\n def _get_next_balanced_meta(self):\n lang = self.rand.choice(self.grouped_meta['langs'], p=self.grouped_meta['prob'])\n meta = self.grouped_meta['meta'][lang][self.grouped_meta['offsets'][lang]]\n self.grouped_meta['offsets'][lang] += 1\n if self.grouped_meta['offsets'][lang] >= len(self.grouped_meta['meta'][lang]):\n self.grouped_meta['offsets'][lang] = 0\n self.grouped_meta['epoch'][lang] += 1\n logging.info(\"Start epoch %d of %s\" % (self.grouped_meta['epoch'][lang], lang))\n return meta\n\n def _get_next_example(self):\n while True:\n if self._adapt_metadata and self.rand.random() < self._adapt_rate():\n meta = self._adapt_metadata[self._adapt_offset]\n self._adapt_offset += 1\n if self._adapt_offset >= len(self._adapt_metadata):\n self._adapt_offset = 0\n self.rand.shuffle(self._adapt_metadata)\n elif not self._hparams.balanced_training:\n meta = self._metadata[self._offset]\n self._offset += 1\n if self._offset >= len(self._metadata):\n self._offset = 0\n self._epoch += 1\n if self._hparams.shuffle_training_data:\n self.rand.shuffle(self._metadata)\n else:\n meta = self._get_next_balanced_meta()\n\n if self.skip_meta(meta):\n continue\n break\n\n return extract_meta(meta, self.zfile, self._hparams, self._spk_to_id, self._lang_to_id, vocab=self.vocab)\n\n def _adapt_rate(self):\n if self.global_step >= self._hparams.adapt_end_step:\n r = 1.0\n elif self.global_step < self._hparams.adapt_start_step:\n r = 0.0\n else:\n r = (self.global_step - self._hparams.adapt_start_step) / \\\n (self._hparams.adapt_end_step - self._hparams.adapt_start_step)\n return r * self._hparams.final_adapt_rate\n\n def skip_meta(self, meta):\n if self.global_step >= self._hparams.data_warmup_steps:\n return False\n if self._warmup_lang is not None and meta.get('i', None) not in self._warmup_lang:\n return True\n if self._warmup_spk is not None and meta['n'].split('_')[0] not in self._warmup_spk:\n return True\n if self._hparams.target_length_upper_bound < 0 or \\\n self._hparams.target_length_lower_bound <= int(meta['l']) <= self._hparams.target_length_upper_bound:\n return False\n return True\n\n\nclass FeederEval:\n def __init__(self, zip_filename, metadata_file_path, hparams, spk_to_id=None, lang_to_id=None,\n eval_lang=None, eval_spk=None, exclude_spk=None, target_lang=None, target_spk=None,\n shuffle=True, keep_order=False, pick_partial=False, single=False, vocab=None, embed=None):\n super(FeederEval, self).__init__()\n self._offset = 0\n self._shuffle = shuffle\n self._keep_order = keep_order\n self.single = single\n self.lang_ids = lang_to_id\n self.spk_ids = spk_to_id\n self._target_lang = target_lang\n self._target_spk = target_spk\n self._eval_lang = eval_lang\n self._eval_spk = eval_spk\n self._hparams = hparams\n self.proto = get_input_proto(hparams)\n\n self.zfile = load_zip(zip_filename) if zip_filename is not None else None\n self.embed = None\n if embed:\n if hparams.use_external_embed:\n self.embed = load_zip(embed)\n logging.info(\"Found %d external embeddings\" % len(self.embed.filename_set))\n else: # lexicon texts\n self.embed = json.load(open(embed, encoding='utf-8'))\n self.vocab = vocab\n\n with open(metadata_file_path, encoding='utf-8') as f:\n self._metadata = 
_read_meta(f, self._hparams.data_format, inc_lang=eval_lang, inc_spk=eval_spk)\n logging.info('%d eval samples read' % len(self._metadata))\n\n if 'l' in hparams.data_format:\n self._metadata = [m for m in self._metadata if int(m['l']) < hparams.max_eval_sample_length]\n logging.info('%d eval samples after filtering length' % len(self._metadata))\n\n if exclude_spk:\n self._metadata = [m for m in self._metadata if m['n'].split('_')[0] not in exclude_spk]\n logging.info('%d eval samples after speakers excluded' % (len(self._metadata)))\n if pick_partial:\n self._metadata = filter_eval_samples(self._metadata, 3, self._hparams.eval_sample_per_speaker)\n logging.info('%d eval samples after filtering' % len(self._metadata))\n self._meta_texts = ['|'.join([m[c] for c in self._hparams.data_format]) for m in self._metadata]\n\n self.data = self.prepare_all_batches(self.get_all_batches())\n self.rand = np.random.RandomState(0)\n if self._shuffle:\n self.rand.shuffle(self.data)\n logging.info('[FeederEval] Prepared %d batches' % len(self.data))\n\n def fetch_data(self, exclude=None):\n if exclude is None:\n data = self.data\n else:\n data = self.prepare_all_batches(self.get_all_batches(exclude))\n if self._shuffle and not self._keep_order:\n self.rand.shuffle(data)\n for batch in data:\n for name in batch:\n if name in self.proto:\n batch[name] = self.proto[name](batch[name])\n return data\n\n def _get_next_example(self):\n finished = False\n meta = self._metadata[self._offset]\n self._offset += 1\n if self._offset >= len(self._metadata):\n self._offset = 0\n finished = True\n\n return extract_meta(meta, self.zfile, self._hparams, self.spk_ids, self.lang_ids,\n target_spk=self._target_spk, target_lang=self._target_lang, vocab=self.vocab), finished\n\n def _get_all_examples(self):\n examples = []\n while True:\n example, finished = self._get_next_example()\n examples.append(example)\n if finished:\n break\n return examples\n\n def get_all_batches(self, exclude=[]):\n examples = self._get_all_examples()\n examples = [x for x in examples if x['name'] not in exclude]\n\n if 'mel_target' in examples[0]:\n examples.sort(key=lambda x: len(x['mel_target']))\n batches = _pack_into_batches(examples, self.single, hparams=self._hparams)\n return batches\n\n def prepare_all_batches(self, batches):\n ret = []\n for batch in batches:\n batch = _prepare_batch(batch, hparams=self._hparams, ex_embed=self.embed, vocab=self.vocab)\n ret.append(batch)\n return ret\n\n\ndef _read_meta(meta_file, format, inc_lang=None, inc_spk=None):\n meta_list = []\n for line in meta_file:\n parts = line.strip().split('|')\n if len(parts) != len(format):\n parts = line.strip().split('\\t')\n if format == 'nlti':\n name, length, text, lang = parts\n item_dict = {'n': name, 'l': length, 't': text, 'i': lang}\n elif format == 'nltpi':\n name, length, text, phone, lang = parts\n item_dict = {'n': name, 'l': length, 't': text, 'p': phone, 'i': lang}\n else:\n raise ValueError('Invalid format for _read_meta: %s' % format)\n if inc_lang is not None and lang not in inc_lang:\n continue\n if inc_spk is not None and name.split('_')[0] not in inc_spk:\n continue\n meta_list.append(item_dict)\n return meta_list\n\n\ndef _group_meta(metadata, hparams):\n lang_meta = defaultdict(list)\n lang_spk = defaultdict(set)\n for m in metadata:\n lang_meta[m['i']].append(m)\n lang_spk[m['i']].add(m['n'].split('_')[0])\n langs = list(lang_meta.keys())\n langs.sort()\n sizes = [len(lang_meta[l]) for l in langs]\n alphas = np.power(np.asarray(sizes) / 
np.sum(sizes), hparams.lg_prob_scale)\n prob = alphas / np.sum(alphas)\n for i, lang in enumerate(langs):\n logging.info(\"\\t%s: %d samples, prob=%f\" % (lang, sizes[i], prob[i]))\n spks = list(lang_spk[lang])\n spks.sort()\n logging.info('\\tSpeakers: ' + str(spks))\n return {'langs': langs, 'prob': prob, 'meta': lang_meta,\n 'offsets': dict([(l, 0) for l in langs]), 'epoch': dict([(l, 0) for l in langs])}\n\n\ndef downsample_language(meta_list, downsample_langs):\n mark = [True for _ in meta_list]\n lang_bins = defaultdict(list)\n for i, m in enumerate(meta_list):\n if m['i'] in downsample_langs:\n lang_bins[m['i']].append(i)\n for lang_key, values in lang_bins.items():\n r = np.random.RandomState(0)\n r.shuffle(values)\n if downsample_langs[lang_key] <= 1:\n keep_samples = int(len(values) * downsample_langs[lang_key])\n else:\n keep_samples = int(downsample_langs[lang_key])\n for i in range(keep_samples, len(values)):\n mark[values[i]] = False\n\n meta_list = [meta_list[k] for k in range(len(mark)) if mark[k]]\n return meta_list\n\n\ndef filter_eval_samples(meta, n_spk, n_sample, required_speakers=None):\n lang_samples = defaultdict(list)\n for m in meta:\n lang_samples[m['i']].append(m)\n samples = []\n for lang in lang_samples:\n r = np.random.RandomState(0)\n r.shuffle(lang_samples[lang])\n spk_cnt = {}\n if required_speakers is not None:\n n_spk = len(required_speakers)\n for s in required_speakers:\n spk_cnt[s] = 0\n for m in lang_samples[lang]:\n spk = m['n'].split('_')[0]\n if spk not in spk_cnt:\n if len(spk_cnt) >= n_spk:\n continue\n spk_cnt[spk] = 0\n spk_cnt[spk] += 1\n if spk_cnt[spk] <= n_sample:\n samples.append(m)\n r = np.random.RandomState(0)\n r.shuffle(samples)\n return samples\n\n\ndef _pack_into_batches(examples, single=False, hparams=None):\n batches = [[]]\n batch_max_input_len = 0\n for sample in examples:\n target_len = len(sample['mel_target']) if 'mel_target' in sample else int(len(sample['input']) * 10)\n batch_max_input_len = max(batch_max_input_len, len(sample['input']))\n quad_cnt = batch_max_input_len ** 2 + target_len ** 2\n if (len(batches[-1]) + 1) * quad_cnt > hparams.batch_frame_quad_limit or \\\n (len(batches[-1]) + 1) * target_len > hparams.batch_frame_limit or single \\\n or len(batches[-1]) == hparams.max_batch_size:\n batches.append([])\n batch_max_input_len = len(sample['input'])\n batches[-1].append(sample)\n return batches\n\n\ndef _load_from_zip(zfile, npy_name):\n with zfile.open(npy_name, 'r') as zip_npy:\n with io.BytesIO(zip_npy.read()) as raw_npy:\n return np.load(raw_npy)\n\n\nembed_cache = {}\nembed_cache_list = []\ndef get_embed(key, embed_file, lang):\n key = str(key) + '.pickle'\n if lang == 'ja-jp':\n lang = 'ja'\n if lang in ['ja', 'zh-hk'] and lang is not None:\n key = lang + '/' + key\n if key not in embed_file.filename_set:\n return {'tokens': [''], 'key': np.zeros([1, 1024], dtype=np.float32),\n 'value': np.zeros([1, 2048], dtype=np.float32), 'empty': True}\n if key in embed_cache:\n return embed_cache[key]\n else:\n with embed_file.open(key, 'r') as pf:\n embed = pickle.loads(pf.read())\n embed = {'tokens': embed['tokens'], 'key': np.squeeze(embed['input'], axis=0),\n 'value': np.squeeze(np.concatenate([embed['input'], embed['output']], axis=-1), axis=0)}\n embed_cache[key] = embed\n embed_cache_list.append(key)\n if len(embed_cache_list) > 15000:\n del embed_cache[embed_cache_list[0]]\n del embed_cache_list[0]\n return embed\n\ndef get_char_embed(vocab, lexicon, key, context_size):\n if key in lexicon['gloss']:\n text = 
lexicon['gloss'][key][:200].replace('●', '*')\n if text.startswith(\"释义:\"):\n text = text[3:]\n toks = [[vocab.get(ch, 0)] for ch in text]\n else:\n text = ['']\n toks = [[0]]\n return {'tokens': list(text), 'key': np.asarray(toks, dtype=np.int32),\n 'value': np.zeros([0, context_size], dtype=np.float32), 'empty': key not in lexicon['gloss']}\n\ndef get_lexicon_embed(all_words, all_langs, embeddings, vocab, key_size, value_size, use_external_embed):\n # words: List of list of word of each token\n keys = [np.zeros([1, key_size], dtype=np.float32 if use_external_embed else np.int32)] # [n_entry, length_k, depth_k]\n contexts = [np.zeros([1, value_size], dtype=np.float32)] # [n_entry, length_k, depth_v]\n context_tokens = [['']]\n batch_scripts = [] # [batch, length_q, length_k]\n context_lengths = [1] # [n_entry]\n word_index = {None: 0} # Placeholder\n indices = []\n for wi, words in enumerate(all_words):\n indices.append([])\n batch_scripts.append([])\n for w in words:\n key = (w, all_langs[wi])\n if key not in word_index:\n if use_external_embed:\n wid = vocab[w]\n embed = get_embed(str(wid), embeddings, all_langs[wi])\n else:\n embed = get_char_embed(vocab, embeddings, w, context_size=value_size)\n if embed.get(\"empty\", False):\n idx = 0\n else:\n idx = word_index[key] = len(word_index)\n keys.append(embed['key'])\n contexts.append(embed['value'])\n context_lengths.append(len(embed['key']))\n context_tokens.append(embed['tokens'])\n else:\n idx = word_index[key]\n indices[-1].append(idx)\n batch_scripts[-1].append(context_tokens[idx])\n return keys, contexts, context_lengths, indices, batch_scripts\n\n\ndef _prepare_batch(batch, hparams, ex_embed=None, vocab=None):\n inputs = _prepare_inputs([x['input'] for x in batch])\n input_lengths = np.asarray([len(x['input']) for x in batch], dtype=np.int32)\n results = {'inputs': inputs, 'input_lengths': input_lengths}\n\n if 'target_length' in batch[0]:\n target_lengths = np.asarray([x['target_length'] for x in batch], dtype=np.int32)\n results['target_lengths'] = target_lengths\n elif 'mel_target' in batch[0]:\n target_lengths = np.asarray([len(x['mel_target']) for x in batch], dtype=np.int32)\n results['target_lengths'] = target_lengths\n if 'mel_target' in batch[0]:\n mel_targets = _prepare_targets([x['mel_target'] for x in batch])\n results['mel_targets'] = mel_targets\n\n\n if hparams.multi_lingual:\n results['input_language_vecs'] = np.asarray([x['language_vec'] for x in batch], dtype=np.float32)\n if hparams.multi_speaker or hparams.multi_lingual:\n results['input_spk_ids'] = np.asarray([x['speaker_id'] for x in batch], dtype=np.float32)\n if hparams.use_knowledge_attention:\n keys, contexts, context_lengths, context_indices, context_script = \\\n get_lexicon_embed([x['words'] for x in batch], [x['lang'] for x in batch], ex_embed, vocab,\n hparams.knowledge_key_size if hparams.use_external_embed else 1,\n hparams.knowledge_value_size, hparams.use_external_embed)\n if hparams.use_external_embed:\n results['keys'] = _prepare_targets(keys)\n else:\n results['keys'] = _prepare_targets(keys)\n results['contexts'] = _prepare_targets(contexts)\n results['context_indices'] = _prepare_inputs([np.asarray(x) for x in context_indices])\n results['context_lengths'] = np.asarray(context_lengths)\n results['context_scripts'] = context_script\n\n results['input_scripts'] = [x['input_scripts'] for x in batch]\n results['langs'] = [x['lang'] for x in batch]\n results['names'] = [x['name'] for x in batch]\n return results\n\n\ndef 
_prepare_inputs(inputs):\n max_len = max([len(x) for x in inputs])\n return np.stack([_pad_input(x, max_len) for x in inputs])\n\nimport numba as nb\nfrom numba import prange\nfrom numba.typed import List\n\ndef _prepare_targets_nojit(targets):\n max_len = max([len(t) for t in targets])\n result = np.zeros([len(targets), max_len, targets[0].shape[-1]])\n for i in range(len(targets)):\n result[i, :targets[i].shape[0]] = targets[i]\n return result\n\[email protected](nopython=True, parallel=True)\ndef _prepare_targets_jit(targets):\n max_len = max([int(t.shape[0]) for t in targets])\n result = np.empty((len(targets), max_len, targets[0].shape[-1]), dtype=targets[0].dtype)\n for i in prange(len(targets)):\n result[i, :targets[i].shape[0]] = targets[i]\n result[i, targets[i].shape[0]:] = 0\n return result\n\ndef _prepare_targets(targets):\n typed_a = List()\n [typed_a.append(x) for x in targets]\n return _prepare_targets_jit(typed_a)\n\[email protected](nopython=True, parallel=True)\ndef _prepare_targets_jit_int(targets):\n max_len = max([int(t.shape[0]) for t in targets])\n result = np.empty((len(targets), max_len, targets[0].shape[-1]), dtype=targets[0].dtype)\n for i in prange(len(targets)):\n result[i, :targets[i].shape[0]] = targets[i]\n result[i, targets[i].shape[0]:] = 0\n return result\n\ndef _prepare_targets_int(targets):\n typed_a = List()\n [typed_a.append(x) for x in targets]\n return _prepare_targets_jit(typed_a)\n\n\ndef _pad_input(x, length):\n return np.pad(x, (0, length - x.shape[0]), mode='constant', constant_values=0)\n\n\ndef _pad_target(t, length):\n return np.pad(t, [(0, length - t.shape[0]), (0, 0)], mode='constant', constant_values=0)\n\n\ndef extract_meta(meta, zfile, hparams, spk_ids, lang_ids, target_spk=None, target_lang=None, vocab=None):\n name = meta['n']\n if name.endswith('.npy'):\n name = name[:-4]\n results = {'name': name}\n if zfile:\n mel_target = _load_from_zip(zfile, meta['n'])\n else:\n mel_target = None\n if mel_target is not None:\n if 'l' in meta:\n target_length = int(meta['l'])\n else:\n target_length = mel_target.shape[0]\n results['mel_target'] = mel_target\n results['target_length'] = target_length\n\n if target_lang is not None:\n lang = target_lang\n else:\n lang = meta.get('i', None)\n results['lang'] = lang\n if hparams.multi_lingual and lang:\n language_vec = np.zeros([hparams.max_num_language])\n language_vec[lang_ids[lang]] = 1\n results['language_vec'] = language_vec\n\n results['input_scripts'] = meta['t']\n if hparams.input_method == 'byte':\n input_data, offsets = text_to_byte_sequence(meta['t'])\n elif hparams.input_method == 'phone':\n input_data = text_to_phone_sequence(meta['p'], vocab)\n results['input_scripts'] = meta['p']\n offsets = []\n elif hparams.input_method == 'char':\n input_data, offsets = text_to_char_sequence(meta['t'], vocab, remove_space=hparams.remove_space)\n elif hparams.input_method == 'word':\n input_data, offsets = text_to_word_sequence(meta['t'], vocab)\n input_data = [sos_id] + input_data + [eos_id]\n offsets = [-1] + offsets + [-1]\n\n results['input'] = np.asarray(input_data, dtype=np.int32)\n if hparams.use_knowledge_attention:\n token_to_word = get_aligned_words(meta['t'], vocab, offsets)\n results['words'] = token_to_word\n\n if hparams.multi_speaker or hparams.multi_lingual:\n if target_spk:\n speaker_id = spk_ids[target_spk]\n else:\n speaker_id = spk_ids[name.split('_')[0]]\n results['speaker_id'] = speaker_id\n return results\n\n\ndef get_input_proto(config):\n keys = {'inputs': torch.LongTensor, 
'input_lengths': torch.LongTensor,\n 'mel_targets': torch.FloatTensor, 'target_lengths': torch.LongTensor,\n 'names': list, 'input_scripts': list, 'langs': list}\n if config.multi_speaker or config.multi_lingual:\n keys['input_spk_ids'] = torch.LongTensor\n if config.multi_lingual:\n keys['input_language_vecs'] = torch.FloatTensor\n if config.use_knowledge_attention:\n if config.use_external_embed:\n keys['keys'] = torch.FloatTensor\n else:\n keys['keys'] = torch.LongTensor\n keys['contexts'] = torch.FloatTensor\n keys['context_lengths'] = torch.LongTensor\n keys['context_indices'] = torch.LongTensor\n keys['context_scripts'] = list\n return keys\n" ]
[ [ "numpy.pad", "numpy.random.seed", "numpy.asarray", "numpy.squeeze", "numpy.concatenate", "torch.cuda.is_available", "torch.cuda.device", "numpy.load", "numpy.random.RandomState", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dataiku/near-neighbors-search
[ "6ec87c71c1086de037a5c0fe254bde5cc5f5664f", "6ec87c71c1086de037a5c0fe254bde5cc5f5664f" ]
[ "python-lib/nearest_neighbor/base.py", "tests/python/unit/test_base.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Module to wrap all Nearest Neighbor Search algorithms\"\"\"\n\nfrom typing import AnyStr, Dict, List, Tuple\n\nimport numpy as np\nimport pandas as pd\n\nfrom data_loader import DataLoader\n\n\nclass NearestNeighborSearch:\n \"\"\"Base class for all Nearest Neighbor Search algorithms\"\"\"\n\n INDEX_FILE_NAME = \"index.nns\"\n CONFIG_FILE_NAME = \"config.json\"\n ARRAY_IDS_FILE_NAME = \"vector_ids.npz\"\n ARRAYS_FILE_NAME = \"vectors.npz\"\n INPUT_COLUMN_NAME = \"input_id\"\n NEIGHBOR_COLUMN_NAME = \"neighbor_id\"\n DISTANCE_COLUMN_NAME = \"distance\"\n COLUMN_DESCRIPTIONS = {\n INPUT_COLUMN_NAME: \"Unique ID from the input dataset\",\n NEIGHBOR_COLUMN_NAME: \"Neighbor ID from the pre-computed index\",\n DISTANCE_COLUMN_NAME: \"Distance with neighbor\",\n }\n\n def __new__(cls, *args, **kwargs):\n \"\"\"Determine the appropriate algorithm based on the arguments\"\"\"\n algorithm = kwargs.get(\"algorithm\")\n if algorithm == \"annoy\":\n from nearest_neighbor.annoy import Annoy # noqa\n\n for i in cls.__subclasses__():\n if i.__name__ == \"Annoy\":\n return super().__new__(i)\n elif algorithm == \"faiss\":\n from nearest_neighbor.faiss import Faiss # noqa\n\n for i in cls.__subclasses__():\n if i.__name__ == \"Faiss\":\n return super().__new__(i)\n else:\n raise NotImplementedError(f\"Algorithm '{algorithm}' is not available\")\n\n def __init__(self, num_dimensions: int, **kwargs):\n self.num_dimensions = num_dimensions\n\n def get_config(self) -> Dict:\n \"\"\"Config required to reload the index after initial build\"\"\"\n raise NotImplementedError(\"Get config method not implemented\")\n\n def build_save_index(self, arrays: np.array, index_path: AnyStr) -> None:\n \"\"\"Add arrays (a.k.a. vectors) to the index and save to disk\"\"\"\n raise NotImplementedError(\"Index building and saving method not implemented\")\n\n def load_index(self, index_file_path: AnyStr) -> None:\n \"\"\"Load pre-computed index from disk into memory\"\"\"\n raise NotImplementedError(\"Index loading method not implemented\")\n\n def find_neighbors_array(self, arrays: np.array, num_neighbors: int = 5) -> List[List[Tuple]]:\n \"\"\"Find nearest neighbors of each arrays (a.k.a. 
vectors) and return pairs of (index, distance)\"\"\"\n raise NotImplementedError(\"Find neighbors method not implemented\")\n\n def find_neighbors_df(\n self,\n df: pd.DataFrame,\n unique_id_column: AnyStr,\n feature_columns: List[AnyStr],\n index_array_ids: np.array,\n num_neighbors: int = 5,\n **kwargs,\n ) -> pd.DataFrame:\n \"\"\"Find nearest neighbors in a raw pandas DataFrame and format results into a new DataFrame\"\"\"\n output_df = pd.DataFrame()\n output_df[self.INPUT_COLUMN_NAME] = df[unique_id_column]\n data_loader = DataLoader(unique_id_column, feature_columns)\n (array_ids, arrays) = data_loader.convert_df_to_arrays(df, verbose=False)\n if arrays.shape[1] != self.num_dimensions:\n raise ValueError(\n \"Incompatible number of dimensions: \"\n + f\"{self.num_dimensions} in index, {arrays.shape[1]} in feature column(s)\"\n )\n output_df[\"index_distance_pairs\"] = self.find_neighbors_array(arrays, num_neighbors)\n output_df = output_df.explode(\"index_distance_pairs\")\n output_df[self.NEIGHBOR_COLUMN_NAME] = output_df[\"index_distance_pairs\"].apply(lambda x: int(x[0]))\n output_df[self.DISTANCE_COLUMN_NAME] = output_df[\"index_distance_pairs\"].apply(lambda x: float(x[1]))\n output_df[self.NEIGHBOR_COLUMN_NAME] = (\n output_df[self.NEIGHBOR_COLUMN_NAME].astype(int).apply(lambda i: index_array_ids[i])\n ) # lookup the original array ids\n del output_df[\"index_distance_pairs\"]\n return output_df\n", "import pandas as pd\nimport os.path\n\nfrom data_loader import DataLoader\nfrom tempfile import NamedTemporaryFile\nfrom nearest_neighbor.base import NearestNeighborSearch\n\n\ndef test_build_save_index():\n\n params = {'unique_id_column': 'images',\n 'feature_columns': ['prediction'],\n 'algorithm': 'annoy',\n 'expert': True,\n 'annoy_metric': 'angular',\n 'annoy_num_trees': 10}\n\n # Load data into array format for indexing\n columns = [params[\"unique_id_column\"]] + params[\"feature_columns\"]\n input_df = pd.read_csv('./tests/resources/caltech_embeddings.csv')\n # Restrict to selected columns\n input_df = input_df[columns]\n data_loader = DataLoader(params[\"unique_id_column\"], params[\"feature_columns\"])\n (array_ids, arrays) = data_loader.convert_df_to_arrays(input_df)\n nearest_neighbor = NearestNeighborSearch(num_dimensions=arrays.shape[1], **params)\n with NamedTemporaryFile() as tmp:\n nearest_neighbor.build_save_index(arrays=arrays, index_path=tmp.name)\n assert os.path.isfile(tmp.name)\n\n\ndef test_find_neighbors_df():\n\n params = {'unique_id_column': 'images',\n 'feature_columns': ['prediction'],\n 'algorithm': 'annoy',\n 'expert': True,\n 'annoy_metric': 'angular',\n 'annoy_num_trees': 10}\n\n index_config = {'algorithm': 'annoy',\n 'num_dimensions': 2048,\n 'annoy_metric': 'angular',\n 'annoy_num_trees': 10,\n 'feature_columns': ['prediction'],\n 'expert': True}\n\n # Load data into array format for indexing\n columns = [params[\"unique_id_column\"]] + params[\"feature_columns\"]\n input_df = pd.read_csv('./tests/resources/caltech_embeddings.csv')\n input_df = input_df[columns]\n data_loader = DataLoader(params[\"unique_id_column\"], params[\"feature_columns\"])\n (array_ids, arrays) = data_loader.convert_df_to_arrays(input_df)\n nearest_neighbor = NearestNeighborSearch(num_dimensions=arrays.shape[1], **params)\n with NamedTemporaryFile() as tmp:\n nearest_neighbor.build_save_index(arrays=arrays, index_path=tmp.name)\n params = {'unique_id_column': 'images', 'feature_columns': ['prediction'], 'num_neighbors': 5}\n nearest_neighbor = 
NearestNeighborSearch(**index_config)\n nearest_neighbor.load_index(tmp.name)\n # Find nearest neighbors in input dataset\n df = nearest_neighbor.find_neighbors_df(input_df, **params, index_array_ids=array_ids)\n actual = sorted(list(df[df['input_id'] == '34719_ostrich.jpg']['neighbor_id']))\n expected = ['107505_ostrich.jpg', '185189_ostrich.jpg', '213657_ostrich.jpg', '229350_ostrich.jpg', '34719_ostrich.jpg']\n assert len(actual) == len(expected)\n assert all([actual_item == expected_item for actual_item, expected_item in zip(actual, expected)])\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Sushant-aggarwal/text-uncertainity
[ "824767847f224ce2d8943e9fa0c01aad8b0fdda6" ]
[ "cnn_lstm.py" ]
[ "#Run in google colab.\n#---------Nilesh Agarwal---------#\n#[email protected]\n\n#!wget http://nlp.stanford.edu/data/glove.6B.zip\n#!unzip glove*.zip\nimport os\nimport numpy as np\nimport csv\nimport pandas as pd\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Model\nfrom keras.utils import to_categorical,plot_model\nfrom keras.layers import Activation, Dense, Dropout,Input,Add,concatenate,LSTM\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split,StratifiedKFold\nfrom keras.layers import Conv1D,MaxPooling1D,Embedding,GlobalMaxPooling1D\nfrom keras.initializers import Constant\nfrom sklearn.metrics import accuracy_score,classification_report,confusion_matrix\nimport pickle\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\n\nfinal_df = pd.read_csv(\"multilabel_dataset.csv\")\n\nX = final_df.loc[:, 'sentences'].values\nY = final_df.loc[:, 'label'].values\nX_phrases = final_df.loc[:, 'vague_phrases'].values\nX_cues = []\n\ncounter=0\nfor i in X_phrases:\n counter+=1\n if counter==5000:\n break\n cues = str(i)\n X_cues.append(cues)\nX_cues = np.array(X_cues).reshape(4499);\n\nseed = 7\n#np.random.seed(seed)\n\nkfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)\n\naccuracy=[]\nrecall=[]\nf1_scores=[]\nprecision=[]\n\nfinal_predicted_sentences=[]\nfinal_actual_labels=[]\nfinal_predicted_labels=[]\nfinal_actual_cue_words=[]\nfinal_probability_class0=[]\nfinal_probability_class1=[]\n\nfor train, test in kfold.split(X, Y, X_cues):\n for i in range(len(X[test])):\n final_predicted_sentences.append(X[test][i])\n final_actual_labels.append(Y[test][i])\n final_actual_cue_words.append(X_cues[test][i])\n\nembedding_index = {}\nwith open('glove.6B.300d.txt') as f:\n for line in f:\n word, coefs = line.split(maxsplit=1)\n coefs = np.fromstring(coefs,'f',sep=' ')\n embedding_index[word] = coefs\n\nnum_labels = 4\nvocab_size = 10000\nbatch_size = 128\nembedding_dim = 300\nmax_len = 297\n\ntokenizer = Tokenizer(num_words = vocab_size)\ntokenizer.fit_on_texts(X)\nword_index = tokenizer.word_index\ntrain_sentences_tokenized = tokenizer.texts_to_sequences(X)\nX = pad_sequences(train_sentences_tokenized, maxlen=max_len)\nY = to_categorical(final_df['label'])\nprint(Y.shape)\nprint(X.shape)\n\nprint('Preparing embedding matrix')\nnum_words = min(vocab_size,len(word_index))+1\nembedding_matrix = np.zeros((num_words,embedding_dim))\nfor word,i in word_index.items():\n if i > vocab_size:\n continue\n\n embedding_vector = embedding_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\naccuracy=[]\nrecall=[]\nf1_scores=[]\nprecision=[]\n\nfinal_predicted_labels=[]\nfinal_probability_class0=[]\nfinal_probability_class1=[]\nfinal_probability_class2=[]\nfinal_probability_class3=[]\n\nY = final_df['label']\nfor train, test in kfold.split(X, Y, X_cues):\n Y = to_categorical(final_df['label'])\n embedding_layer = Embedding(num_words,\n embedding_dim,\n embeddings_initializer = Constant(embedding_matrix),\n input_length = max_len,\n trainable = True)\n sequence_input = Input(shape = (max_len,),dtype = 'int32')\n embedded_sequences = embedding_layer(sequence_input)\n x1 = Conv1D(128,3,activation = 'relu')(embedded_sequences)\n x1 = 
Dropout(0.2)(x1)\n x1 = GlobalMaxPooling1D()(x1)\n x2 = Conv1D(64,2,activation = 'relu')(embedded_sequences)\n x2 = Dropout(0.2)(x2)\n x2 = GlobalMaxPooling1D()(x2)\n x3 = LSTM(128)(embedded_sequences)\n x3 = Dropout(0.2)(x2)\n x = concatenate([x1,x2,x3],axis=1)\n\n pred = Dense(4,activation = 'softmax')(x)\n model = Model(sequence_input,pred)\n model.summary()\n adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=0.000001, decay=0.0001)\n\n model.compile(loss = 'binary_crossentropy',optimizer = adam ,metrics = ['accuracy'])\n #model.summary()\n\n a,b = 0,4\n model.fit(X[train],Y[train][:,a:b],batch_size = batch_size,epochs = 10,validation_data = (X[test],Y[test][:,a:b]))\n scores = model.evaluate(X[test], Y[test][:,a:b], verbose=0)\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n pred = model.predict(X[test])\n final_probability_0 = pred[:,0]\n final_probability_1 = pred[:,1]\n final_probability_2 = pred[:,2]\n final_probability_3 = pred[:,3]\n\n for i in range(len(X[test])):\n final_predicted_labels.append(pred.argmax(axis=1)[i])\n final_probability_class0.append(final_probability_0[i])\n final_probability_class1.append(final_probability_1[i])\n final_probability_class2.append(final_probability_2[i])\n final_probability_class3.append(final_probability_3[i])\n\n c_matrix = confusion_matrix(Y[test][:,a:b].argmax(axis=1),pred.argmax(axis=1))\n print(c_matrix)\n print(classification_report(Y[test][:,a:b].argmax(axis=1),pred.argmax(axis=1)))\n\n plot_model(model, to_file='model.png',show_shapes = True)\n model.save('my_model_cSIN2_tag')\n\n accuracy.append(accuracy_score(Y[test][:,a:b].argmax(axis=1),pred.argmax(axis=1)))\n print(accuracy)\n f1_scores.append(f1_score(Y[test][:,a:b].argmax(axis=1), pred.argmax(axis=1), average='macro'))\n precision.append(precision_score(Y[test][:,a:b].argmax(axis=1), pred.argmax(axis=1), average='macro'))\n recall.append(recall_score(Y[test][:,a:b].argmax(axis=1), pred.argmax(axis=1), average='macro'))\n\nprint(\"accuracy :: %.4f%% (+/- %.2f%%)\" % (np.mean(accuracy), np.std(accuracy)))\nprint(\"f1_score :: %.4f%% (+/- %.2f%%)\" % (np.mean(f1_scores), np.std(f1_scores)))\nprint(\"precision_score :: %.4f%% (+/- %.2f%%)\" % (np.mean(precision), np.std(precision)))\nprint(\"recall :: %.4f%% (+/- %.2f%%)\" % (np.mean(recall), np.std(recall)))\n\ndf1 = pd.DataFrame(final_predicted_sentences)\ndf1.columns = ['Sentences']\n\ndf2 = pd.DataFrame(final_actual_labels)\ndf2.columns = ['True_Labels']\n\ndf3 = pd.DataFrame(final_predicted_labels)\ndf3.columns = ['Predicted_Labels']\n\ndf8 = pd.DataFrame(final_actual_cue_words)\ndf8.columns = ['Speculative_Words']\n\ndf4 = pd.DataFrame(final_probability_class0)\ndf4.columns = ['Probabilty_Score_Class0']\n\ndf5 = pd.DataFrame(final_probability_class1)\ndf5.columns = ['Probabilty_Score_Class1']\n\ndf6 = pd.DataFrame(final_probability_class2)\ndf6.columns = ['Probabilty_Score_Class2']\n\ndf7 = pd.DataFrame(final_probability_class3)\ndf7.columns = ['Probabilty_Score_Class3']\n\ndf = df1.join(df2)\ndf = df.join(df3)\ndf = df.join(df4)\ndf = df.join(df5)\ndf = df.join(df6)\n\ndf.to_csv('predicted_PRIVACY-POLICY_vagueness_CNN-LSTM_model.csv',index=False)\n\noptions1 = ['0']\noptions2 = ['1']\n# selecting rows based on condition\nrslt_df1 = df[(df['True_Labels'] == 1) &\n df['Predicted_Labels'].isin(options1)]\n\nrslt_df2 = df[(df['True_Labels'] == 0) &\n df['Predicted_Labels'].isin(options2)]\n\nframes = [rslt_df1,rslt_df2]\nresult_df = pd.concat(frames)\n#print('\\nResult dataframe :\\n', 
r)\nresult_df.to_csv('incorrect_predictions_PRIVACY-POLICY_vagueness_CNN-LSTM_model.csv',index=False)\n\noptions3 = ['1']\noptions4 = ['0']\n# selecting rows based on condition\nrslt_df1 = df[(df['True_Labels'] == 1) &\n df['Predicted_Labels'].isin(options3)]\n\nrslt_df2 = df[(df['True_Labels'] == 0) &\n df['Predicted_Labels'].isin(options4)]\n\nframes = [rslt_df1,rslt_df2]\nresult_df = pd.concat(frames)\n#print('\\nResult dataframe :\\n', r)\nresult_df.to_csv('correct_predictions_PRIVACY-POLICY_vagueness_CNN-LSTM_model.csv',index=False)\n" ]
[ [ "pandas.concat", "pandas.read_csv", "sklearn.model_selection.StratifiedKFold", "pandas.DataFrame", "numpy.std", "numpy.fromstring", "numpy.mean", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
belinghy/fairmotion
[ "748a76cea5f6f52f42c1af7b7f87d81d30d0cdb2" ]
[ "fairmotion/tasks/motion_prediction/utils.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n\nimport numpy as np\nimport os\nimport torch\nfrom functools import partial\nfrom multiprocessing import Pool\n\nfrom fairmotion.models import (\n decoders,\n encoders,\n optimizer,\n rnn,\n seq2seq,\n transformer,\n)\nfrom fairmotion.tasks.motion_prediction import dataset as motion_dataset\nfrom fairmotion.utils import constants\nfrom fairmotion.ops import conversions\n\n\ndef apply_ops(input, ops):\n \"\"\"\n Apply series of operations in order on input. `ops` is a list of methods\n that takes single argument as input (single argument functions, partial\n functions). The methods are called in the same order provided.\n \"\"\"\n output = input\n for op in ops:\n output = op(output)\n return output\n\n\ndef unflatten_angles(arr, rep):\n \"\"\"\n Unflatten from (batch_size, num_frames, num_joints*ndim) to\n (batch_size, num_frames, num_joints, ndim) for each angle format\n \"\"\"\n if rep == \"aa\":\n return arr.reshape(arr.shape[:-1] + (-1, 3))\n elif rep == \"quat\":\n return arr.reshape(arr.shape[:-1] + (-1, 4))\n elif rep == \"rotmat\":\n return arr.reshape(arr.shape[:-1] + (-1, 3, 3))\n\n\ndef flatten_angles(arr, rep):\n \"\"\"\n Unflatten from (batch_size, num_frames, num_joints, ndim) to\n (batch_size, num_frames, num_joints*ndim) for each angle format\n \"\"\"\n if rep == \"aa\":\n return arr.reshape(arr.shape[:-2] + (-1))\n elif rep == \"quat\":\n return arr.reshape(arr.shape[:-2] + (-1))\n elif rep == \"rotmat\":\n # original dimension is (batch_size, num_frames, num_joints, 3, 3)\n return arr.reshape(arr.shape[:-3] + (-1))\n\n\ndef multiprocess_convert(arr, convert_fn):\n pool = Pool(40)\n result = list(pool.map(convert_fn, arr))\n return result\n\n\ndef convert_fn_to_R(rep):\n ops = [partial(unflatten_angles, rep=rep)]\n if rep == \"aa\":\n ops.append(partial(multiprocess_convert, convert_fn=conversions.A2R))\n elif rep == \"quat\":\n ops.append(partial(multiprocess_convert, convert_fn=conversions.Q2R))\n elif rep == \"rotmat\":\n ops.append(lambda x: x)\n ops.append(np.array)\n return ops\n\n\ndef identity(x):\n return x\n\n\ndef convert_fn_from_R(rep):\n if rep == \"aa\":\n convert_fn = conversions.R2A\n elif rep == \"quat\":\n convert_fn = conversions.R2Q\n elif rep == \"rotmat\":\n convert_fn = identity\n return convert_fn\n\n\ndef unnormalize(arr, mean, std):\n return arr * (std + constants.EPSILON) + mean\n\n\ndef prepare_dataset(\n train_path,\n valid_path,\n test_path,\n batch_size,\n device,\n shuffle=False,\n):\n dataset = {}\n for split, split_path in zip(\n [\"train\", \"test\", \"validation\"], [train_path, valid_path, test_path]\n ):\n mean, std = None, None\n if split in [\"test\", \"validation\"]:\n mean = dataset[\"train\"].dataset.mean\n std = dataset[\"train\"].dataset.std\n dataset[split] = motion_dataset.get_loader(\n split_path,\n batch_size,\n device,\n mean,\n std,\n shuffle,\n )\n return dataset, mean, std\n\n\ndef prepare_model(input_dim, hidden_dim, device, num_layers=1, architecture=\"seq2seq\"):\n if architecture == \"rnn\":\n model = rnn.RNN(input_dim, hidden_dim, num_layers)\n if architecture == \"seq2seq\":\n enc = encoders.LSTMEncoder(input_dim=input_dim, hidden_dim=hidden_dim).to(\n device\n )\n dec = decoders.LSTMDecoder(\n input_dim=input_dim,\n hidden_dim=hidden_dim,\n output_dim=input_dim,\n device=device,\n ).to(device)\n model = seq2seq.Seq2Seq(enc, dec)\n elif architecture == \"tied_seq2seq\":\n model = seq2seq.TiedSeq2Seq(input_dim, hidden_dim, num_layers, device)\n elif 
architecture == \"transformer_encoder\":\n model = transformer.TransformerLSTMModel(\n input_dim,\n hidden_dim,\n 4,\n hidden_dim,\n num_layers,\n )\n elif architecture == \"transformer\":\n model = transformer.TransformerModel(\n input_dim,\n hidden_dim,\n 4,\n hidden_dim,\n num_layers,\n )\n model = model.to(device)\n model.zero_grad()\n model.double()\n return model\n\n\ndef log_config(path, args):\n with open(os.path.join(path, \"config.txt\"), \"w\") as f:\n for key, value in args._get_kwargs():\n f.write(f\"{key}:{value}\\n\")\n\n\ndef prepare_optimizer(model, opt: str, lr=None):\n kwargs = {}\n if lr is not None:\n kwargs[\"lr\"] = lr\n\n if opt == \"sgd\":\n return optimizer.SGDOpt(model, **kwargs)\n elif opt == \"adam\":\n return optimizer.AdamOpt(model, **kwargs)\n elif opt == \"noamopt\":\n return optimizer.NoamOpt(model)\n\n\ndef prepare_tgt_seqs(architecture, src_seqs, tgt_seqs):\n if architecture == \"st_transformer\" or architecture == \"rnn\":\n return torch.cat((src_seqs[:, 1:], tgt_seqs), axis=1)\n else:\n return tgt_seqs\n" ]
[ [ "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RE-Lab-Projects/hplib-database
[ "61f33998d87174d209981ccf59c3e1393bb6ec25" ]
[ "src/hplib.py" ]
[ "\"\"\"\nThe ``hplib`` module provides a set of functions for simulating the performance of heat pumps.\n\"\"\"\nimport pandas as pd\nimport scipy\nfrom scipy.optimize import curve_fit\nfrom typing import Any, Dict, Union\nimport os\nimport numpy as np\n\n\ndef load_database() -> pd.DataFrame:\n \"\"\"\n Loads data from hplib_database.\n\n Returns\n -------\n df : pd.DataFrame\n Content of the database\n \"\"\"\n df = pd.read_csv(cwd()+r'/hplib_database.csv')\n return df\n\n\ndef get_parameters(model: str, group_id: int = 0,\n t_in: int = 0, t_out: int = 0, p_th: int = 0) -> pd.DataFrame:\n \"\"\"\n Loads the content of the database for a specific heat pump model\n and returns a pandas ``DataFrame`` containing the heat pump parameters.\n\n Parameters\n ----------\n model : str\n Name of the heat pump model or \"Generic\".\n group_id : numeric, default 0\n only for model \"Generic\": Group ID for subtype of heat pump. [1-6].\n t_in : numeric, default 0\n only for model \"Generic\": Input temperature :math:`T` at primary side of the heat pump. [°C]\n t_out : numeric, default 0\n only for model \"Generic\": Output temperature :math:`T` at secondary side of the heat pump. [°C]\n p_th : numeric, default 0\n only for model \"Generic\": Thermal output power at setpoint t_in, t_out (and for \n water/water, brine/water heat pumps t_amb = -7°C). [W]\n\n Returns\n -------\n parameters : pd.DataFrame\n Data frame containing the model parameters.\n \"\"\"\n df = pd.read_csv(cwd()+r'/hplib_database.csv', delimiter=',')\n df = df.loc[df['Model'] == model]\n parameters = pd.DataFrame()\n parameters['Manufacturer']=(df['Manufacturer'].values.tolist())\n parameters['Model'] = (df['Model'].values.tolist())\n try:\n parameters['MAPE_COP']=df['MAPE_COP'].values.tolist()\n parameters['MAPE_P_el']=df['MAPE_P_el'].values.tolist()\n parameters['MAPE_P_th']=df['MAPE_P_th'].values.tolist()\n except:\n pass\n parameters['P_th_h_ref [W]'] = (df['P_th_h_ref [W]'].values.tolist())\n parameters['P_el_h_ref [W]'] = (df['P_el_h_ref [W]'].values.tolist())\n parameters['COP_ref'] = (df['COP_ref'].values.tolist())\n parameters['Group'] = (df['Group'].values.tolist())\n parameters['p1_P_th [1/°C]'] = (df['p1_P_th [1/°C]'].values.tolist())\n parameters['p2_P_th [1/°C]'] = (df['p2_P_th [1/°C]'].values.tolist())\n parameters['p3_P_th [-]'] = (df['p3_P_th [-]'].values.tolist())\n parameters['p4_P_th [1/°C]'] = (df['p4_P_th [1/°C]'].values.tolist())\n parameters['p1_P_el_h [1/°C]'] = (df['p1_P_el_h [1/°C]'].values.tolist())\n parameters['p2_P_el_h [1/°C]'] = (df['p2_P_el_h [1/°C]'].values.tolist())\n parameters['p3_P_el_h [-]'] = (df['p3_P_el_h [-]'].values.tolist())\n parameters['p4_P_el_h [1/°C]'] = (df['p4_P_el_h [1/°C]'].values.tolist())\n parameters['p1_COP [-]'] = (df['p1_COP [-]'].values.tolist())\n parameters['p2_COP [-]'] = (df['p2_COP [-]'].values.tolist())\n parameters['p3_COP [-]'] = (df['p3_COP [-]'].values.tolist())\n parameters['p4_COP [-]'] = (df['p4_COP [-]'].values.tolist())\n try:\n parameters['P_th_c_ref [W]'] = (df['P_th_c_ref [W]'].values.tolist())\n parameters['P_el_c_ref [W]'] = (df['P_el_c_ref [W]'].values.tolist())\n parameters['p1_Pdc [1/°C]'] = (df['p1_Pdc [1/°C]'].values.tolist())\n parameters['p2_Pdc [1/°C]'] = (df['p2_Pdc [1/°C]'].values.tolist())\n parameters['p3_Pdc [-]'] = (df['p3_Pdc [-]'].values.tolist())\n parameters['p4_Pdc [1/°C]'] = (df['p4_Pdc [1/°C]'].values.tolist())\n parameters['p1_P_el_c [1/°C]'] = (df['p1_P_el_c [1/°C]'].values.tolist())\n parameters['p2_P_el_c [1/°C]'] = 
(df['p2_P_el_c [1/°C]'].values.tolist())\n parameters['p3_P_el_c [-]'] = (df['p3_P_el_c [-]'].values.tolist())\n parameters['p4_P_el_c [1/°C]'] = (df['p4_P_el_c [1/°C]'].values.tolist())\n parameters['p1_EER [-]'] = (df['p1_EER [-]'].values.tolist())\n parameters['p2_EER [-]'] = (df['p2_EER [-]'].values.tolist())\n parameters['p3_EER [-]'] = (df['p3_EER [-]'].values.tolist())\n parameters['p4_EER [-]'] = (df['p4_EER [-]'].values.tolist())\n except:\n pass\n\n if model == 'Generic':\n parameters = parameters.iloc[group_id - 1:group_id]\n \n p_th_ref = fit_p_th_ref(t_in, t_out, group_id, p_th)\n parameters.loc[:, 'P_th_h_ref [W]'] = p_th_ref\n t_in_hp = [-7,0,10] # air/water, brine/water, water/water\n t_out_fix = 52\n t_amb_fix = -7\n p1_cop = parameters['p1_COP [-]'].array[0]\n p2_cop = parameters['p2_COP [-]'].array[0]\n p3_cop = parameters['p3_COP [-]'].array[0]\n p4_cop = parameters['p4_COP [-]'].array[0]\n if (p1_cop * t_in + p2_cop * t_out + p3_cop + p4_cop * t_amb_fix)<=1.0:\n raise ValueError('COP too low! Increase t_in or decrease t_out.')\n if group_id == 1 or group_id == 4:\n t_in_fix = t_in_hp[0]\n if group_id == 2 or group_id == 5:\n t_in_fix = t_in_hp[1]\n if group_id == 3 or group_id == 6:\n t_in_fix = t_in_hp[2] \n cop_ref = p1_cop * t_in_fix + p2_cop * t_out_fix + p3_cop + p4_cop * t_amb_fix\n p_el_ref = p_th_ref / cop_ref\n parameters.loc[:, 'P_el_h_ref [W]'] = p_el_ref\n parameters.loc[:, 'COP_ref'] = cop_ref\n if group_id==1:\n try:\n p1_eer = parameters['p1_EER [-]'].array[0]\n p2_eer = parameters['p2_EER [-]'].array[0]\n p3_eer = parameters['p3_EER [-]'].array[0]\n p4_eer = parameters['p4_EER [-]'].array[0]\n eer_ref = p1_eer * 35 + p2_eer * 7 + p3_eer + p4_eer * 35\n parameters.loc[:,'P_th_c_ref [W]'] = p_el_ref * 0.6852 * eer_ref\n parameters['P_el_c_ref [W]'] = p_el_ref * 0.6852 #average value from real Heatpumps (P_el35/7 to P_el-7/52) \n parameters.loc[:, 'EER_ref'] = eer_ref \n except:\n pass\n return parameters\n\n\ndef get_parameters_fit(model: str, group_id: int = 0, p_th: int = 0) -> pd.DataFrame:\n \"\"\"\n Helper function for leastsquare fit of thermal output power at reference set point.\n\n Parameters\n ----------\n model : str\n Name of the heat pump model.\n group_id : numeric, default 0\n Group ID for a parameter set which represents an average heat pump of its group.\n p_th : numeric, default 0\n Thermal output power. 
[W]\n\n Returns\n -------\n parameters : pd.DataFrame\n Data frame containing the model parameters.\n \"\"\"\n df = pd.read_csv(cwd()+r'/hplib_database.csv', delimiter=',')\n df = df.loc[df['Model'] == model]\n parameters = pd.DataFrame()\n\n parameters['Model'] = (df['Model'].values.tolist())\n parameters['P_th_h_ref [W]'] = (df['P_th_h_ref [W]'].values.tolist())\n parameters['P_el_h_ref [W]'] = (df['P_el_h_ref [W]'].values.tolist())\n parameters['COP_ref'] = (df['COP_ref'].values.tolist())\n parameters['Group'] = (df['Group'].values.tolist())\n parameters['p1_P_th [1/°C]'] = (df['p1_P_th [1/°C]'].values.tolist())\n parameters['p2_P_th [1/°C]'] = (df['p2_P_th [1/°C]'].values.tolist())\n parameters['p3_P_th [-]'] = (df['p3_P_th [-]'].values.tolist())\n parameters['p4_P_th [1/°C]'] = (df['p4_P_th [1/°C]'].values.tolist())\n parameters['p1_P_el_h [1/°C]'] = (df['p1_P_el_h [1/°C]'].values.tolist())\n parameters['p2_P_el_h [1/°C]'] = (df['p2_P_el_h [1/°C]'].values.tolist())\n parameters['p3_P_el_h [-]'] = (df['p3_P_el_h [-]'].values.tolist())\n parameters['p4_P_el_h [1/°C]'] = (df['p4_P_el_h [1/°C]'].values.tolist())\n parameters['p1_COP [-]'] = (df['p1_COP [-]'].values.tolist())\n parameters['p2_COP [-]'] = (df['p2_COP [-]'].values.tolist())\n parameters['p3_COP [-]'] = (df['p3_COP [-]'].values.tolist())\n parameters['p4_COP [-]'] = (df['p4_COP [-]'].values.tolist())\n \n if model == 'Generic':\n parameters = parameters.iloc[group_id - 1:group_id]\n parameters.loc[:, 'P_th_h_ref [W]'] = p_th\n t_in_hp = [-7,0,10] # air/water, brine/water, water/water\n t_out_fix = 52\n t_amb_fix = -7\n p1_cop = parameters['p1_COP [-]'].array[0]\n p2_cop = parameters['p2_COP [-]'].array[0]\n p3_cop = parameters['p3_COP [-]'].array[0]\n p4_cop = parameters['p4_COP [-]'].array[0]\n if group_id == 1 or group_id == 4:\n t_in_fix = t_in_hp[0]\n if group_id == 2 or group_id == 5:\n t_in_fix = t_in_hp[1]\n if group_id == 3 or group_id == 6:\n t_in_fix = t_in_hp[2] \n cop_ref = p1_cop * t_in_fix + p2_cop * t_out_fix + p3_cop + p4_cop * t_amb_fix\n p_el_ref = p_th / cop_ref\n parameters.loc[:, 'P_el_h_ref [W]'] = p_el_ref\n parameters.loc[:, 'COP_ref'] = cop_ref\n return parameters\n\n\ndef fit_p_th_ref(t_in: int, t_out: int, group_id: int, p_th_set_point: int) -> Any:\n \"\"\"\n Determine the thermal output power in [W] at reference conditions (T_in = [-7, 0, 10] , \n T_out=52, T_amb=-7) for a given set point for a generic heat pump, using a least-square method.\n\n Parameters\n ----------\n t_in : numeric\n Input temperature :math:`T` at primary side of the heat pump. [°C]\n t_out : numeric\n Output temperature :math:`T` at secondary side of the heat pump. [°C]\n group_id : numeric\n Group ID for a parameter set which represents an average heat pump of its group.\n p_th_set_point : numeric\n Thermal output power. [W]\n\n Returns\n -------\n p_th : Any\n Thermal output power. [W]\n \"\"\"\n P_0 = [1000] # starting values\n a = (t_in, t_out, group_id, p_th_set_point)\n p_th, _ = scipy.optimize.leastsq(fit_func_p_th_ref, P_0, args=a)\n return p_th\n\n\ndef fit_func_p_th_ref(p_th: int, t_in: int, t_out: int, group_id: int, p_th_set_point: int) -> int:\n \"\"\"\n Helper function to determine difference between given and calculated \n thermal output power in [W].\n\n Parameters\n ----------\n p_th : numeric\n Thermal output power. [W]\n t_in : numeric\n Input temperature :math:`T` at primary side of the heat pump. [°C]\n t_out : numeric\n Output temperature :math:`T` at secondary side of the heat pump. 
[°C]\n group_id : numeric\n Group ID for a parameter set which represents an average heat pump of its group.\n p_th_set_point : numeric\n Thermal output power. [W]\n\n Returns\n -------\n p_th_diff : numeric\n Thermal output power. [W]\n \"\"\"\n if group_id == 1 or group_id == 4:\n t_amb = t_in\n else:\n t_amb = -7\n parameters = get_parameters_fit(model='Generic', group_id=group_id, p_th=p_th)\n df = simulate(t_in, t_out - 5, parameters, t_amb)\n p_th_calc=df.P_th.values[0]\n p_th_diff = p_th_calc - p_th_set_point\n return p_th_diff\n\n\ndef simulate(t_in_primary: any, t_in_secondary: any, parameters: pd.DataFrame,\n t_amb: any, mode: int = 1) -> pd.DataFrame:\n \"\"\"\n Performs the simulation of the heat pump model.\n\n Parameters\n ----------\n t_in_primary : numeric or iterable (e.g. pd.Series)\n Input temperature on primry side :math:`T` (air, brine, water). [°C]\n t_in_secondary : numeric or iterable (e.g. pd.Series)\n Input temperature on secondary side :math:`T` from heating storage or system. [°C]\n parameters : pd.DataFrame\n Data frame containing the heat pump parameters from hplib.getParameters().\n t_amb : numeric or iterable (e.g. pd.Series)\n Ambient temperature :math:'T' of the air. [°C]\n mode : int\n for heating: 1, for cooling: 2\n\n Returns\n -------\n df : pd.DataFrame\n with the following columns\n T_in = Input temperature :math:`T` at primary side of the heat pump. [°C]\n T_out = Output temperature :math:`T` at secondary side of the heat pump. [°C]\n T_amb = Ambient / Outdoor temperature :math:`T`. [°C]\n COP = Coefficient of Performance.\n EER = Energy Efficiency Ratio.\n P_el = Electrical input Power. [W]\n P_th = Thermal output power. [W]\n m_dot = Mass flow at secondary side of the heat pump. [kg/s] \n \"\"\"\n\n DELTA_T = 5 # Inlet temperature is supposed to be heated up by 5 K\n CP = 4200 # J/(kg*K), specific heat capacity of water\n t_in = t_in_primary#info value for dataframe\n T_amb = t_amb #info value for dataframe\n group_id = parameters['Group'].array[0]\n p1_p_el_h = parameters['p1_P_el_h [1/°C]'].array[0]\n p2_p_el_h = parameters['p2_P_el_h [1/°C]'].array[0]\n p3_p_el_h = parameters['p3_P_el_h [-]'].array[0]\n p4_p_el_h = parameters['p4_P_el_h [1/°C]'].array[0]\n p1_cop = parameters['p1_COP [-]'].array[0]\n p2_cop = parameters['p2_COP [-]'].array[0]\n p3_cop = parameters['p3_COP [-]'].array[0]\n p4_cop = parameters['p4_COP [-]'].array[0]\n p_el_ref = parameters['P_el_h_ref [W]'].array[0]\n p_th_ref = parameters['P_th_h_ref [W]'].array[0]\n try:\n p1_eer = parameters['p1_EER [-]'].array[0]\n p2_eer = parameters['p2_EER [-]'].array[0]\n p3_eer = parameters['p3_EER [-]'].array[0]\n p4_eer = parameters['p4_EER [-]'].array[0]\n p1_p_el_c = parameters['p1_P_el_c [1/°C]'].array[0]\n p2_p_el_c = parameters['p2_P_el_c [1/°C]'].array[0]\n p3_p_el_c = parameters['p3_P_el_c [-]'].array[0]\n p4_p_el_c = parameters['p4_P_el_c [1/°C]'].array[0]\n p_el_col_ref=parameters['P_el_c_ref [W]'].array[0]\n except:\n p1_eer = np.nan\n p2_eer = np.nan\n p3_eer = np.nan\n p4_eer = np.nan\n p1_p_el_c = np.nan\n p2_p_el_c = np.nan\n p3_p_el_c = np.nan\n p4_p_el_c = np.nan\n p_el_col_ref=np.nan\n # for subtype = air/water heat pump\n if group_id == 1 or group_id == 4:\n t_amb = t_in\n else:\n pass\n if(type(t_in)==pd.core.series.Series or type(t_in_secondary)==pd.core.series.Series or type(t_amb)==pd.core.series.Series):# for handling pandas.Series\n try:\n df=t_in.to_frame()\n df.rename(columns = {t_in.name:'T_in'}, inplace = True)\n df['mode']=mode\n 
df.loc[df['mode']==1,'T_out']=t_in_secondary + DELTA_T\n df.loc[df['mode']==2,'T_out']=t_in_secondary - DELTA_T\n df['T_amb']=t_amb\n \n except:\n try:\n df=t_in_secondary.to_frame()\n df.rename(columns = {t_in_secondary.name:'T_out'}, inplace = True)\n df['mode']=mode\n df.loc[df['mode']==1,'T_out']=df['T_out']+ DELTA_T\n df.loc[df['mode']==2,'T_out']=df['T_out'] - DELTA_T\n df['T_in']=t_in\n df['T_amb']=t_amb\n except:\n df=t_amb.to_frame()\n df.rename(columns = {t_amb.name:'T_amb'}, inplace = True)\n df['T_in']=t_in\n df['mode']=mode\n df.loc[df['mode']==1,'T_out']=t_in_secondary + DELTA_T\n df.loc[df['mode']==2,'T_out']=t_in_secondary - DELTA_T\n if group_id == 1 or group_id == 2 or group_id == 3:\n df.loc[df['mode']==1,'COP'] = p1_cop * t_in + p2_cop * df['T_out'] + p3_cop + p4_cop * t_amb\n df.loc[df['mode']==1,'P_el'] = (p1_p_el_h * t_in + p2_p_el_h * df['T_out'] + p3_p_el_h + p4_p_el_h * t_amb) * p_el_ref #this is the first calculated value for P_el\n if group_id == 1:#with regulated heatpumps the electrical power can get too low. We defined a minimum value at 25% from the point at -7/output temperature.\n df.loc[df['mode']==2,'EER'] = (p1_eer * t_in + p2_eer * df['T_out'] + p3_eer + p4_eer * t_amb)\n df.loc[df['mode']==2,'P_el'] = (p1_p_el_c * t_in + p2_p_el_c * df['T_out'] + p3_p_el_c + p4_p_el_c * t_amb) * p_el_col_ref\n df.loc[(df['mode']==2) & (df['T_in'] < 25),'P_el'] = p_el_col_ref * (p1_p_el_c * 25 + p2_p_el_c * df['T_out'] + p3_p_el_c + p4_p_el_c * 25)\n df.loc[(df['mode']==2) & (df['EER']<1),'P_th'] = np.nan\n df.loc[(df['mode']==2) & (df['EER']<1),'EER'] = np.nan\n df.loc[df['mode']==2,'P_th'] = -(df['P_el'] * df['EER'])\n df.loc[(df['mode']==2) & (df['P_el']<0),'EER'] = np.nan\n df.loc[(df['mode']==2) & (df['P_el']<0),'P_th'] = np.nan\n df.loc[(df['mode']==2) & (df['P_el']<0),'P_el'] = np.nan\n #df.loc[df['mode']==2,'P_el'] = df['P_th'] / df['COP']\n df.loc[df['mode']==1,'t_in'] = -7\n df.loc[df['mode']==1,'t_amb'] = -7\n if group_id == 2:\n df['t_in']=df['T_in']\n df.loc[:,'t_amb'] = -7\n df.loc[(df['mode']==1) & (df['P_el'] < 0.25 * p_el_ref * (p1_p_el_h * df['t_in'] + p2_p_el_h * df['T_out'] + p3_p_el_h + p4_p_el_h * df['t_amb'])),'P_el'] = 0.25 * p_el_ref * (p1_p_el_h * df['t_in'] + p2_p_el_h * df['T_out'] + p3_p_el_h + p4_p_el_h * df['t_amb'])\n df.loc[(df['mode']==1) ,'P_th'] = (df['P_el'] * df['COP'])\n df.loc[(df['mode']==1) & (df['COP'] < 1),'P_el']=p_th_ref#if COP is too low the electeric heating element is used in simulation\n df.loc[(df['mode']==1) & (df['COP'] < 1),'P_th']=p_th_ref\n df.loc[(df['mode']==1) & (df['COP'] < 1),'COP']=1\n df['m_dot']=df['P_th']/(DELTA_T * CP)\n del df['t_in']\n del df['t_amb']\n elif group_id == 4 or group_id == 5 or group_id == 6:\n df['COP'] = p1_cop * t_in + p2_cop * df['T_out'] + p3_cop + p4_cop * t_amb\n df['P_el'] = (p1_p_el_h * t_in + p2_p_el_h * df['T_out'] + p3_p_el_h + p4_p_el_h * t_amb) * p_el_ref\n df['P_th'] = df['P_el'] * df['COP']\n df.loc[df['COP'] < 1,'P_el']=p_th_ref\n df.loc[df['COP'] < 1,'P_th']=p_th_ref#if COP is too low the electeric heating element is used in simulation\n df.loc[df['COP'] < 1,'COP']=1\n df['m_dot']=df['P_th']/(DELTA_T * CP)\n df['P_el']=df['P_el'].round(0)\n df['COP']=df['COP'].round(2)\n df['m_dot']=df['m_dot'].round(3)\n df['m_dot']=df['m_dot'].abs()\n else:\n if mode==1:\n t_out = t_in_secondary + DELTA_T #Inlet temperature is supposed to be heated up by 5 K\n EER=0\n if mode==2: # Inlet temperature is supposed to be cooled down by 5 K\n t_out = t_in_secondary - DELTA_T\n COP=0\n # for 
regulated heat pumps\n if group_id == 1 or group_id == 2 or group_id == 3:\n if mode==1:\n COP = p1_cop * t_in + p2_cop * t_out + p3_cop + p4_cop * t_amb\n P_el = (p1_p_el_h * t_in + p2_p_el_h * t_out + p3_p_el_h + p4_p_el_h * t_amb) * p_el_ref\n if group_id == 1:\n t_in = -7\n t_amb = t_in\n if group_id == 2:\n t_amb = -7\n \n if P_el < 0.25 * p_el_ref * (\n p1_p_el_h * t_in + p2_p_el_h * t_out + p3_p_el_h + p4_p_el_h * t_amb): # 25% of Pel @ -7°C T_amb = T_in\n P_el = 0.25 * p_el_ref * (p1_p_el_h * t_in + p2_p_el_h * t_out + p3_p_el_h + p4_p_el_h * t_amb)\n P_th = P_el * COP\n if COP <= 1:\n COP = 1\n P_el = p_th_ref\n P_th = p_th_ref\n elif mode==2:\n EER = (p1_eer * t_in + p2_eer * t_out + p3_eer + p4_eer * t_amb)\n \n if t_in<25:\n t_in=25\n t_amb=t_in\n P_el = (p1_p_el_c * t_in + p2_p_el_c * t_out + p3_p_el_c + p4_p_el_c * t_amb) * p_el_col_ref\n if P_el<0:\n EER = 0\n P_el = 0\n P_th = -(EER*P_el)\n if EER < 1:\n EER = 0\n P_el = 0\n P_th = 0\n\n # for subtype = On-Off\n elif group_id == 4 or group_id == 5 or group_id == 6:\n P_el = (p1_p_el_h * t_in + p2_p_el_h * t_out + p3_p_el_h + p4_p_el_h * t_amb) * p_el_ref\n COP = p1_cop * t_in + p2_cop * t_out + p3_cop + p4_cop * t_amb\n P_th = P_el * COP\n if COP <= 1:\n COP = 1\n P_el = p_th_ref\n P_th = p_th_ref\n # massflow\n m_dot = P_th / (DELTA_T * CP)\n #round\n df=pd.DataFrame()\n \n df['T_in']=[t_in_primary]\n df['T_out']=[t_out]\n df['T_amb']=[T_amb]\n df['COP']=[COP]\n df['EER']=[EER]\n df['P_el']=[P_el]\n df['P_th']=[P_th]\n df['m_dot']=[abs(m_dot)]\n return df\n\ndef cwd():\n real_path = os.path.realpath(__file__)\n dir_path = os.path.dirname(real_path)\n return dir_path\n\nclass HeatPump:\n def __init__(self, parameters: pd.DataFrame):\n self.group_id = float(parameters['Group'].array[0])\n self.p1_p_el_h = float(parameters['p1_P_el_h [1/°C]'].array[0])\n self.p2_p_el_h = float(parameters['p2_P_el_h [1/°C]'].array[0])\n self.p3_p_el_h = float(parameters['p3_P_el_h [-]'].array[0])\n self.p4_p_el_h = float(parameters['p4_P_el_h [1/°C]'].array[0])\n self.p1_cop = float(parameters['p1_COP [-]'].array[0])\n self.p2_cop = float(parameters['p2_COP [-]'].array[0])\n self.p3_cop = float(parameters['p3_COP [-]'].array[0])\n self.p4_cop = float(parameters['p4_COP [-]'].array[0])\n self.p_el_ref = float(parameters['P_el_h_ref [W]'].array[0])\n self.p_th_ref = float(parameters['P_th_h_ref [W]'].array[0])\n try:\n self.p1_eer = parameters['p1_EER [-]'].array[0]\n self.p2_eer = parameters['p2_EER [-]'].array[0]\n self.p3_eer = parameters['p3_EER [-]'].array[0]\n self.p4_eer = parameters['p4_EER [-]'].array[0]\n self.p1_p_el_c = parameters['p1_P_el_c [1/°C]'].array[0]\n self.p2_p_el_c = parameters['p2_P_el_c [1/°C]'].array[0]\n self.p3_p_el_c = parameters['p3_P_el_c [-]'].array[0]\n self.p4_p_el_c = parameters['p4_P_el_c [1/°C]'].array[0]\n self.p_el_col_ref=parameters['P_el_c_ref [W]'].array[0]\n except:\n self.p1_eer = np.nan\n self.p2_eer = np.nan\n self.p3_eer = np.nan\n self.p4_eer = np.nan\n self.p1_p_el_c = np.nan\n self.p2_p_el_c = np.nan\n self.p3_p_el_c = np.nan\n self.p4_p_el_c = np.nan\n self.p_el_col_ref=np.nan\n\n self.delta_t = 5 # Inlet temperature is supposed to be heated up by 5 K\n self.cp = 4200 # J/(kg*K), specific heat capacity of water\n\n def simulate(self, t_in_primary: float, t_in_secondary: float, t_amb: float, mode: int = 1) -> dict:\n \"\"\"\n Performs the simulation of the heat pump model.\n\n Parameters\n ----------\n t_in_primary : numeric or iterable (e.g. 
pd.Series)\n Input temperature on primry side :math:`T` (air, brine, water). [°C]\n t_in_secondary : numeric or iterable (e.g. pd.Series)\n Input temperature on secondary side :math:`T` from heating storage or system. [°C]\n parameters : pd.DataFrame\n Data frame containing the heat pump parameters from hplib.getParameters().\n t_amb : numeric or iterable (e.g. pd.Series)\n Ambient temperature :math:'T' of the air. [°C]\n mode : int\n for heating: 1, for cooling: 2\n\n Returns\n -------\n result : dict\n with the following columns\n T_in = Input temperature :math:`T` at primary side of the heat pump. [°C]\n T_out = Output temperature :math:`T` at secondary side of the heat pump. [°C]\n T_amb = Ambient / Outdoor temperature :math:`T`. [°C]\n COP = Coefficient of Performance.\n EER = Energy Efficiency Ratio.\n P_el = Electrical input Power. [W]\n P_th = Thermal output power. [W]\n m_dot = Mass flow at secondary side of the heat pump. [kg/s]\n \"\"\"\n\n if mode==2 and self.group_id > 1:\n raise ValueError('Cooling is only possible with heat pumps of group id = 1.')\n \n t_in = t_in_primary # info value for dataframe\n if mode==1:\n t_out = t_in_secondary + self.delta_t #Inlet temperature is supposed to be heated up by 5 K\n eer=0\n if mode==2: # Inlet temperature is supposed to be cooled down by 5 K\n t_out = t_in_secondary - self.delta_t\n cop=0\n # for subtype = air/water heat pump\n if self.group_id in (1, 4):\n t_amb = t_in\n t_ambient=t_amb\n # for regulated heat pumps\n if self.group_id in (1, 2, 3):\n if mode==1:\n cop = self.p1_cop * t_in + self.p2_cop * t_out + self.p3_cop + self.p4_cop * t_amb\n p_el=self.p_el_ref * (self.p1_p_el_h * t_in\n + self.p2_p_el_h * t_out\n + self.p3_p_el_h\n + self.p4_p_el_h * t_amb)\n if self.group_id == 1:\n if isinstance(t_in, np.ndarray):\n t_in = np.full_like(t_in, -7)\n else:\n t_in = -7\n t_amb = t_in\n\n elif self.group_id == 2:\n if isinstance(t_amb, np.ndarray):\n t_amb = np.full_like(t_amb, -7)\n else:\n t_amb = -7\n p_el_25 = 0.25 * self.p_el_ref * (self.p1_p_el_h * t_in\n + self.p2_p_el_h * t_out\n + self.p3_p_el_h\n + self.p4_p_el_h * t_amb)\n if isinstance(p_el, np.ndarray):\n p_el = np.where(p_el < p_el_25, p_el_25, p_el)\n elif p_el < p_el_25:\n p_el = p_el_25\n \n \n p_th = p_el * cop\n if isinstance(cop, np.ndarray):\n p_el = np.where(cop <= 1, self.p_th_ref, p_el)\n p_th = np.where(cop <= 1, self.p_th_ref, p_th)\n cop = np.where(cop <= 1, 1, cop)\n elif cop <= 1:\n cop = 1\n p_el = self.p_th_ref\n p_th = self.p_th_ref\n if mode==2:\n eer = (self.p1_eer * t_in + self.p2_eer * t_out + self.p3_eer + self.p4_eer * t_amb)\n if isinstance(t_in, np.ndarray):\n t_in=np.where(t_in<25,25,t_in)\n elif t_in<25:\n t_in=25\n t_amb=t_in\n p_el = (self.p1_p_el_c * t_in + self.p2_p_el_c * t_out + self.p3_p_el_c + self.p4_p_el_c * t_amb) * self.p_el_col_ref\n if isinstance(p_el,np.ndarray):\n eer = np.where(p_el<0,0,eer)\n p_el = np.where(p_el<0,0,p_el)\n elif p_el<0:\n eer = 0\n p_el = 0\n p_th = -(eer*p_el)\n if isinstance(eer,np.ndarray):\n p_el = np.where(eer <= 1, 0 , p_el)\n p_th = np.where(eer <= 1, 0 , p_th)\n eer = np.where(eer <= 1, 0, eer)\n elif eer < 1:\n eer = 0\n p_el = 0\n p_th = 0\n\n # for subtype = On-Off\n elif self.group_id in (4, 5, 6):\n p_el = (self.p1_p_el_h * t_in\n + self.p2_p_el_h * t_out\n + self.p3_p_el_h\n + self.p4_p_el_h * t_amb) * self.p_el_ref\n\n cop = self.p1_cop * t_in + self.p2_cop * t_out + self.p3_cop + self.p4_cop * t_amb\n\n p_th = p_el * cop\n\n if isinstance(cop, np.ndarray):\n p_el = np.where(cop <= 1, 
self.p_th_ref, p_el)\n p_th = np.where(cop <= 1, self.p_th_ref, p_th)\n cop = np.where(cop <= 1, 1, cop)\n elif cop <= 1:\n cop = 1\n p_el = self.p_th_ref\n p_th = self.p_th_ref\n\n # massflow\n m_dot = abs(p_th / (self.delta_t * self.cp))\n \n # round\n result = dict()\n\n result['T_in'] = t_in_primary\n result['T_out'] = t_out\n result['T_amb'] = t_ambient\n result['COP'] = cop\n result['EER'] = eer\n result['P_el'] = p_el\n result['P_th'] = p_th\n result['m_dot']= m_dot\n\n return result\n" ]
[ [ "numpy.full_like", "scipy.optimize.leastsq", "numpy.where", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Sulecs1/Diabetes_Classification_Project
[ "0522b88c39389810a92afe00d63825c42be0f768" ]
[ "Diabetes_Classification.py" ]
[ "########################################################\n# Diabetes Classification Project #\n########################################################\n#<<<Şule AKÇAY>>>\n#Pregnancies: Hamilelik sayısı\n#Glucose: Oral glikoz tolerans testintinde 2 saatlik plazma glikoz\n#konsantrasyonu\n#BloodPressure: Kan basıncı (Küçük tansiyon) (mm Hg)\n#SkinT hickness: Cilt kalınlığı\n#Insulin: 2 saatlik serum insülini (mu U/ ml)\n#BMIBody: Vücut kitle indeksi (Weight in kg/ (height in m)^2)\n#DiabetesPedigreeFunc tion: Aile geçmişine göre diyabet olasılığını\n#puanlayan bir fonksyion.\n#Age: Yaş (yıl)\n#Outcome: Hastalığa sahip (1) ya da değil (0)\n#########################################################\n\n#Gerekli Olan Kütüphaneler eklendi\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import export_graphviz, export_text\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neighbors import LocalOutlierFactor\nimport missingno as msno\nimport pickle\nimport pydotplus\nfrom skompiler import skompile\nimport joblib\nimport warnings\nfrom sklearn.metrics import *\nfrom sklearn.model_selection import *\n\nimport pickle\nfrom helpers.data_prep import *\nfrom helpers.eda import *\nfrom helpers.helpers import *\n\n#Eklentiler eklendi\npd.pandas.set_option('display.max_columns', None)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\npd.set_option('display.width', 170)\nwarnings.filterwarnings('ignore')\n\n#Veri seti için fonskiyon oluşturuldu\ndef load():\n data = pd.read_csv(r\"C:\\Users\\Suleakcay\\PycharmProjects\\pythonProject6\\data\\diabetes.csv\")\n return data\n\ndf = load()\ndf.head()\n\n#Aykırı değer varsa görebilmek için\nmsno.bar(df)\nplt.show()\n#veri seti gözlemler hakkında inceleme yapıldı\ngrab_col_names(df)\n\ndef data_understand(df):\n print(\"DF SHAPE:\", df.shape)\n print(\"------------------------------------------------------------------------\")\n print(\"OUTCOME 1 DF RATIO:\", len(df[df[\"Outcome\"] == 1]) / len(df))\n print(\"OUTCOME 0 DF RATIO:\", len(df[df[\"Outcome\"] == 0]) / len(df))\n print(\"------------------------------------TYPES------------------------------------\")\n print(df.dtypes)\n print(\"------------------------------------HEAD------------------------------------\")\n print(df.head())\n print(\"-------------------------------------TAİL-----------------------------------\")\n print(df.tail())\n print(\"------------------------------------DESCRİBE------------------------------------\")\n print(df.describe().T)\n print(\"-------------------------------------QUANTİLE-----------------------------------\")\n print(df.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)\n print(\"-----------------------------------CORR-------------------------------------\")\n # Isı haritasında, daha parlak renkler daha fazla korelasyonu gösterir.\n # Tablodan ve ısı haritasından da görebileceğimiz gibi, glikoz seviyeleri, yaş, vücut kitle indeksi ve gebelik sayısı, sonuç değişkeni ile önemli bir korelasyona sahiptir. 
Ayrıca yaş ve gebelikler veya insülin ve cilt kalınlığı gibi özellik çiftleri arasındaki korelasyona dikkat ediniz.\n\n corr = df.corr()\n print(corr)\n print(\"--------------------------------------HEATMAP----------------------------------\")\n sns.heatmap(corr,\n xticklabels=corr.columns,\n yticklabels=corr.columns)\n plt.show()\n print(\"------------------------------------------------------------------------\")\n df.hist(bins=20, color=\"#1c0f45\", edgecolor='orange', figsize=(15, 15));\n plt.show()\n print(\"------------------------------------------------------------------------\")\n\ndata_understand(df)\n\n#Veri setindeki eksik değerleri sorgulamak için\ndef df_questioning_null(df):\n\n print(f\"Veri kümesinde hiç boş değer var mı?: {df.isnull().values.any()}\")\n if df.isnull().values.any():\n null_values = df.isnull().sum()\n print(f\"Hangi sütunlarda eksik değerler var?:\\n{null_values[null_values > 0]}\")\n\ndf_questioning_null(df)\ndf.shape\n\n\ndef col_nan_assigment(df):\n #Nehir Günde Daşçı\n for col in df.columns:\n for row in range(len(df)):\n if col != \"Outcome\":\n if df.loc[row, col] == 0:\n if df.loc[row, \"Outcome\"] == 1:\n df.loc[row, col] = df.loc[df[\"Outcome\"] == 1, col].median()\n else:\n df.loc[row, col] = df.loc[df[\"Outcome\"] == 0, col].median()\n\ncol_nan_assigment(df)\ndf.head()\n\n#numerik ve kategorik değişken isimleri\ndef num_and_cat_name(df):\n cat_cols = [col for col in df.columns if df[col].nunique() < 10 and df[col].dtypes != \"O\"]\n num_cols = [col for col in df.columns if df[col].dtypes != 'O' and col not in [\"Outcome\"]]\n return cat_cols, num_cols\n\nnum_and_cat_name(df)\n\nlist_num = []\nfor col in df.columns:\n if df[col].dtypes != 'O' and col not in [\"Outcome\"]:\n list_num.append(col)\n\n\n#Aykırı değerler boxplot grafiği gözlendi\ndef plot_outliers(df):\n for col in df.columns:\n if col in list_num:\n sns.boxplot(x=df[col])\n plt.title(\"BoxPlot Grafik Gösterimi\")\n plt.show()\nplot_outliers(df)\n\n\n#numerik değişkenlere göre target analizi yaptım\nnum_cols = [col for col in df.columns if df[col].nunique() > 10\n and df[col].dtypes != 'O'\n and col not in [\"Outcome\"]]\n\ndef target_summary_with_num(dataframe, target, numerical_col):#yukarıdaki işlemin genelleştirlmiş hali\n\n print(dataframe.groupby(target).agg({numerical_col: \"mean\"}), end=\"\\n\\n\\n\")\n\nfor col in num_cols:\n target_summary_with_num(df, \"Outcome\", col)\n\n#sayısal değişkenleri birbirleri ile karşılaştırma işlemi grafik incelenerek yapıldı\n#def check_plot(dataframe):\n# for colx in df.columns:\n# for coly in list_num:\n# if colx != coly:\n# sns.lmplot(x=colx, y=coly, data=dataframe)\n# plt.show()\n\n#check_plot(df)\n\n#eşik değerini bulmak için kırılım grafiği incelendi\nclf = LocalOutlierFactor(n_neighbors = 20, contamination = 0.1)\nclf.fit_predict(df)\ndf_scores = clf.negative_outlier_factor_\nscores = pd.DataFrame(np.sort(df_scores))\nscores.plot(stacked=True, xlim=[0, 20], style='.-')\nplt.show()\n\nesik_deger = np.sort(df_scores)[5] #eşik değerimiz\ndf[df_scores < esik_deger] #eşik değerine göre seçim aykırıları seçtik yaptık\ndf[df_scores < esik_deger].shape #sayısı\ndf.describe().T\n#indeksleri tutma amacımız indekse göre kolay işlem yapmak\ndf[df_scores < esik_deger].index\n#hepsini silmek istersek\ndf.drop(axis=0, labels=df[df_scores < esik_deger].index)\ndf = df.drop(axis=0, labels=df[df_scores < esik_deger].index)\ndf.head()\n\n#kişinin akrabalarının diabet olma olasılığını 0-1 arasına çektik\ntransformer = 
MinMaxScaler()\ndf[\"DiabetesPedigreeFunction\"] = transformer.fit_transform(df[[\"DiabetesPedigreeFunction\"]])\ndf.head()\n\n# 140 altı normal\n# 140-199 gizli şeker\n# 200 ve üzeri diyabet\ndf[\"Insulin_Category\"] = pd.cut(x=df[\"Insulin\"],\n bins=[0, 140, 200, df[\"Insulin\"].max()],\n labels=[\"Normal\", \"Gizli_Şeker\", \"Diyabet\"])\n\ndf[\"Insulin_Category\"] = df[\"Insulin_Category\"].fillna(\"Normal\")\ndf = one_hot_encoder(df, [\"Insulin_Category\"], drop_first=True)\ndf = df.drop(\"Insulin\", axis=1)\ndf.head()\n\n\n#MODEL OLUŞTURMA\ny = df[\"Outcome\"]\nX = df.drop([\"Outcome\"], axis=1)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=17)\n\ncart_model = DecisionTreeClassifier(random_state=17).fit(X_train, y_train) #train veri setine göre fit ettik\n\n# train hatası\ny_pred = cart_model.predict(X_train) #train setinin bağımsız değişkenlerini yerine koyarak bağımlı değişkenlerini tahmin ettim\ny_prob = cart_model.predict_proba(X_train)[:, 1]\nprint(classification_report(y_train, y_pred))\nroc_auc_score(y_train, y_prob) #1.0 overfit\n\n# test hatası\ny_pred = cart_model.predict(X_test)\ny_prob = cart_model.predict_proba(X_test)[:, 1]\nprint(classification_report(y_test, y_pred))\nroc_auc_score(y_test, y_prob) #modelin perforomansı\n#0.7533928239449712\n\n\n#karar ağacını görselleştirme işlemi\ndef tree_graph_to_png(tree, feature_names, png_file_to_save):\n tree_str = export_graphviz(tree, feature_names=feature_names, filled=True, out_file=None)\n graph = pydotplus.graph_from_dot_data(tree_str)\n graph.write_png(png_file_to_save)\n\ntree_graph_to_png(tree=cart_model, feature_names=X_train.columns, png_file_to_save='cart.png')\n\n#karar kuralları çıkarma işlemi\ntree_rules = export_text(cart_model, feature_names=list(X_train.columns))\nprint(tree_rules)\n\n\n#değişken önem düzeylerini incelemek\ndef plot_importance(model, features, num=len(X), save=False):\n feature_imp = pd.DataFrame({'Value': model.feature_importances_, 'Feature': features.columns})\n plt.figure(figsize=(10, 10))\n sns.set(font_scale=1)\n sns.barplot(x=\"Value\", y=\"Feature\", data=feature_imp.sort_values(by=\"Value\",\n ascending=False)[0:num])\n plt.title('Features')\n plt.tight_layout()\n plt.show()\n if save:\n plt.savefig('importances.png')\n\nplot_importance(cart_model, X_train)\n\n###############################\n#HİPERPARAMETRE OPTİMİZSAYONU\n###############################\n\n#boş model nesnesi oluşturduk\ncart_model = DecisionTreeClassifier(random_state=17)\n# arama yapılacak hiperparametre setleri\ncart_params = {'max_depth': range(1, 11),\n \"min_samples_split\": [2, 3, 4]}\n\n#cross validation ile hiperparemetre araması yapacağız\n#Yani hiperparametre araması yapılırken train seti üzerinde yapılır tüm veri üzerinde yapılmaz!\n#model doğrulama yapılınca asla bütün veri kullanılmaz !!Yanlılık oluşturur!!!\ncart_cv = GridSearchCV(cart_model, cart_params, cv=5, n_jobs=-1, verbose=True)\ncart_cv.fit(X_train, y_train) #train setini çapraz doğrulamaya sokuyor\n\n\n\ncart_tuned = DecisionTreeClassifier(**cart_cv.best_params_).fit(X_train, y_train)\n#Model incelemesi\n#** tüm parametreler\n# train hatası\ny_pred = cart_tuned.predict(X_train)\ny_prob = cart_tuned.predict_proba(X_train)[:, 1]\nprint(classification_report(y_train, y_pred))\nroc_auc_score(y_train, y_prob)\n#train setindeki hata :0.9123278443113773\n\n#test hatası\ny_pred = cart_tuned.predict(X_test)\ny_prob = cart_tuned.predict_proba(X_test)[:, 1]\nprint(classification_report(y_test, 
y_pred))\nroc_auc_score(y_test, y_prob)\n#test hatamız :0.8587562744004462\n\n\n################################\n# FİNAL MODELİN YENİDEN TÜM VERİYE FİT EDİLMESİ\n################################\n\ncart_tuned_final = DecisionTreeClassifier(**cart_cv.best_params_).fit(X, y)\n\n################################\n# MODELİN DAHA SONRA KULLANILMAK ÜZERE KAYDEDİLMESİ\n################################\n\nimport joblib\njoblib.dump(cart_tuned_final, \"cart_tuned_final.pkl\")\n\ncart_model_from_disk = joblib.load(\"cart_tuned_final.pkl\")\n\ncart_model_from_disk.predict(X_test)\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.pandas.set_option", "pandas.read_csv", "sklearn.tree.export_graphviz", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.sort", "sklearn.neighbors.LocalOutlierFactor", "pandas.DataFrame", "matplotlib.pyplot.savefig", "sklearn.tree.DecisionTreeClassifier", "pandas.set_option", "matplotlib.pyplot.show", "sklearn.preprocessing.MinMaxScaler", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
guoqianyou/qlib
[ "184ce34a347123bf2cdd0bb48e2e110df9fe2722" ]
[ "qlib/data/dataset/processor.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport abc\nfrom typing import Union, Text\nimport numpy as np\nimport pandas as pd\n\nfrom ...constant import EPS\nfrom .utils import fetch_df_by_index\nfrom ...utils.serial import Serializable\nfrom ...utils.paral import datetime_groupby_apply\n\n\ndef get_group_columns(df: pd.DataFrame, group: Union[Text, None]):\n \"\"\"\n get a group of columns from multi-index columns DataFrame\n\n Parameters\n ----------\n df : pd.DataFrame\n with multi of columns.\n group : str\n the name of the feature group, i.e. the first level value of the group index.\n \"\"\"\n if group is None:\n return df.columns\n else:\n return df.columns[df.columns.get_loc(group)]\n\n\nclass Processor(Serializable):\n def fit(self, df: pd.DataFrame = None):\n \"\"\"\n learn data processing parameters\n\n Parameters\n ----------\n df : pd.DataFrame\n When we fit and process data with processor one by one. The fit function reiles on the output of previous\n processor, i.e. `df`.\n\n \"\"\"\n pass\n\n @abc.abstractmethod\n def __call__(self, df: pd.DataFrame):\n \"\"\"\n process the data\n\n NOTE: **The processor could change the content of `df` inplace !!!!! **\n User should keep a copy of data outside\n\n Parameters\n ----------\n df : pd.DataFrame\n The raw_df of handler or result from previous processor.\n \"\"\"\n pass\n\n def is_for_infer(self) -> bool:\n \"\"\"\n Is this processor usable for inference\n Some processors are not usable for inference.\n\n Returns\n -------\n bool:\n if it is usable for infenrece.\n \"\"\"\n return True\n\n def readonly(self) -> bool:\n \"\"\"\n Does the processor treat the input data readonly (i.e. does not write the input data) when processing\n\n Knowning the readonly information is helpful to the Handler to avoid uncessary copy\n \"\"\"\n return False\n\n def config(self, **kwargs):\n attr_list = {\"fit_start_time\", \"fit_end_time\"}\n for k, v in kwargs.items():\n if k in attr_list and hasattr(self, k):\n setattr(self, k, v)\n\n for attr in attr_list:\n if attr in kwargs:\n kwargs.pop(attr)\n super().config(**kwargs)\n\n\nclass DropnaProcessor(Processor):\n def __init__(self, fields_group=None):\n self.fields_group = fields_group\n\n def __call__(self, df):\n return df.dropna(subset=get_group_columns(df, self.fields_group))\n\n def readonly(self):\n return True\n\n\nclass DropnaLabel(DropnaProcessor):\n def __init__(self, fields_group=\"label\"):\n super().__init__(fields_group=fields_group)\n\n def is_for_infer(self) -> bool:\n \"\"\"The samples are dropped according to label. 
So it is not usable for inference\"\"\"\n return False\n\n\nclass DropCol(Processor):\n def __init__(self, col_list=[]):\n self.col_list = col_list\n\n def __call__(self, df):\n if isinstance(df.columns, pd.MultiIndex):\n mask = df.columns.get_level_values(-1).isin(self.col_list)\n else:\n mask = df.columns.isin(self.col_list)\n return df.loc[:, ~mask]\n\n def readonly(self):\n return True\n\n\nclass FilterCol(Processor):\n def __init__(self, fields_group=\"feature\", col_list=[]):\n self.fields_group = fields_group\n self.col_list = col_list\n\n def __call__(self, df):\n\n cols = get_group_columns(df, self.fields_group)\n all_cols = df.columns\n diff_cols = np.setdiff1d(all_cols.get_level_values(-1), cols.get_level_values(-1))\n self.col_list = np.union1d(diff_cols, self.col_list)\n mask = df.columns.get_level_values(-1).isin(self.col_list)\n return df.loc[:, mask]\n\n def readonly(self):\n return True\n\n\nclass TanhProcess(Processor):\n \"\"\"Use tanh to process noise data\"\"\"\n\n def __call__(self, df):\n def tanh_denoise(data):\n mask = data.columns.get_level_values(1).str.contains(\"LABEL\")\n col = df.columns[~mask]\n data[col] = data[col] - 1\n data[col] = np.tanh(data[col])\n\n return data\n\n return tanh_denoise(df)\n\n\nclass ProcessInf(Processor):\n \"\"\"Process infinity\"\"\"\n\n def __call__(self, df):\n def replace_inf(data):\n def process_inf(df):\n for col in df.columns:\n # FIXME: Such behavior is very weird\n df[col] = df[col].replace([np.inf, -np.inf], df[col][~np.isinf(df[col])].mean())\n return df\n\n data = datetime_groupby_apply(data, process_inf)\n data.sort_index(inplace=True)\n return data\n\n return replace_inf(df)\n\n\nclass Fillna(Processor):\n \"\"\"Process NaN\"\"\"\n\n def __init__(self, fields_group=None, fill_value=0):\n self.fields_group = fields_group\n self.fill_value = fill_value\n\n def __call__(self, df):\n if self.fields_group is None:\n df.fillna(self.fill_value, inplace=True)\n else:\n cols = get_group_columns(df, self.fields_group)\n df.fillna({col: self.fill_value for col in cols}, inplace=True)\n return df\n\n\nclass MinMaxNorm(Processor):\n def __init__(self, fit_start_time, fit_end_time, fields_group=None):\n # NOTE: correctly set the `fit_start_time` and `fit_end_time` is very important !!!\n # `fit_end_time` **must not** include any information from the test data!!!\n self.fit_start_time = fit_start_time\n self.fit_end_time = fit_end_time\n self.fields_group = fields_group\n\n def fit(self, df):\n df = fetch_df_by_index(df, slice(self.fit_start_time, self.fit_end_time), level=\"datetime\")\n cols = get_group_columns(df, self.fields_group)\n self.min_val = np.nanmin(df[cols].values, axis=0)\n self.max_val = np.nanmax(df[cols].values, axis=0)\n self.ignore = self.min_val == self.max_val\n self.cols = cols\n\n def __call__(self, df):\n def normalize(x, min_val=self.min_val, max_val=self.max_val, ignore=self.ignore):\n if (~ignore).all():\n return (x - min_val) / (max_val - min_val)\n for i in range(ignore.size):\n if not ignore[i]:\n x[i] = (x[i] - min_val) / (max_val - min_val)\n return x\n\n df.loc(axis=1)[self.cols] = normalize(df[self.cols].values)\n return df\n\n\nclass ZScoreNorm(Processor):\n \"\"\"ZScore Normalization\"\"\"\n\n def __init__(self, fit_start_time, fit_end_time, fields_group=None):\n # NOTE: correctly set the `fit_start_time` and `fit_end_time` is very important !!!\n # `fit_end_time` **must not** include any information from the test data!!!\n self.fit_start_time = fit_start_time\n self.fit_end_time = fit_end_time\n 
self.fields_group = fields_group\n\n def fit(self, df):\n df = fetch_df_by_index(df, slice(self.fit_start_time, self.fit_end_time), level=\"datetime\")\n cols = get_group_columns(df, self.fields_group)\n self.mean_train = np.nanmean(df[cols].values, axis=0)\n self.std_train = np.nanstd(df[cols].values, axis=0)\n self.ignore = self.std_train == 0\n self.cols = cols\n\n def __call__(self, df):\n def normalize(x, mean_train=self.mean_train, std_train=self.std_train, ignore=self.ignore):\n if (~ignore).all():\n return (x - mean_train) / std_train\n for i in range(ignore.size):\n if not ignore[i]:\n x[i] = (x[i] - mean_train) / std_train\n return x\n\n df.loc(axis=1)[self.cols] = normalize(df[self.cols].values)\n return df\n\n\nclass RobustZScoreNorm(Processor):\n \"\"\"Robust ZScore Normalization\n\n Use robust statistics for Z-Score normalization:\n mean(x) = median(x)\n std(x) = MAD(x) * 1.4826\n\n Reference:\n https://en.wikipedia.org/wiki/Median_absolute_deviation.\n \"\"\"\n\n def __init__(self, fit_start_time, fit_end_time, fields_group=None, clip_outlier=True):\n # NOTE: correctly set the `fit_start_time` and `fit_end_time` is very important !!!\n # `fit_end_time` **must not** include any information from the test data!!!\n self.fit_start_time = fit_start_time\n self.fit_end_time = fit_end_time\n self.fields_group = fields_group\n self.clip_outlier = clip_outlier\n\n def fit(self, df):\n df = fetch_df_by_index(df, slice(self.fit_start_time, self.fit_end_time), level=\"datetime\")\n self.cols = get_group_columns(df, self.fields_group)\n X = df[self.cols].values\n self.mean_train = np.nanmedian(X, axis=0)\n self.std_train = np.nanmedian(np.abs(X - self.mean_train), axis=0)\n self.std_train += EPS\n self.std_train *= 1.4826\n\n def __call__(self, df):\n X = df[self.cols]\n X -= self.mean_train\n X /= self.std_train\n df[self.cols] = X\n if self.clip_outlier:\n df.clip(-3, 3, inplace=True)\n return df\n\n\nclass CSZScoreNorm(Processor):\n \"\"\"Cross Sectional ZScore Normalization\"\"\"\n\n def __init__(self, fields_group=None):\n self.fields_group = fields_group\n\n def __call__(self, df):\n # try not modify original dataframe\n cols = get_group_columns(df, self.fields_group)\n df[cols] = df[cols].groupby(\"datetime\").apply(lambda x: (x - x.mean()).div(x.std()))\n\n return df\n\n\nclass CSRankNorm(Processor):\n \"\"\"\n Cross Sectional Rank Normalization.\n \"Cross Sectional\" is often used to describe data operations.\n The operations across different stocks are often called Cross Sectional Operation.\n\n For example, CSRankNorm is an operation that grouping the data by each day and rank `across` all the stocks in each day.\n \"\"\"\n\n def __init__(self, fields_group=None):\n self.fields_group = fields_group\n\n def __call__(self, df):\n # try not modify original dataframe\n cols = get_group_columns(df, self.fields_group)\n t = df[cols].groupby(\"datetime\").rank(pct=True)\n t -= 0.5\n t *= 3.46 # NOTE: towards unit std\n df[cols] = t\n return df\n\n\nclass CSZFillna(Processor):\n \"\"\"Cross Sectional Fill Nan\"\"\"\n\n def __init__(self, fields_group=None):\n self.fields_group = fields_group\n\n def __call__(self, df):\n cols = get_group_columns(df, self.fields_group)\n df[cols] = df[cols].groupby(\"datetime\").apply(lambda x: x.fillna(x.mean()))\n return df\n\n\nclass HashStockFormat(Processor):\n \"\"\"Process the storage of from df into hasing stock format\"\"\"\n\n def __call__(self, df: pd.DataFrame):\n from .storage import HasingStockStorage\n\n return 
HasingStockStorage.from_df(df)\n" ]
[ [ "numpy.nanmax", "numpy.nanmedian", "numpy.abs", "numpy.nanmin", "numpy.union1d", "numpy.nanmean", "numpy.nanstd", "numpy.tanh", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mhd-medfa/got10k-toolkit
[ "bef9f21a09f2a4f30a9f4c3faeb52e0019c32cdf" ]
[ "got10k/datasets/uav123.py" ]
[ "from __future__ import absolute_import, print_function\n\nimport os\nimport glob\nimport numpy as np\nimport six\nimport json\n\n\nclass UAV123(object):\n \"\"\"`UAV123 <https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx>`_ Dataset.\n\n Publication:\n ``A Benchmark and Simulator for UAV Tracking``,\n M. Mueller, N. Smith and B. Ghanem, ECCV 2016.\n \n Args:\n root_dir (string): Root directory of dataset where sequence\n folders exist.\n version (integer or string): Specify the benchmark version, specify as one of\n ``UAV123`` and ``UAV20L``.\n \"\"\"\n def __init__(self, root_dir, version='UAV123'):\n super(UAV123, self).__init__()\n assert version.upper() in ['UAV20L', 'UAV123']\n\n self.root_dir = root_dir\n self.version = version.upper()\n self._check_integrity(root_dir, version)\n\n # sequence meta information\n meta_file = os.path.join(\n os.path.dirname(__file__), 'uav123.json')\n with open(meta_file) as f:\n self.seq_metas = json.load(f)\n\n # sequence and annotation paths\n self.anno_files = sorted(glob.glob(\n os.path.join(root_dir, 'anno/%s/*.txt' % version)))\n self.seq_names = [\n os.path.basename(f)[:-4] for f in self.anno_files]\n self.seq_dirs = [os.path.join(\n root_dir, 'data_seq/UAV123/%s' % \\\n self.seq_metas[version][n]['folder_name'])\n for n in self.seq_names]\n \n def __getitem__(self, index):\n r\"\"\" \n Args:\n index (integer or string): Index or name of a sequence.\n \n Returns:\n tuple: (img_files, anno), where ``img_files`` is a list of\n file names and ``anno`` is a N x 4 (rectangles) numpy array.\n \"\"\"\n if isinstance(index, six.string_types):\n if not index in self.seq_names:\n raise Exception('Sequence {} not found.'.format(index))\n index = self.seq_names.index(index)\n\n # valid frame range\n start_frame = self.seq_metas[self.version][\n self.seq_names[index]]['start_frame']\n end_frame = self.seq_metas[self.version][\n self.seq_names[index]]['end_frame']\n img_files = [os.path.join(\n self.seq_dirs[index], '%06d.jpg' % f)\n for f in range(start_frame, end_frame + 1)]\n\n # load annotations\n anno = np.loadtxt(self.anno_files[index], delimiter=',')\n assert len(img_files) == len(anno)\n assert anno.shape[1] == 4\n\n return img_files, anno\n\n def __len__(self):\n return len(self.seq_names)\n\n def _check_integrity(self, root_dir, version):\n # sequence meta information\n meta_file = os.path.join(\n os.path.dirname(__file__), 'uav123.json')\n with open(meta_file) as f:\n seq_metas = json.load(f)\n seq_names = list(seq_metas[version].keys())\n\n if os.path.isdir(root_dir) and len(os.listdir(root_dir)) > 3:\n # check each sequence folder\n for seq_name in seq_names:\n seq_dir = os.path.join(\n root_dir, 'data_seq/UAV123/%s' % \\\n seq_metas[version][seq_name]['folder_name'])\n if not os.path.isdir(seq_dir):\n print('Warning: sequence %s not exists.' % seq_name)\n else:\n # dataset not exists\n raise Exception('Dataset not found or corrupted.')\n" ]
[ [ "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ming2010/msds621
[ "4976b4c1547890b590383685ca7a7d665cc81ba5" ]
[ "projects/linreg/test_class.py" ]
[ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.datasets import load_wine, load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import log_loss\n\nfrom linreg import *\n\ndef MAE(a,b): return np.mean(np.abs(a-b))\n\n\ndef wine_data():\n wine = load_wine()\n df_wine = pd.DataFrame(data=wine.data, columns=wine.feature_names)\n df_wine['y'] = wine.target\n df_wine = df_wine[df_wine['y'] < 2] # get two-class dataset\n X = df_wine.drop('y', axis=1).values\n y = df_wine['y'].values\n y = y.reshape(-1, 1)\n return X, y\n\ndef iris_data():\n iris = load_iris()\n\n df_iris = pd.DataFrame(data=iris.data, columns=iris.feature_names)\n df_iris['y'] = iris.target\n df_iris = df_iris[df_iris['y'] < 2]\n X = df_iris.drop('y', axis=1).values\n y = df_iris['y'].values\n y = y.reshape(-1, 1)\n return X, y\n\n\ndef check(X, y, mae, model, skmodel, accuracy=1.0):\n normalize(X)\n X_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=0.2, shuffle=True)\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n correct = np.sum(y_test.flatten() == y_pred.flatten())\n n = len(X_test)\n print(f\"Got {correct} / {n} correct = {(correct / n) * 100:.2f}%\")\n\n estimated_B = model.B.reshape(-1)\n # print(estimated_B)\n\n skmodel.fit(X_train, y_train.reshape(-1))\n\n if skmodel.coef_.ndim==2:\n true_B = np.concatenate([skmodel.intercept_, skmodel.coef_[0]])\n else:\n true_B = np.concatenate([skmodel.intercept_, skmodel.coef_])\n print(\"MAE of B\", MAE(estimated_B, true_B))\n\n y_proba_estimated = model.predict_proba(X_test)\n y_proba_true = skmodel.predict_proba(X_test)\n print(f\"Log loss {log_loss(y_test, y_proba_estimated)} vs sklearn {log_loss(y_test, y_proba_true)}\")\n if log_loss(y_test, y_proba_estimated) > log_loss(y_test, y_proba_true):\n # Sometimes log loss is pretty far off despite accuracy being ok\n # depending on validation set; these are really small data sets\n assert np.abs(log_loss(y_test, y_proba_estimated) - log_loss(y_test, y_proba_true)) < 0.365\n\n r = pd.DataFrame()\n r['estimated'] = estimated_B\n r['true'] = true_B\n print(r)\n\n assert (correct/n) >= accuracy\n assert MAE(estimated_B, true_B) < mae\n\n\ndef test_synthetic():\n # From https://beckernick.github.io/logistic-regression-from-scratch/\n n = 5000\n x1 = np.random.multivariate_normal([0, 0], [[1, .75], [.75, 1]], n)\n x2 = np.random.multivariate_normal([1, 4], [[1, .75], [.75, 1]], n)\n\n X = np.vstack((x1, x2)).astype(np.float32)\n y = np.hstack((np.zeros(n), np.ones(n)))\n y = y.reshape(-1,1)\n\n # X_ = X.copy()\n # normalize(X_)\n # logit = sm.Logit(y, add1col(X_))\n # res = logit.fit()\n # print(res.summary())\n # # print(logit.fit().params)\n\n check(X, y, .025,\n LogisticRegression621(max_iter=10_000, eta=10),\n LogisticRegression(penalty='none', solver='lbfgs'),\n accuracy=.98)\n\ndef test_wine():\n X, y = wine_data()\n\n check(X, y, 2.25,\n LogisticRegression621(max_iter=30_000, eta=1),\n LogisticRegression(penalty='none', solver='lbfgs'),\n accuracy=0.88)\n\ndef test_iris():\n X, y = iris_data()\n\n check(X, y, 1.8,\n LogisticRegression621(max_iter=80_000, eta=1),\n LogisticRegression(penalty='none', solver='lbfgs'),\n accuracy=0.99)\n" ]
[ [ "sklearn.linear_model.LogisticRegression", "sklearn.datasets.load_wine", "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split", "sklearn.metrics.log_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ilovejs/RL
[ "2a28abc762b0415f38e90aff43eb8cf57dba798a" ]
[ "contents/2_Q_Learning_maze/RL_brain.py" ]
[ "\"\"\"\nThis part of code is the Q learning brain, which is a brain of the agent.\nAll decisions are made in here.\n\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass QLearningTable:\n def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):\n self.actions = actions # a list\n self.lr = learning_rate\n self.gamma = reward_decay\n self.epsilon = e_greedy\n self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)\n\n def choose_action(self, observation):\n self.check_state_exist(observation)\n # action selection\n if np.random.uniform() < self.epsilon:\n # choose best action\n #state_action = self.q_table.ix[observation, :]\n state_action = self.q_table.loc[observation, :]\n\n # same value will always stick in first item, so we permute to obscure the order (having same value)\n state_action = state_action.reindex(np.random.permutation(state_action.index)) # some actions have same value\n # TODO: action = state_action.argmax()\n action = state_action.idxmax()\n else:\n # choose random action\n action = np.random.choice(self.actions)\n return action\n\n def learn(self, s, a, r, s_):\n # check `next` state in q-table ?\n self.check_state_exist(s_)\n q_predict = self.q_table.loc[s, a]\n if s_ != 'terminal':\n q_target = r + self.gamma * self.q_table.loc[s_, :].max() # next state is not terminal\n else:\n q_target = r # next state is terminal\n #ix ?\n self.q_table.loc[s, a] += self.lr * (q_target - q_predict) # update\n\n def check_state_exist(self, state):\n # add if not exists\n if state not in self.q_table.index:\n # append new state to q table\n self.q_table = self.q_table.append(\n pd.Series(\n [0]*len(self.actions),\n index=self.q_table.columns,\n name=state,\n )\n )" ]
[ [ "numpy.random.uniform", "numpy.random.permutation", "pandas.DataFrame", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
apulis/segnet
[ "b0f1f726d4506d8ab99d8ac6146656d17a4aa438" ]
[ "segnet.py" ]
[ "\"\"\" segnet.py\n Implementation of SegNet for Semantic Segmentation.\n\"\"\"\n\n\nimport os\nimport sys\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nfrom ops import *\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n\ndef inference(inputs, phase_train):\n with tf.variable_scope(FLAGS.arch):\n h, mask = encoder(inputs, phase_train, name='encoder')\n logits = decoder(h, mask, phase_train, name='decoder')\n return logits\n\n\ndef loss(logits, labels, ignore_label=-1, cb=None, name='loss'):\n with tf.name_scope(name):\n num_class = logits.get_shape().as_list()[-1]\n epsilon = tf.constant(value=1e-10)\n logits = tf.reshape(logits, (-1, num_class))\n labels = tf.reshape(labels, (-1, 1))\n not_ign_mask = tf.where(tf.not_equal(tf.squeeze(labels), ignore_label))\n\n logits = tf.reshape(tf.gather(logits, not_ign_mask), (-1, num_class))\n labels = tf.reshape(tf.gather(labels, not_ign_mask), (-1, 1))\n\n one_hot = tf.reshape(\n tf.one_hot(labels, depth=num_class), (-1, num_class))\n\n prob = tf.nn.softmax(logits)\n\n if cb is not None:\n xe = -tf.reduce_sum(\n tf.multiply(one_hot * tf.log(prob + epsilon), cb),\n reduction_indices=[1])\n else:\n xe = tf.nn.softmax_cross_entropy_with_logits(\n labels=one_hot, logits=logits)\n\n mxe = tf.reduce_mean(xe)\n return mxe\n\n\ndef acc(logits, labels, ignore_label=-1, name='acc'):\n with tf.name_scope(name):\n logits = tf.reshape(logits, (-1, FLAGS.num_class))\n labels = tf.reshape(labels, [-1])\n\n not_ign_mask = tf.where(tf.not_equal(tf.squeeze(labels), ignore_label))\n\n logits = tf.reshape(tf.gather(logits, not_ign_mask), (-1, FLAGS.num_class))\n labels = tf.reshape(tf.gather(labels, not_ign_mask), [-1])\n\n epsilon = tf.constant(value=1e-10, name=\"epsilon\")\n logits = tf.add(logits, epsilon)\n\n prob = tf.nn.softmax(logits)\n pred = tf.cast(tf.argmax(prob, axis=1), tf.int32)\n\n correct_pred = tf.equal(pred, labels)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n return accuracy\n\n\ndef predict(logits, name='predict'):\n with tf.name_scope(name):\n prob = tf.squeeze(tf.nn.softmax(logits))\n pred = tf.squeeze(tf.cast(tf.argmax(prob, axis=-1), tf.int32))\n return prob, pred\n\n\ndef train_op(loss, opt_name, **kwargs):\n optimizer = _get_optimizer(opt_name, kwargs)\n return optimizer.minimize(loss)\n\n\ndef setup_summary(loss, acc):\n summary_loss = tf.summary.scalar('loss', loss)\n summary_acc = tf.summary.scalar('acc', acc)\n return tf.summary.merge([summary_loss, summary_acc])\n\n\ndef _get_optimizer(opt_name, params):\n if opt_name == 'adam':\n return tf.train.AdamOptimizer(params['lr'])\n elif opt_name == 'adadelta':\n return tf.train.AdadeltaOptimizer(params['lr'])\n elif opt_name == 'sgd':\n return tf.train.GradientDescentOptimizer(params['lr'])\n elif opt_name == 'momentum':\n return tf.train.MomentumOptimizer(params['lr'], params['momentum'])\n elif opt_name == 'rms':\n return tf.train.RMSPropOptimizer(params['lr'])\n elif opt_name == 'adagrad':\n return tf.train.AdagradOptimizer(params['lr'])\n else:\n print('error')\n\n\ndef n_enc_block(inputs, phase_train, n, k, name):\n h = inputs\n with tf.variable_scope(name):\n for i in range(n):\n h = conv2d(h, k, 3, stride=1, name='conv_{}'.format(i + 1))\n h = batch_norm(h, phase_train, name='bn_{}'.format(i + 1))\n h = relu(h, name='relu_{}'.format(i + 1))\n h, mask = maxpool2d_with_argmax(h, name='maxpool_{}'.format(i + 1))\n return h, mask\n\n\ndef encoder(inputs, phase_train, name='encoder'):\n with tf.variable_scope(name):\n h, mask_1 = n_enc_block(inputs, 
phase_train, n=2, k=64, name='block_1')\n h, mask_2 = n_enc_block(h, phase_train, n=2, k=128, name='block_2')\n h, mask_3 = n_enc_block(h, phase_train, n=3, k=256, name='block_3')\n h, mask_4 = n_enc_block(h, phase_train, n=3, k=512, name='block_4')\n h, mask_5 = n_enc_block(h, phase_train, n=3, k=512, name='block_5')\n return h, [mask_5, mask_4, mask_3, mask_2, mask_1]\n\n\ndef n_dec_block(inputs, mask, adj_k, phase_train, n, k, name):\n in_shape = inputs.get_shape().as_list()\n with tf.variable_scope(name):\n h = maxunpool2d(inputs, mask, name='unpool')\n for i in range(n):\n if i == (n - 1) and adj_k:\n h = conv2d(h, k / 2, 3, stride=1, name='conv_{}'.format(i + 1))\n else:\n h = conv2d(h, k, 3, stride=1, name='conv_{}'.format(i + 1))\n h = batch_norm(h, phase_train, name='bn_{}'.format(i + 1))\n h = relu(h, name='relu_{}'.format(i + 1))\n return h\n\n\ndef dec_last_conv(inputs, phase_train, k, name):\n with tf.variable_scope(name):\n h = conv2d(inputs, k, 1, name='conv')\n return h\n\n\ndef decoder(inputs, mask, phase_train, name='decoder'):\n with tf.variable_scope(name):\n h = n_dec_block(inputs, mask[0], False, phase_train, n=3, k=512, name='block_5')\n h = n_dec_block(h, mask[1], True, phase_train, n=3, k=512, name='block_4')\n h = n_dec_block(h, mask[2], True, phase_train, n=3, k=256, name='block_3')\n h = n_dec_block(h, mask[3], True, phase_train, n=2, k=128, name='block_2')\n h = n_dec_block(h, mask[4], True, phase_train, n=2, k=64, name='block_1')\n h = dec_last_conv(h, phase_train, k=FLAGS.num_class, name='last_conv')\n logits = h\n return logits\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.equal", "tensorflow.cast", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.squeeze", "tensorflow.gather", "tensorflow.train.MomentumOptimizer", "tensorflow.add", "tensorflow.name_scope", "tensorflow.argmax", "tensorflow.train.AdagradOptimizer", "tensorflow.train.RMSPropOptimizer", "tensorflow.train.AdadeltaOptimizer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.one_hot", "tensorflow.summary.merge", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.log", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
mbz/Kitchen2D
[ "aeffbe37479eeaaf031b3ab6fa9c388286876638" ]
[ "kitchen2d/pour.py" ]
[ "#!/usr/bin/env python\n# Copyright (c) 2017 Zi Wang\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport kitchen2d.kitchen_stuff as ks\nfrom kitchen2d.kitchen_stuff import Kitchen2D\nfrom kitchen2d.gripper import Gripper\nimport sys\nimport numpy as np\nimport cPickle as pickle\nimport os\nimport time\nsettings = {\n 0: {\n 'do_gui': True,\n 'sink_w': 4.,\n 'sink_h': 5.,\n 'sink_d': 1.,\n 'sink_pos_x': 50,\n 'left_table_width': 100.,\n 'right_table_width': 100.,\n 'faucet_h': 8.,\n 'faucet_w': 5.,\n 'faucet_d': 0.5,\n 'planning': False,\n 'save_fig': True\n }\n}\n\nclass Pour(object):\n def __init__(self):\n #grasp_ratio, relative_pos_x, relative_pos_y, dangle, cw1, ch1, cw2, ch2\n self.x_range = np.array(\n [[0., -10., 1., np.pi/2, 4., 4., 3., 3.], \n [1., 10., 10., np.pi, 5., 5., 4.5, 5.]])\n #[1., 10., 10., np.pi, 8., 5., 4.5, 5.]]) this is the upper bound used in the paper.\n self.lengthscale_bound = np.array([np.ones(8)*0.1, [0.15, 0.5, 0.5, 0.2, 0.5, 0.5, 0.5, 0.5]])\n self.context_idx = [4, 5, 6, 7]\n self.param_idx = [0, 1, 2, 3]\n self.dx = len(self.x_range[0])\n self.task_lengthscale = np.ones(8)*10\n self.do_gui = False\n def check_legal(self, x):\n grasp_ratio, rel_x, rel_y, dangle, cw1, ch1, cw2, ch2 = x\n print(x)\n dangle *= np.sign(rel_x)\n settings[0]['do_gui'] = self.do_gui\n kitchen = Kitchen2D(**settings[0])\n gripper = Gripper(kitchen, (5,8), 0)\n cup1 = ks.make_cup(kitchen, (0,0), 0, cw1, ch1, 0.5)\n cup2 = ks.make_cup(kitchen, (-15,0), 0, cw2, ch2, 0.5, user_data='cup2')\n gripper.set_grasped(cup2, grasp_ratio, (-15,0), 0)\n gripper.set_position((rel_x, rel_y), 0)\n if not kitchen.planning:\n g2 = gripper.simulate_itself()\n _, collision = g2.check_path_collision((rel_x, rel_y), 0, (rel_x, rel_y), dangle)\n\n if collision:\n return False\n self.kitchen = kitchen\n self.gripper = gripper\n self.cup1 = cup1\n self.cup2 = cup2\n return True\n def sampled_x(self, n):\n i = 0\n while i < n:\n x = np.random.uniform(self.x_range[0], self.x_range[1])\n legal = self.check_legal(x)\n if legal:\n i += 1\n yield x\n\n def __call__(self, x, image_name=None):\n if not self.check_legal(x):\n return -1.\n grasp_ratio, rel_x, rel_y, dangle, cw1, ch1, cw2, ch2 = x\n dangle *= np.sign(rel_x)\n if self.kitchen.planning:\n self.gripper.close()\n dpos = self.cup1.position + (rel_x, rel_y)\n self.gripper.set_position(dpos, dangle)\n self.kitchen.image_name = image_name\n self.kitchen.step()\n return\n self.kitchen.gen_liquid_in_cup(self.cup2, 500)\n self.gripper.compute_post_grasp_mass()\n self.gripper.close(timeout=0.1)\n self.gripper.check_grasp(self.cup2)\n success, score, ims, actions = self.gripper.pour(self.cup1, (rel_x, rel_y), dangle, exact_control=False, p_range=cw1/2)\n return ims, actions\n # return np.exp(2*(score*10 - 9.5)) - 1.\n \n\n\nif __name__ == '__main__':\n func = Pour()\n N = 10\n samples = func.sampled_x(N)\n x = list(samples)\n for xx in x:\n start = time.time()\n print(func(xx))\n print(time.time() - start)\n " ]
[ [ "numpy.sign", "numpy.random.uniform", "numpy.array", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
id-shiv/utillib
[ "fc1186ac9cc505b884ff7cfdeccbea2bddf78d8a" ]
[ "projects/pybot/processor.py" ]
[ "import os\nimport json\n\nimport textblob\nimport pandas as pd\nimport numpy as np\n\n# import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.corpus import words\n# nltk.download('stopwords')\n# nltk.download('words')\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nINTENTS_PATH = 'projects/pybot/knowledge_base/'\n\n\ndef load():\n data_set_columns = words.words() + ['sub_intent', 'main_intent']\n\n intent_utterance_data = list()\n for _file in os.listdir(INTENTS_PATH):\n _main_intent_file = INTENTS_PATH + _file\n with open(_main_intent_file) as _main_intent_file_obj:\n _main_intents = json.load(_main_intent_file_obj)\n for _intent in _main_intents:\n for _utterance in _main_intents[_intent]['utterances']:\n intent_utterance_data.append({\n \"main_intent\": _file.split('.')[0],\n \"sub_intent\": _intent,\n \"utterance\": _utterance\n }\n )\n\n # Create a dataframe from the list\n data_set = pd.DataFrame(intent_utterance_data, columns=data_set_columns)\n print(data_set.shape)\n\n # # populate row for intent of unknown\n # intent_utterance_data = dict()\n # for word_in_vocabulary in words.words():\n # if word_in_vocabulary in data_set_columns:\n # intent_utterance_data[word_in_vocabulary] = -1\n # else:\n # intent_utterance_data[word_in_vocabulary] = 1\n # intent_utterance_data['sub_intent'] = 'unknown'\n # intent_utterance_data['main_intent'] = 'unknown'\n # no_idea = pd.DataFrame([intent_utterance_data])\n\n # data_set = data_set.append(no_idea)\n data_set = data_set.reset_index(drop=True)\n data_set = data_set.replace(np.NaN, -1)\n print(data_set)\n\n request = \"need latest configuration of ST002\"\n\n y = data_set['main_intent']\n X = data_set.drop('main_intent', axis=1)\n X = X.drop('sub_intent', axis=1)\n\n classifier = model(X, y)\n predict(request, X, classifier)\n\n y = data_set['sub_intent']\n X = data_set.drop('sub_intent', axis=1)\n X = X.drop('main_intent', axis=1)\n\n classifier = model(X, y)\n y_predict, predict_score = predict(request, X, classifier)\n\n\ndef model(X, y):\n clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\n clf.fit(X, y)\n return clf\n\n\ndef predict(request, X, model):\n X_predict = list()\n for feature in X.columns:\n feature_found = False\n for word in request.split(' '):\n if feature == word.strip().lower():\n X_predict.append(1)\n feature_found = True\n break\n if not feature_found:\n X_predict.append(0)\n\n pred = model.predict([X_predict])\n\n probability_classes = model.predict_proba([X_predict])[0]\n # gets a dictionary of {'class_name': probability}\n prob_per_class_dictionary = dict(zip(model.classes_, probability_classes))\n\n print(prob_per_class_dictionary)\n print(pred)\n return pred, prob_per_class_dictionary\n \n\ndef _get_words_features(message, remove_stopwords=False):\n if remove_stopwords:\n # Load stop words\n stop_words = stopwords.words('english')\n message = \" \".join(word for word in message.split(' ') if word not in stop_words)\n sentence = textblob.Sentence(message)\n return sentence.word_counts\n\n\nload()" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
devincornell/sqlitedocuments
[ "16923bb3b91af5104140e49045efdc612afbc310" ]
[ "experiments/pytorch2.py" ]
[ "\nimport torch\nfrom tqdm import tqdm\nimport numpy as np\n\nimport sys\nsys.path.append('..')\nimport doctable\n\n\n\nif __name__ == '__main__':\n device = torch.device('cuda:0')\n #device = torch.device('cpu')\n\n # TASK: find sims between all points in this space\n shape = (int(2.5e5), 300)\n X = torch.rand(shape).to(device)\n X = X / X.norm(p=2, dim=1)[:,None] # row-normalize\n #print(X.norm(p=2, dim=1))\n\n sims = torch.zeros((X.shape[0],))\n for s in tqdm(doctable.chunk_slice(X.shape[0], chunk_size=100)):\n sims[s] = X.matmul(X[s,:].T).T.mean(1)\n\n lsims = list()\n for i in tqdm(range(X.shape[0])):\n lsims.append(X.mv(X[i,:]).mean().cpu())\n lsims = np.array(lsims)\n\n print(sims.shape, lsims.shape)\n print(sims[:5])\n print(lsims[:5])\n \n\n\n\n" ]
[ [ "torch.device", "numpy.array", "torch.rand", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
brianrice2/cppn
[ "3c9c05dd612a198ddc1ea213614ffb6fed917bd0" ]
[ "sampler.py" ]
[ "'''\nImplementation of Compositional Pattern Producing Networks in Tensorflow\n\nhttps://en.wikipedia.org/wiki/Compositional_pattern-producing_network\n\n@hardmaru, 2016\nUpdated @brianrice2, 2020\n\nSampler Class\n\nThis file is meant to be run inside an IPython session, as it is meant\nto be used interacively for experimentation.\n\nIt shouldn't be that hard to take bits of this code into a normal\ncommand line environment though if you want to use outside of IPython.\n\nusage:\n%run -i sampler.py\nsampler = Sampler(z_dim = 4, c_dim = 1, scale = 8.0, net_size = 32)\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom PIL import Image\nimport pylab\nimport imageio\nfrom model import CPPN\n\nmgc = get_ipython().magic\nmgc(u'matplotlib inline')\npylab.rcParams['figure.figsize'] = (10.0, 10.0)\n\nclass Sampler():\n def __init__(self, x_dim=256, y_dim=256, z_dim=8, c_dim=1, scale=10.0, net_size=32):\n self.cppn = CPPN(x_dim=x_dim, y_dim=y_dim, z_dim=z_dim,\n c_dim=c_dim, scale=scale, net_size=net_size)\n\n # saves most recent z here, in case we find a nice image and want the z-vec\n self.z = self.generate_z()\n \n def generate_z(self):\n z = np.random.uniform(-1.0, 1.0, size=(1, self.cppn.z_dim)).astype(np.float32)\n return z\n \n def generate(self, z=None, x_dim=1080, y_dim=1060, scale=10.0):\n if z is None:\n z = self.generate_z()\n else:\n z = np.reshape(z, (1, self.cppn.z_dim))\n self.z = z\n return self.cppn.generate(z, x_dim, y_dim, scale)[0]\n \n def show_image(self, image_data):\n '''\n image_data is a tensor, in [height width depth]\n image_data is NOT the PIL.Image class\n '''\n plt.subplot(1, 1, 1)\n y_dim = image_data.shape[0]\n x_dim = image_data.shape[1]\n c_dim = self.cppn.c_dim\n \n if c_dim > 1:\n plt.imshow(image_data, interpolation='nearest')\n else:\n plt.imshow(tf.reshape(image_data, (y_dim, x_dim)),\n cmap='Greys', interpolation='nearest')\n \n plt.axis('off')\n plt.show()\n \n def save_png(self, image_data, filename):\n img_data = np.array(1 - image_data)\n y_dim = image_data.shape[0]\n x_dim = image_data.shape[1]\n c_dim = self.cppn.c_dim\n \n if c_dim > 1:\n img_data = np.array(img_data.reshape((y_dim, x_dim, c_dim))*255.0,\n dtype=np.uint8)\n else:\n img_data = np.array(img_data.reshape((y_dim, x_dim))*255.0,\n dtype=np.uint8)\n \n im = Image.fromarray(img_data)\n im.save(filename)\n \n def to_image(self, image_data):\n # convert to PIL.Image format from np array (0, 1)\n img_data = np.array(1 - image_data)\n y_dim = image_data.shape[0]\n x_dim = image_data.shape[1]\n c_dim = self.cppn.c_dim\n \n if c_dim > 1:\n img_data = np.array(img_data.reshape((y_dim, x_dim, c_dim))*255.0, dtype=np.uint8)\n else:\n img_data = np.array(img_data.reshape((y_dim, x_dim))*255.0, dtype=np.uint8)\n \n im = Image.fromarray(img_data)\n return im\n \n def save_anim_gif(self, z1, z2, filename, n_frame=10, duration1=1.0,\n duration2=1.0, duration=0.1, x_dim=512, y_dim=512,\n scale=10.0, reverse=True):\n '''\n This saves an animated gif from two latent states, z1 and z2.\n n_frame: number of states in between z1 and z2 morphing effect, exclusive of z1 and z2\n duration1 and duration2 control how long z1 and z2 are shown.\n duration controls frame speed, in seconds\n '''\n delta_z = (z2 - z1) / (n_frame + 1)\n total_frames = n_frame + 2\n images = []\n \n for i in range(total_frames):\n z = z1 + delta_z*float(i)\n images.append(self.to_image(self.generate(z, x_dim, y_dim, scale)))\n print(\"processing image\", i+1)\n \n durations = [duration1] + 
([duration] * n_frame) + [duration2]\n \n if reverse: # go backwards in time back to the first state\n rev_images = list(images)\n rev_images.reverse()\n rev_images = rev_images[1:]\n images += rev_images\n durations = durations + ([duration] * n_frame) + [duration1]\n \n print(\"writing gif file...\")\n imageio.mimsave('./' + filename, images)\n print('saved!')" ]
[ [ "matplotlib.pyplot.imshow", "numpy.reshape", "tensorflow.reshape", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "numpy.random.uniform", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
MihaiLai/Disaster-Response-Classification
[ "bc495e259458cdb26bc5770cf200b8c2918831e7" ]
[ "app/run.py" ]
[ "import json\nimport re\nimport plotly\nimport pandas as pd\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sklearn.externals import joblib\nfrom sqlalchemy import create_engine\n\n\napp = Flask(__name__)\n\ndef tokenize(text):\n '''process text data\n keywords:\n text:input text\n '''\n # remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n # word tokenize\n tokens = word_tokenize(text)\n # lemmatiz\n lem = WordNetLemmatizer()\n clean_tokens = []\n for token in tokens:\n clean_tok = lem.lemmatize(token).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n\n# load data\nengine = create_engine('sqlite:///../data/DisasterResponse.db')\ndf = pd.read_sql_table('InsertTableName', engine)\n\n# load model\nmodel = joblib.load(\"../models/classifier.pkl\")\n\n\n# index webpage displays cool visuals and receives user input text for model\[email protected]('/')\[email protected]('/index')\ndef index():\n \n # extract data needed for visuals\n categories_percent = []\n categories_names = df.columns[4:]\n for col in categories_names:\n try:\n percent = df[col].value_counts()[1]/len(df)\n except:\n percent = 0\n categories_percent.append(percent)\n # create visuals\n graphs = [\n {\n 'data': [\n Bar(\n x=categories_names,\n y=categories_percent\n )\n ],\n\n 'layout': {\n 'title': 'Posibility of every categories',\n 'yaxis': {\n 'title': \"Posibility\"\n },\n 'xaxis': {\n 'title': \"Categories\"\n }\n }\n }\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\[email protected]('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '') \n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file. \n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.read_sql_table", "sklearn.externals.joblib.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
brando90/RAdam
[ "57bedd136a4c9ee281c905865daafafaaeb2afa0" ]
[ "radam/radam.py" ]
[ "import math\nimport torch\nfrom torch.optim.optimizer import Optimizer, required\n\nclass RAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n \n self.degenerated_to_sgd = degenerated_to_sgd\n if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):\n for param in params:\n if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):\n param['buffer'] = [[None, None, None] for _ in range(10)]\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)])\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n # https://github.com/pytorch/pytorch/issues/32861\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n # exp_avg_sq.mul_(beta2).addcmul_(grad, grad, 1 - beta2)\n # exp_avg_sq.mul_(beta2).addcmul_(grad, 1 - beta2)\n # exp_avg_sq.mul_(beta2).addcmul_(grad, torch.tensor(1 - beta2, device=grad.device))\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n buffered = group['buffer'][int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])\n elif self.degenerated_to_sgd:\n step_size = 1.0 / (1 - beta1 ** state['step'])\n else:\n step_size = -1\n buffered[2] = step_size\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)\n p.data.copy_(p_data_fp32)\n elif step_size > 0:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n p_data_fp32.add_(-step_size * group['lr'], exp_avg)\n p.data.copy_(p_data_fp32)\n\n return loss\n\nclass 
PlainRAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n \n self.degenerated_to_sgd = degenerated_to_sgd\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n\n super(PlainRAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(PlainRAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n p.data.copy_(p_data_fp32)\n elif self.degenerated_to_sgd:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n p_data_fp32.add_(-step_size, exp_avg)\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\nclass AdamW(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n \n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, warmup = warmup)\n super(AdamW, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(AdamW, self).__setstate__(state)\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('Adam does not support 
sparse gradients, please consider SparseAdam instead')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n \n if group['warmup'] > state['step']:\n scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']\n else:\n scheduled_lr = group['lr']\n\n step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1\n \n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)\n\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n" ]
[ [ "torch.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ShenQianwithC/HistomicsTK
[ "4ad7e72a7ebdabbdfc879254fad04ce7ca47e320" ]
[ "packages/pylibtiff/setup.py" ]
[ "#!/usr/bin/env python\nimport os\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 3 - Alpha\nIntended Audience :: Science/Research\nLicense :: OSI Approved\nProgramming Language :: Python\nTopic :: Scientific/Engineering\nTopic :: Software Development\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: Unix\nOperating System :: MacOS\n\"\"\"\n\nMAJOR = 0\nMINOR = 4\nMICRO = 3\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\n\nif os.path.exists('MANIFEST'): \n os.remove('MANIFEST')\n\n\ndef write_version_py(filename='libtiff/version.py'):\n cnt = \"\"\"\n# THIS FILE IS GENERATED FROM libtiff/setup.py\nshort_version='%(version)s'\nversion='%(version)s'\nrelease=%(isrelease)s\n\nif not release:\n version += '.dev'\n import os\n svn_version_file = os.path.join(os.path.dirname(__file__),\n '__svn_version__.py')\n svn_entries_file = os.path.join(os.path.dirname(__file__),'.svn',\n 'entries')\n if os.path.isfile(svn_version_file):\n import imp\n svn = imp.load_module('libtiff.__svn_version__',\n open(svn_version_file),\n svn_version_file,\n ('.py','U',1))\n version += svn.version\n elif os.path.isfile(svn_entries_file):\n import subprocess\n try:\n svn_version = subprocess.Popen([\"svnversion\", os.path.dirname (\n __file__)], stdout=subprocess.PIPE).communicate()[0]\n except:\n pass\n else:\n version += svn_version.strip()\n\nprint(version)\n\"\"\"\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION, 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n config.add_subpackage('libtiff')\n config.get_version('libtiff/version.py')\n config.add_data_files(('libtiff', 'LICENSE'))\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup, Extension\n\n bittools_mod = Extension('bittools',\n sources=['libtiff/src/bittools.c'])\n tif_lzw_mod = Extension('tif_lzw',\n sources=['libtiff/src/tif_lzw.c'])\n\n # Rewrite the version file everytime\n if os.path.exists('libtiff/version.py'):\n os.remove('libtiff/version.py')\n write_version_py()\n\n setup(name='libtiff',\n # version='0.3-svn',\n author='Pearu Peterson',\n author_email='[email protected]',\n license='https://github.com/pearu/pylibtiff/blob/master/LICENSE',\n url='https://github.com/pearu/pylibtiff',\n # download_url = 'http://code.google.com/p/pylibtiff/downloads/',\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n description='PyLibTiff: a Python tiff library.',\n long_description='''\\\nPyLibTiff? is a Python package that provides the following modules:\n\n libtiff - a wrapper of C libtiff library using ctypes.\n tiff - a numpy.memmap view of tiff files.\n''',\n platforms=[\"All\"],\n # packages = ['libtiff'],\n # package_dir = {'libtiff': 'libtiff'},\n configuration=configuration,\n ext_modules=[bittools_mod, tif_lzw_mod], requires=['numpy']\n )\n" ]
[ [ "numpy.distutils.core.Extension", "numpy.distutils.misc_util.Configuration" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
Joshuaalbert/bayes_filter
[ "2997d60d8cf07f875e42c0b5f07944e9ab7e9d33" ]
[ "bayes_filter/sample.py" ]
[ "\"\"\"\nThis is a modification of tensorflow probability's tfp.mcmc.sample function that allows dynamic stopping\nusing rhat criteria. When median rhat per parameters falls by less than a certain percent.\n\nFigure out proper liscensing later.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport warnings\n# Dependency imports\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.mcmc.internal import util as mcmc_util\n\nfrom tensorflow.python.ops import control_flow_util\nimport numpy as np\n\n\n__all__ = [\n \"CheckpointableStatesAndTrace\",\n \"StatesAndTrace\",\n \"sample_chain\",\n]\n\n\ndef _reduce_variance(x, axis=None, biased=True, keepdims=False):\n with tf.compat.v1.name_scope('reduce_variance'):\n x = tf.convert_to_tensor(value=x, name='x')\n mean = tf.reduce_mean(input_tensor=x, axis=axis, keepdims=True)\n biased_var = tf.reduce_mean(\n input_tensor=tf.math.squared_difference(x, mean),\n axis=axis,\n keepdims=keepdims)\n if biased:\n return biased_var\n n = _axis_size(x, axis)\n return (n / (n - 1.)) * biased_var\n\ndef _axis_size(x, axis=None):\n \"\"\"Get number of elements of `x` in `axis`, as type `x.dtype`.\"\"\"\n if axis is None:\n return tf.cast(tf.size(input=x), x.dtype)\n return tf.cast(\n tf.reduce_prod(input_tensor=tf.gather(tf.shape(input=x), axis)), x.dtype)\n\ndef _get_rhat_onestate(state, delta_rhat, rhat, sample_sum, count, m, v, decay_length=100, independent_chain_ndims=1):\n tau = tf.math.reciprocal(tf.convert_to_tensor(decay_length, state.dtype))\n tau_ = 1. - tau\n count = tau_*count + tau * tf.constant(1, state.dtype)\n\n # variance of chain means\n sample_sum = tau_*sample_sum + tau*state\n sample_mean = sample_sum/count\n chain_axis = tf.range(0, independent_chain_ndims)\n b_div_n = _reduce_variance(sample_mean, axis=chain_axis, biased=False)\n\n delta= state - m\n m = tau_*m + tau * delta/count\n v = tau_*v + tau * delta*(state - m)\n\n sample_variance = v/count #biased\n w = tf.reduce_mean(sample_variance, axis=chain_axis)\n\n N = count\n M = _axis_size(state, chain_axis)\n\n sigma_2_plus = w + b_div_n\n next_rhat = ((M + 1.) / M) * sigma_2_plus / w - (N - 1.) 
/ (M*N)\n delta_rhat = next_rhat - rhat\n\n # (tau*(1-tau) + tau)*(1-tau) + tau\n # (tau - tau^2 + tau)*(1 - tau) + tau\n # tau*( (1-tau) + 1)(1-tau) + tau\n # tau\n\n return delta_rhat, next_rhat, sample_sum, count, m, v\n\ndef _get_rhat(next_state, _delta_rhat, _rhat, _sample_sum, _count, _m, _v, independent_chain_ndims=1):\n list_like = isinstance(next_state, (tuple, list))\n if not list_like:\n next_state = [next_state]\n _delta_rhat = [_delta_rhat]\n _rhat = [_rhat]\n _sample_sum = [_sample_sum]\n _count = [_count]\n _m = [_m]\n _v = [_v]\n\n _next_delta_rhat, _next_rhat, _next_sample_sum, _next_count, _next_m, _next_v = [],[],[],[], [],[]\n\n for (delta_rhat, rhat, sample_sum, count, m, v, s) in zip(_delta_rhat, _rhat,_sample_sum, _count, _m, _v, next_state):\n state = tf.convert_to_tensor(value=s, name='state')\n next_delta_rhat, next_rhat, next_sample_sum, next_count, next_m, next_v = \\\n _get_rhat_onestate(\n state,\n delta_rhat,\n rhat,\n sample_sum,\n count,\n m,\n v,\n independent_chain_ndims=independent_chain_ndims)\n _next_delta_rhat.append(next_delta_rhat)\n _next_rhat.append(next_rhat)\n _next_sample_sum.append(next_sample_sum)\n _next_count.append(next_count)\n _next_m.append(next_m)\n _next_v.append(next_v)\n\n if not list_like:\n _next_delta_rhat, _next_rhat, _next_sample_sum, _next_count, _next_m, _next_v = (\n _next_delta_rhat[0], _next_rhat[0],\n _next_sample_sum[0], _next_count[0], _next_m[0], _next_v[0])\n\n return _next_delta_rhat, _next_rhat, _next_sample_sum, _next_count, _next_m, _next_v\n\n\ndef _initial_rhat_variables(init_state, independent_chain_ndims=1):\n initial_sample_sum, initial_count, initial_m, initial_v = [], [], [], []\n\n initial_rhats = []\n initial_delta_rhats = []\n\n list_like = isinstance(init_state, (tuple, list))\n if not list_like:\n init_state = [init_state]\n\n for s in init_state:\n state = tf.convert_to_tensor(value=s, name='init_state')\n\n initial_sample_sum.append(tf.zeros_like(state, name='sample_sum'))\n initial_count.append(tf.constant(0., dtype=state.dtype, name='count'))\n initial_m.append(tf.zeros_like(state, name='m'))\n initial_v.append(tf.zeros_like(state, name='v'))\n initial_rhats.append(tf.constant(1e15,state.dtype)*tf.ones(tf.shape(state)[independent_chain_ndims:], dtype=state.dtype))\n initial_delta_rhats.append(tf.constant(1e15,state.dtype)*tf.ones(tf.shape(state)[independent_chain_ndims:], dtype=state.dtype))\n\n\n if not list_like:\n initial_sample_sum, initial_count, initial_m, initial_v = (\n initial_sample_sum[0], initial_count[0], initial_m[0], initial_v[0]\n )\n initial_rhats = initial_rhats[0]\n initial_delta_rhats = initial_delta_rhats[0]\n\n return initial_delta_rhats, initial_rhats, initial_sample_sum, initial_count, initial_m, initial_v\n\n\n# def _get_rhat_onestate(state, delta_rhat, rhat, sample_sum, count, delta_mean, M2, independent_chain_ndims=1):\n# count += tf.constant(1, state.dtype)\n# sample_sum += state\n# sample_mean = sample_sum/count\n# chain_axis = tf.range(0, independent_chain_ndims)\n# b_div_n = _reduce_variance(sample_mean, axis=chain_axis, biased=False)\n#\n# delta = state - delta_mean\n# delta_mean += delta / count\n# delta2 = state - delta_mean\n# M2 += delta*delta2\n# sample_variance = M2/count #biased\n# w = tf.reduce_mean(sample_variance, axis=chain_axis)\n#\n# n = count\n# m = _axis_size(state, chain_axis)\n#\n# sigma_2_plus = w + b_div_n\n# next_rhat = ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) 
/ (m * n)\n# delta_rhat = next_rhat - rhat\n#\n# return delta_rhat, next_rhat, sample_sum, count, delta_mean, M2\n\n# def _get_rhat(next_state, _delta_rhat, _rhat, _sample_sum, _count, _delta_mean, _M2, independent_chain_ndims=1):\n# list_like = isinstance(next_state, (tuple, list))\n# print(next_state)\n# if not list_like:\n# next_state = [next_state]\n# _delta_rhat = [_delta_rhat]\n# _rhat = [_rhat]\n# _sample_sum = [_sample_sum]\n# _count = [_count]\n# _delta_mean = [_delta_mean]\n# _M2 = [_M2]\n#\n# _next_delta_rhat, _next_rhat, _next_sample_sum, _next_count, _next_delta_mean, _next_M2 = [],[],[],[], [],[]\n#\n# for (delta_rhat, rhat, sample_sum, count, delta_mean, M2, s) in zip(_delta_rhat, _rhat,_sample_sum, _count, _delta_mean, _M2, next_state):\n# state = tf.convert_to_tensor(value=s, name='state')\n# next_delta_rhat, next_rhat, next_sample_sum, next_count, next_delta_mean, next_M2 = \\\n# _get_rhat_onestate(\n# state,\n# delta_rhat,\n# rhat,\n# sample_sum,\n# count,\n# delta_mean,\n# M2,\n# independent_chain_ndims=independent_chain_ndims)\n# _next_delta_rhat.append(next_delta_rhat)\n# _next_rhat.append(next_rhat)\n# _next_sample_sum.append(next_sample_sum)\n# _next_count.append(next_count)\n# _next_delta_mean.append(next_delta_mean)\n# _next_M2.append(next_M2)\n#\n# if not list_like:\n# _next_delta_rhat, _next_rhat, _next_sample_sum, _next_count, _next_delta_mean, _next_M2 = (\n# _next_delta_rhat[0], _next_rhat[0],\n# _next_sample_sum[0], _next_count[0], _next_delta_mean[0], _next_M2[0])\n#\n# return _next_delta_rhat, _next_rhat, _next_sample_sum, _next_count, _next_delta_mean, _next_M2\n\n\n\n# def _initial_rhat_variables(init_state, independent_chain_ndims=1):\n# initial_sample_sum, initial_count, initial_delta_mean, initial_M2 = [], [], [], []\n#\n# initial_rhats = []\n# initial_delta_rhats = []\n#\n# list_like = isinstance(init_state, (tuple, list))\n# if not list_like:\n# init_state = [init_state]\n#\n# for s in init_state:\n# state = tf.convert_to_tensor(value=s, name='init_state')\n#\n# initial_sample_sum.append(tf.zeros_like(state))\n# initial_count.append(tf.constant(0., dtype=state.dtype))\n# initial_delta_mean.append(tf.zeros_like(state))\n# initial_M2.append(tf.zeros_like(state))\n# initial_rhats.append(tf.constant(1e15,state.dtype)*tf.ones(tf.shape(state)[independent_chain_ndims:], dtype=state.dtype))\n# initial_delta_rhats.append(tf.constant(1e15,state.dtype)*tf.ones(tf.shape(state)[independent_chain_ndims:], dtype=state.dtype))\n#\n#\n# if not list_like:\n# initial_sample_sum, initial_count, initial_delta_mean, initial_M2 = (\n# initial_sample_sum[0], initial_count[0], initial_delta_mean[0], initial_M2[0]\n# )\n# initial_rhats = initial_rhats[0]\n# initial_delta_rhats = initial_delta_rhats[0]\n#\n# return initial_delta_rhats, initial_rhats, initial_sample_sum, initial_count, initial_delta_mean, initial_M2\n\n\n\n###\n# BEGIN: Change\n\ndef trace_scan(loop_fn,\n initial_state,\n elems,\n trace_fn,\n parallel_iterations=10,\n name=None):\n \"\"\"A simplified version of `tf.scan` that has configurable tracing.\n\n This function repeatedly calls `loop_fn(state, elem)`, where `state` is the\n `initial_state` during the first iteration, and the return value of `loop_fn`\n for every iteration thereafter. `elem` is a slice of `elements` along the\n first dimension, accessed in order. Additionally, it calls `trace_fn` on the\n return value of `loop_fn`. 
The `Tensor`s in return values of `trace_fn` are\n stacked and returned from this function, such that the first dimension of\n those `Tensor`s matches the size of `elems`.\n\n Args:\n loop_fn: A callable that takes in a `Tensor` or a nested collection of\n `Tensor`s with the same structure as `initial_state`, a slice of `elems`\n and returns the same structure as `initial_state`.\n initial_state: A `Tensor` or a nested collection of `Tensor`s passed to\n `loop_fn` in the first iteration.\n elems: A `Tensor` that is split along the first dimension and each element\n of which is passed to `loop_fn`.\n trace_fn: A callable that takes in the return value of `loop_fn` and returns\n a `Tensor` or a nested collection of `Tensor`s.\n parallel_iterations: Passed to the internal `tf.while_loop`.\n name: Name scope used in this function. Default: 'trace_scan'.\n\n Returns:\n final_state: The final return value of `loop_fn`.\n trace: The same structure as the return value of `trace_fn`, but with each\n `Tensor` being a stack of the corresponding `Tensors` in the return value\n of `trace_fn` for each slice of `elems`.\n \"\"\"\n with tf.compat.v1.name_scope(\n name, 'trace_scan', [initial_state, elems]), tf.compat.v1.variable_scope(\n tf.compat.v1.get_variable_scope()) as vs:\n if vs.caching_device is None and not tf.executing_eagerly():\n vs.set_caching_device(lambda op: op.device)\n\n initial_state = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(value=x, name='initial_state'),\n initial_state)\n\n elems = tf.convert_to_tensor(value=elems, name='elems')\n\n static_length = elems.shape[0]\n if tf.compat.dimension_value(static_length) is None:\n length = tf.shape(input=elems)[0]\n else:\n length = tf.convert_to_tensor(\n value=static_length, dtype=tf.int32, name='length')\n\n # This is an TensorArray in part because of XLA, which had trouble with\n # non-statically known indices. I.e. 
elems[i] errored, but\n # elems_array.read(i) worked.\n elems_array = tf.TensorArray(\n elems.dtype, size=length, element_shape=elems.shape[1:])\n elems_array = elems_array.unstack(elems)\n\n trace_arrays = tf.nest.map_structure(\n lambda x: tf.TensorArray(x.dtype, size=length, element_shape=x.shape),\n trace_fn(initial_state))\n\n def _body(i, state, trace_arrays, rhat_and_vars):\n state = loop_fn(state, elems_array.read(i))\n trace_arrays = tf.nest.pack_sequence_as(trace_arrays, [\n a.write(i, v) for a, v in zip(\n tf.nest.flatten(trace_arrays), tf.nest.flatten(trace_fn(state)))\n ])\n rhat_and_vars = _get_rhat(state[0], *rhat_and_vars)\n return i + 1, state, trace_arrays, rhat_and_vars\n\n def _cond(i, state, trace_array, rhat_and_vars):\n default_cond = i < length\n delta_rhat, rhat = rhat_and_vars[0], rhat_and_vars[1]\n if not isinstance(rhat, (list, tuple)):\n delta_rhat, rhat = [delta_rhat], [rhat]\n\n dynamic_cond_A = tf.reduce_any([tf.greater(tf.reduce_mean(r), 1.2) for r in rhat])\n dynamic_cond_B = tf.reduce_any([tf.greater(tf.reduce_mean(tf.abs(dr)/tf.abs(r)), 0.05) for dr, r in zip(delta_rhat,rhat)])\n dynamic_cond_C = tf.reduce_any([tf.greater(tfp.stats.percentile(tf.math.abs(dr),50), 1e-4) for dr in delta_rhat])\n\n dynamic_cond = dynamic_cond_C#tf.logical_and(dynamic_cond_A, dynamic_cond_B)\n mean_rhat = tf.nest.map_structure(lambda r: tfp.stats.percentile(r,50), rhat)\n mean_drhat = tf.nest.map_structure(lambda r: tfp.stats.percentile(r,50), delta_rhat)\n rel_change = tf.nest.map_structure(lambda dr, r: tfp.stats.percentile(tf.abs(dr)/tf.abs(r), 50), delta_rhat,rhat )\n with tf.control_dependencies([tf.print(i, rel_change, mean_drhat, mean_rhat)]):\n return tf.logical_and(default_cond, dynamic_cond)\n\n init_rhat_vars = _initial_rhat_variables(initial_state[0])\n\n _, final_state, trace_arrays, rhat_and_vars = tf.while_loop(\n cond=_cond,\n body=_body,\n loop_vars=(0, initial_state, trace_arrays, init_rhat_vars),\n parallel_iterations=parallel_iterations)\n\n stacked_trace = tf.nest.map_structure(lambda x: x.stack(), trace_arrays)\n\n # Restore the static length if we know it.\n def _merge_static_length(x):\n x.set_shape(tf.TensorShape(static_length).concatenate(x.shape[1:]))\n return x\n\n stacked_trace = tf.nest.map_structure(_merge_static_length, stacked_trace)\n return final_state, stacked_trace\n\n###\n# END: Change\n\n# Cause all warnings to always be triggered.\n# Not having this means subsequent calls wont trigger the warning.\nwarnings.filterwarnings(\"always\",\n module=\"tensorflow_probability.*sample\",\n append=True) # Don't override user-set filters.\n\n\nclass StatesAndTrace(\n collections.namedtuple(\"StatesAndTrace\", \"all_states, trace\")):\n \"\"\"States and auxiliary trace of an MCMC chain.\n The first dimension of all the `Tensor`s in this structure is the same and\n represents the chain length.\n Attributes:\n all_states: A `Tensor` or a nested collection of `Tensor`s representing the\n MCMC chain state.\n trace: A `Tensor` or a nested collection of `Tensor`s representing the\n auxiliary values traced alongside the chain.\n \"\"\"\n __slots__ = ()\n\n\nclass CheckpointableStatesAndTrace(\n collections.namedtuple(\"CheckpointableStatesAndTrace\",\n \"all_states, trace, final_kernel_results\")):\n \"\"\"States and auxiliary trace of an MCMC chain.\n The first dimension of all the `Tensor`s in the `all_states` and `trace`\n attributes is the same and represents the chain length.\n Attributes:\n all_states: A `Tensor` or a nested collection of `Tensor`s 
representing the\n MCMC chain state.\n trace: A `Tensor` or a nested collection of `Tensor`s representing the\n auxiliary values traced alongside the chain.\n final_kernel_results: A `Tensor` or a nested collection of `Tensor`s\n representing the final value of the auxiliary state of the\n `TransitionKernel` that generated this chain.\n \"\"\"\n __slots__ = ()\n\n\ndef sample_chain(\n num_results,\n current_state,\n previous_kernel_results=None,\n kernel=None,\n num_burnin_steps=0,\n num_steps_between_results=0,\n trace_fn=lambda current_state, kernel_results: kernel_results,\n return_final_kernel_results=False,\n parallel_iterations=10,\n cond_fn=None,\n name=None,\n):\n \"\"\"Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.\n This function samples from an Markov chain at `current_state` and whose\n stationary distribution is governed by the supplied `TransitionKernel`\n instance (`kernel`).\n This function can sample from multiple chains, in parallel. (Whether or not\n there are multiple chains is dictated by the `kernel`.)\n The `current_state` can be represented as a single `Tensor` or a `list` of\n `Tensors` which collectively represent the current state.\n Since MCMC states are correlated, it is sometimes desirable to produce\n additional intermediate states, and then discard them, ending up with a set of\n states with decreased autocorrelation. See [Owen (2017)][1]. Such \"thinning\"\n is made possible by setting `num_steps_between_results > 0`. The chain then\n takes `num_steps_between_results` extra steps between the steps that make it\n into the results. The extra steps are never materialized (in calls to\n `sess.run`), and thus do not increase memory requirements.\n Warning: when setting a `seed` in the `kernel`, ensure that `sample_chain`'s\n `parallel_iterations=1`, otherwise results will not be reproducible.\n In addition to returning the chain state, this function supports tracing of\n auxiliary variables used by the kernel. The traced values are selected by\n specifying `trace_fn`. By default, all kernel results are traced but in the\n future the default will be changed to no results being traced, so plan\n accordingly. See below for some examples of this feature.\n Args:\n num_results: Integer number of Markov chain draws.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s).\n previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s\n representing internal calculations made within the previous call to this\n function (or as returned by `bootstrap_results`).\n kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step\n of the Markov chain.\n num_burnin_steps: Integer number of chain steps to take before starting to\n collect results.\n Default value: 0 (i.e., no burn-in).\n num_steps_between_results: Integer number of chain steps between collecting\n a result. Only one out of every `num_steps_between_samples + 1` steps is\n included in the returned results. The number of returned chain states is\n still equal to `num_results`. 
Default value: 0 (i.e., no thinning).\n trace_fn: A callable that takes in the current chain state and the previous\n kernel results and return a `Tensor` or a nested collection of `Tensor`s\n that is then traced along with the chain state.\n return_final_kernel_results: If `True`, then the final kernel results are\n returned alongside the chain state and the trace specified by the\n `trace_fn`.\n parallel_iterations: The number of iterations allowed to run in parallel. It\n must be a positive integer. See `tf.while_loop` for more details.\n cond_fn: callable\n Dynmaic termination condition that returns True if the sampler should continue.\n Call pattern func(i, state, trace)\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"mcmc_sample_chain\").\n Returns:\n checkpointable_states_and_trace: if `return_final_kernel_results` is\n `True`. The return value is an instance of\n `CheckpointableStatesAndTrace`.\n all_states: if `return_final_kernel_results` is `False` and `trace_fn` is\n `None`. The return value is a `Tensor` or Python list of `Tensor`s\n representing the state(s) of the Markov chain(s) at each result step. Has\n same shape as input `current_state` but with a prepended\n `num_results`-size dimension.\n states_and_trace: if `return_final_kernel_results` is `False` and\n `trace_fn` is not `None`. The return value is an instance of\n `StatesAndTrace`.\n #### Examples\n ##### Sample from a diagonal-variance Gaussian.\n I.e.,\n ```none\n for i=1..n:\n x[i] ~ MultivariateNormal(loc=0, scale=diag(true_stddev)) # likelihood\n ```\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n dims = 10\n true_stddev = np.sqrt(np.linspace(1., 3., dims))\n likelihood = tfd.MultivariateNormalDiag(loc=0., scale_diag=true_stddev)\n states = tfp.mcmc.sample_chain(\n num_results=1000,\n num_burnin_steps=500,\n current_state=tf.zeros(dims),\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=likelihood.log_prob,\n step_size=0.5,\n num_leapfrog_steps=2),\n trace_fn=None)\n sample_mean = tf.reduce_mean(states, axis=0)\n # ==> approx all zeros\n sample_stddev = tf.sqrt(tf.reduce_mean(\n tf.squared_difference(states, sample_mean),\n axis=0))\n # ==> approx equal true_stddev\n ```\n ##### Sampling from factor-analysis posteriors with known factors.\n I.e.,\n ```none\n # prior\n w ~ MultivariateNormal(loc=0, scale=eye(d))\n for i=1..n:\n # likelihood\n x[i] ~ Normal(loc=w^T F[i], scale=1)\n ```\n where `F` denotes factors.\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n # Specify model.\n def make_prior(dims):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims))\n def make_likelihood(weights, factors):\n return tfd.MultivariateNormalDiag(\n loc=tf.matmul(weights, factors, adjoint_b=True))\n def joint_log_prob(num_weights, factors, x, w):\n return (make_prior(num_weights).log_prob(w) +\n make_likelihood(w, factors).log_prob(x))\n def unnormalized_log_posterior(w):\n # Posterior is proportional to: `p(W, X=x | factors)`.\n return joint_log_prob(num_weights, factors, x, w)\n # Setup data.\n num_weights = 10 # == d\n num_factors = 40 # == n\n num_chains = 100\n weights = make_prior(num_weights).sample(1)\n factors = tf.random_normal([num_factors, num_weights])\n x = make_likelihood(weights, factors).sample()\n # Sample from Hamiltonian Monte Carlo Markov Chain.\n # Get `num_results` samples from `num_chains` independent chains.\n 
chains_states, kernels_results = tfp.mcmc.sample_chain(\n num_results=1000,\n num_burnin_steps=500,\n current_state=tf.zeros([num_chains, num_weights], name='init_weights'),\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=unnormalized_log_posterior,\n step_size=0.1,\n num_leapfrog_steps=2))\n # Compute sample stats.\n sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])\n # ==> approx equal to weights\n sample_var = tf.reduce_mean(\n tf.squared_difference(chains_states, sample_mean),\n axis=[0, 1])\n # ==> less than 1\n ```\n ##### Custom tracing functions.\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n likelihood = tfd.Normal(loc=0., scale=1.)\n def sample_chain(trace_fn):\n return tfp.mcmc.sample_chain(\n num_results=1000,\n num_burnin_steps=500,\n current_state=0.,\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=likelihood.log_prob,\n step_size=0.5,\n num_leapfrog_steps=2),\n trace_fn=trace_fn)\n def trace_log_accept_ratio(states, previous_kernel_results):\n return previous_kernel_results.log_accept_ratio\n def trace_everything(states, previous_kernel_results):\n return previous_kernel_results\n _, log_accept_ratio = sample_chain(trace_fn=trace_log_accept_ratio)\n _, kernel_results = sample_chain(trace_fn=trace_everything)\n acceptance_prob = tf.exp(tf.minimum(log_accept_ratio_, 0.))\n # Equivalent to, but more efficient than:\n acceptance_prob = tf.exp(tf.minimum(kernel_results.log_accept_ratio_, 0.))\n ```\n #### References\n [1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.\n _Technical Report_, 2017.\n http://statweb.stanford.edu/~owen/reports/bestthinning.pdf\n \"\"\"\n\n if not kernel.is_calibrated:\n warnings.warn(\"supplied `TransitionKernel` is not calibrated. Markov \"\n \"chain may not converge to intended target distribution.\")\n with tf.compat.v1.name_scope(\n name, \"mcmc_sample_chain\",\n [num_results, num_burnin_steps, num_steps_between_results]):\n num_results = tf.convert_to_tensor(\n value=num_results, dtype=tf.int32, name=\"num_results\")\n num_burnin_steps = tf.convert_to_tensor(\n value=num_burnin_steps, dtype=tf.int32, name=\"num_burnin_steps\")\n num_steps_between_results = tf.convert_to_tensor(\n value=num_steps_between_results,\n dtype=tf.int32,\n name=\"num_steps_between_results\")\n current_state = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(value=x, name=\"current_state\"),\n current_state)\n if previous_kernel_results is None:\n previous_kernel_results = kernel.bootstrap_results(current_state)\n\n if trace_fn is None:\n # It simplifies the logic to use a dummy function here.\n trace_fn = lambda *args: ()\n no_trace = True\n else:\n no_trace = False\n if trace_fn is sample_chain.__defaults__[4]:\n warnings.warn(\"Tracing all kernel results by default is deprecated. 
Set \"\n \"the `trace_fn` argument to None (the future default \"\n \"value) or an explicit callback that traces the values \"\n \"you are interested in.\")\n\n def _trace_scan_fn(state_and_results, num_steps):\n next_state, current_kernel_results = mcmc_util.smart_for_loop(\n loop_num_iter=num_steps,\n body_fn=kernel.one_step,\n initial_loop_vars=list(state_and_results),\n parallel_iterations=parallel_iterations)\n return next_state, current_kernel_results\n\n (_, final_kernel_results), (all_states, trace) = trace_scan(\n loop_fn=_trace_scan_fn,\n initial_state=(current_state, previous_kernel_results),\n elems=tf.one_hot(\n indices=0,\n depth=num_results,\n on_value=1 + num_burnin_steps,\n off_value=1 + num_steps_between_results,\n dtype=tf.int32),\n # pylint: disable=g-long-lambda\n trace_fn=lambda state_and_results: (state_and_results[0],\n trace_fn(*state_and_results)),\n # pylint: enable=g-long-lambda\n parallel_iterations=parallel_iterations)\n\n if return_final_kernel_results:\n return CheckpointableStatesAndTrace(\n all_states=all_states,\n trace=trace,\n final_kernel_results=final_kernel_results)\n else:\n if no_trace:\n return all_states\n else:\n return StatesAndTrace(all_states=all_states, trace=trace)" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.math.abs", "tensorflow.abs", "tensorflow.nest.flatten", "tensorflow.while_loop", "tensorflow.compat.v1.get_variable_scope", "tensorflow.compat.v1.name_scope", "tensorflow.TensorShape", "tensorflow.executing_eagerly", "tensorflow.shape", "tensorflow.TensorArray", "tensorflow.compat.dimension_value", "tensorflow.zeros_like", "tensorflow.one_hot", "tensorflow.print", "tensorflow.size", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.math.squared_difference", "tensorflow.nest.map_structure", "tensorflow.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
fredyzhang5532/retinaface-tf2
[ "2195b3cc4161d8b8e8c09389ee46f4e812adc945" ]
[ "nets/mobilenet.py" ]
[ "from tensorflow.keras import backend as K\r\nfrom tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,\r\n DepthwiseConv2D)\r\n\r\n\r\n#----------------------------------#\r\n# 普通的卷积块\r\n#----------------------------------#\r\ndef _conv_block(inputs, filters, kernel=(3, 3), strides=(1, 1)):\r\n x = Conv2D(filters, kernel,\r\n padding='same',\r\n use_bias=False,\r\n strides=strides,\r\n name='conv1')(inputs)\r\n x = BatchNormalization(name='conv1_bn')(x)\r\n return Activation(relu6, name='conv1_relu')(x)\r\n\r\n#----------------------------------#\r\n# 深度可分离卷积块\r\n#----------------------------------#\r\ndef _depthwise_conv_block(inputs, pointwise_conv_filters,\r\n depth_multiplier=1, strides=(1, 1), block_id=1):\r\n\r\n x = DepthwiseConv2D((3, 3),\r\n padding='same',\r\n depth_multiplier=depth_multiplier,\r\n strides=strides,\r\n use_bias=False,\r\n name='conv_dw_%d' % block_id)(inputs)\r\n\r\n x = BatchNormalization(name='conv_dw_%d_bn' % block_id)(x)\r\n x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)\r\n\r\n x = Conv2D(pointwise_conv_filters, (1, 1),\r\n padding='same',\r\n use_bias=False,\r\n strides=(1, 1),\r\n name='conv_pw_%d' % block_id)(x)\r\n x = BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)\r\n return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)\r\n\r\ndef relu6(x):\r\n return K.relu(x, max_value=6)\r\n\r\ndef MobileNet(img_input, depth_multiplier=1):\r\n # 640,640,3 -> 320,320,8\r\n x = _conv_block(img_input, 8, strides=(2, 2))\r\n # 320,320,8 -> 320,320,16\r\n x = _depthwise_conv_block(x, 16, depth_multiplier, block_id=1)\r\n\r\n # 320,320,16 -> 160,160,32\r\n x = _depthwise_conv_block(x, 32, depth_multiplier, strides=(2, 2), block_id=2)\r\n x = _depthwise_conv_block(x, 32, depth_multiplier, block_id=3)\r\n\r\n # 160,160,32 -> 80,80,64\r\n x = _depthwise_conv_block(x, 64, depth_multiplier, strides=(2, 2), block_id=4)\r\n x = _depthwise_conv_block(x, 64, depth_multiplier, block_id=5)\r\n feat1 = x\r\n\r\n # 80,80,64 -> 40,40,128\r\n x = _depthwise_conv_block(x, 128, depth_multiplier, strides=(2, 2), block_id=6)\r\n x = _depthwise_conv_block(x, 128, depth_multiplier, block_id=7)\r\n x = _depthwise_conv_block(x, 128, depth_multiplier, block_id=8)\r\n x = _depthwise_conv_block(x, 128, depth_multiplier, block_id=9)\r\n x = _depthwise_conv_block(x, 128, depth_multiplier, block_id=10)\r\n x = _depthwise_conv_block(x, 128, depth_multiplier, block_id=11)\r\n feat2 = x\r\n\r\n # 40,40,128 -> 20,20,256\r\n x = _depthwise_conv_block(x, 256, depth_multiplier, strides=(2, 2), block_id=12)\r\n x = _depthwise_conv_block(x, 256, depth_multiplier, block_id=13)\r\n feat3 = x\r\n\r\n return feat1, feat2, feat3\r\n\r\n\r\n" ]
[ [ "tensorflow.keras.layers.DepthwiseConv2D", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.backend.relu" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
MartinPdS/PyMieSim
[ "2560c7f4009df5d05bcb0ce8e929aa7baa7be8de" ]
[ "PyMieSim/Tools/FiberModes.py" ]
[ "import numpy as np\nimport fibermodes\n\nfrom PyMieSim.Physics import FraunhoferDiffraction\nfrom PyMieSim.Tools.utils import Normalize\nfrom PyMieSim.Tools.Directories import *\n\n\nLPList = [(0,1),\n (0,2),\n (0,3),\n (1,1),\n (1,2),\n (1,3),\n (2,1),\n (2,2),\n (3,1),\n (3,2),\n (4,1),\n (5,1)]\n\n\ndef SMF28(mode, Num):\n \"\"\"Function return an instance of the fiber class specific for a\n SMF28 fiber optic .\n\n \"\"\"\n import fibermodes\n\n CoreDiameter = 8.2e-6\n cladDiameter = 125e-6\n\n Fiber = fiber()\n\n SFactor = 120\n\n\n Field = fibermodes.field.Field(Fiber.source,\n fibermodes.Mode(fibermodes.ModeFamily.HE, mode[0]+1, mode[1]),\n 940e-9,\n Fiber.CoreDiameter*Num/SFactor,\n Num).Ex()\n\n return np.array(Field, copy=False)\n\n\ndef GenLPfiles(LPList, Num=251):\n \"\"\"Function generate numpy files containing the LP mode field.\n The file directory is: \"PyMieSim/LPmodes/LP*.npy\"\n\n Parameters\n ----------\n LPList : :class:`list`\n List of the modes to be computed.\n Num : :class:`int`\n Number of points to evaluate the mode field.\n\n \"\"\"\n for mode in LPList:\n\n filename = f'{LPModePath}/LP{mode[0]}{mode[1]}.npy'\n\n modeField = SMF28(mode = (mode[0], mode[1]) , Num=Num)\n\n np.save(filename, modeField)\n\n print(f'Mode LP{mode[0]}{mode[1]} done!')\n\n print('Files are saved in \"PyMieSim/LPmodes/LP*.npy\" ')\n\n\ndef Genfiles(LPList, padWidth = 2000, Num=251):\n \"\"\"Function generate numpy files containing the FarField of the LP modes.\n The file directory is: \"PyMieSim/LPmodes/FLP*.npy\"\n\n Parameters\n ----------\n LPList : :class:`list`\n List of the modes to be computed.\n padWidth : :class:`int`\n The padding for the fourier transform, the higher the larger is the farfield.\n Num : :class:`int`\n Number of points to evaluate the mode field.\n\n \"\"\"\n\n if Num % 2 == 0 : Num += 1\n PAD = ((padWidth,padWidth),(padWidth,padWidth))\n\n for mode in LPList:\n\n filename = f'{LPModePath}/FLP{mode[0]}{mode[1]}.npy'\n\n FmodeField = SMF28(mode = (mode[0], mode[1]) , Num=Num)\n\n FmodeField = np.pad(array = FmodeField,\n pad_width = PAD,\n mode = 'constant')\n\n FmodeField = FraunhoferDiffraction(FmodeField)[padWidth:-padWidth, padWidth:-padWidth]\n\n FmodeField = Normalize(FmodeField)\n\n np.save(filename, FmodeField)\n\n print(f'Fourier Mode LP{mode[0]}{mode[1]} done!')\n\n print('Files are saved in \"PyMieSim/LPmodes/FLP*.npy\" ')\n\n\nclass fiber(object):\n \"\"\"Class generating a fiber object from fibermodes package\n (see requirement.txt).\n\n Parameters\n ----------\n core_radius : :class:`float`\n Radius of the core of the fiber.\n core_index : :class:`float`\n Index of the core of the fiber.\n clad_radius : :class:`float`\n Radius of the clad of the fiber.\n clad_index : :class:`float`\n Index of the clad of the fiber.\n\n \"\"\"\n\n def __init__(self,\n core_radius: float = 8.2e-6,\n core_index: float = 1.4456,\n clad_radius: float = 125e-6,\n clad_index: float = 1.4444):\n\n self.MaxDirect = 2 * clad_radius\n\n self.CoreDiameter = core_radius\n\n factory = fibermodes.FiberFactory()\n\n factory.addLayer(name = 'core',\n radius = core_radius,\n material = 'Fixed',\n geometry = \"StepIndex\",\n index = 1.4489)\n\n factory.addLayer(name = 'cladding',\n material = 'Fixed',\n index = 1)\n\n self.source = factory[0]\n\n\nif __name__=='__main__':\n GenLPFourierfiles(LPList)\n" ]
[ [ "numpy.array", "numpy.pad", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
choprahetarth/DeblurGANv2
[ "d471fb102a30ab380492ef5309af711802a309d7" ]
[ "models/fpn_densenet.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom torchvision.models import densenet121\n\n\nclass FPNSegHead(nn.Module):\n def __init__(self, num_in, num_mid, num_out):\n super().__init__()\n\n self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False)\n self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False)\n\n def forward(self, x):\n x = nn.functional.relu(self.block0(x), inplace=True)\n x = nn.functional.relu(self.block1(x), inplace=True)\n return x\n\n\nclass FPNDense(nn.Module):\n\n def __init__(self, output_ch=3, num_filters=128, num_filters_fpn=256, pretrained=True):\n super().__init__()\n\n # Feature Pyramid Network (FPN) with four feature maps of resolutions\n # 1/4, 1/8, 1/16, 1/32 and `num_filters` filters for all feature maps.\n\n self.fpn = FPN(num_filters=num_filters_fpn, pretrained=pretrained)\n\n # The segmentation heads on top of the FPN\n\n self.head1 = FPNSegHead(num_filters_fpn, num_filters, num_filters)\n self.head2 = FPNSegHead(num_filters_fpn, num_filters, num_filters)\n self.head3 = FPNSegHead(num_filters_fpn, num_filters, num_filters)\n self.head4 = FPNSegHead(num_filters_fpn, num_filters, num_filters)\n\n self.smooth = nn.Sequential(\n nn.Conv2d(4 * num_filters, num_filters, kernel_size=3, padding=1),\n nn.BatchNorm2d(num_filters),\n nn.ReLU(),\n )\n\n self.smooth2 = nn.Sequential(\n nn.Conv2d(num_filters, num_filters // 2, kernel_size=3, padding=1),\n nn.BatchNorm2d(num_filters // 2),\n nn.ReLU(),\n )\n\n self.final = nn.Conv2d(num_filters // 2, output_ch, kernel_size=3, padding=1)\n\n def forward(self, x):\n map0, map1, map2, map3, map4 = self.fpn(x)\n\n map4 = nn.functional.upsample(self.head4(map4), scale_factor=8, mode=\"nearest\")\n map3 = nn.functional.upsample(self.head3(map3), scale_factor=4, mode=\"nearest\")\n map2 = nn.functional.upsample(self.head2(map2), scale_factor=2, mode=\"nearest\")\n map1 = nn.functional.upsample(self.head1(map1), scale_factor=1, mode=\"nearest\")\n\n smoothed = self.smooth(torch.cat([map4, map3, map2, map1], dim=1))\n smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode=\"nearest\")\n smoothed = self.smooth2(smoothed + map0)\n smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode=\"nearest\")\n\n final = self.final(smoothed)\n return torch.tanh(final)\n\n def unfreeze(self):\n for param in self.fpn.parameters():\n param.requires_grad = True\n\n\nclass FPN(nn.Module):\n\n def __init__(self, num_filters=256, pretrained=True):\n \"\"\"Creates an `FPN` instance for feature extraction.\n Args:\n num_filters: the number of filters in each output pyramid level\n pretrained: use ImageNet pre-trained backbone feature extractor\n \"\"\"\n\n super().__init__()\n\n self.features = densenet121(pretrained=pretrained).features\n\n self.enc0 = nn.Sequential(self.features.conv0,\n self.features.norm0,\n self.features.relu0)\n self.pool0 = self.features.pool0\n self.enc1 = self.features.denseblock1 # 256\n self.enc2 = self.features.denseblock2 # 512\n self.enc3 = self.features.denseblock3 # 1024\n self.enc4 = self.features.denseblock4 # 2048\n self.norm = self.features.norm5 # 2048\n\n self.tr1 = self.features.transition1 # 256\n self.tr2 = self.features.transition2 # 512\n self.tr3 = self.features.transition3 # 1024\n\n self.lateral4 = nn.Conv2d(1024, num_filters, kernel_size=1, bias=False)\n self.lateral3 = nn.Conv2d(1024, num_filters, kernel_size=1, bias=False)\n self.lateral2 = nn.Conv2d(512, num_filters, kernel_size=1, bias=False)\n self.lateral1 = nn.Conv2d(256, 
num_filters, kernel_size=1, bias=False)\n self.lateral0 = nn.Conv2d(64, num_filters // 2, kernel_size=1, bias=False)\n\n def forward(self, x):\n # Bottom-up pathway, from ResNet\n enc0 = self.enc0(x)\n\n pooled = self.pool0(enc0)\n\n enc1 = self.enc1(pooled) # 256\n tr1 = self.tr1(enc1)\n\n enc2 = self.enc2(tr1) # 512\n tr2 = self.tr2(enc2)\n\n enc3 = self.enc3(tr2) # 1024\n tr3 = self.tr3(enc3)\n\n enc4 = self.enc4(tr3) # 2048\n enc4 = self.norm(enc4)\n\n # Lateral connections\n\n lateral4 = self.lateral4(enc4)\n lateral3 = self.lateral3(enc3)\n lateral2 = self.lateral2(enc2)\n lateral1 = self.lateral1(enc1)\n lateral0 = self.lateral0(enc0)\n\n # Top-down pathway\n\n map4 = lateral4\n map3 = lateral3 + nn.functional.upsample(map4, scale_factor=2, mode=\"nearest\")\n map2 = lateral2 + nn.functional.upsample(map3, scale_factor=2, mode=\"nearest\")\n map1 = lateral1 + nn.functional.upsample(map2, scale_factor=2, mode=\"nearest\")\n\n return lateral0, map1, map2, map3, map4\n" ]
[ [ "torch.nn.functional.upsample", "torch.nn.Sequential", "torch.cat", "torch.nn.Conv2d", "torch.tanh", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Qlanowski/rangle
[ "53299209e5e1fb9ce1c9eed4cf44ac34684dba02" ]
[ "evo_utils.py" ]
[ "import os\nimport os.path as osp\nimport pickle\nimport sys\nimport tempfile\nfrom contextlib import contextmanager\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph\n\n\n@contextmanager\ndef suppress_stdout():\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\n\ndef detect_hardware(tpu_name):\n try:\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu_name) # TPU detection\n except ValueError:\n tpu = None\n gpus = tf.config.experimental.list_logical_devices(\"GPU\")\n\n # Select appropriate distribution strategy\n if tpu:\n tf.config.experimental_connect_to_cluster(tpu)\n tf.tpu.experimental.initialize_tpu_system(tpu)\n strategy = tf.distribute.TPUStrategy(tpu)\n print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])\n elif len(gpus) > 1:\n strategy = tf.distribute.MirroredStrategy([gpu.name for gpu in gpus])\n print('Running on multiple GPUs ', [gpu.name for gpu in gpus])\n elif len(gpus) == 1:\n strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU\n print('Running on single GPU ', gpus[0].name)\n else:\n strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU\n print('Running on CPU')\n print(\"Number of accelerators: \", strategy.num_replicas_in_sync)\n return tpu, strategy\n\n\ndef get_flops(model, write_path=tempfile.NamedTemporaryFile().name):\n concrete = tf.function(lambda inputs: model(inputs))\n concrete_func = concrete.get_concrete_function(\n [tf.TensorSpec([1, *inputs.shape[1:]]) for inputs in model.inputs])\n frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(concrete_func)\n with tf.Graph().as_default() as graph:\n tf.graph_util.import_graph_def(graph_def, name='')\n run_meta = tf.compat.v1.RunMetadata()\n opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()\n if write_path:\n opts['output'] = 'file:outfile={}'.format(write_path) # suppress output\n flops = tf.compat.v1.profiler.profile(graph=graph, run_meta=run_meta, cmd=\"op\", options=opts)\n return flops.total_float_ops\n\n\ndef add_regularization(model, regularizer=tf.keras.regularizers.l2(0.0001)):\n if not isinstance(regularizer, tf.keras.regularizers.Regularizer):\n print(\"Regularizer must be a subclass of tf.keras.regularizers.Regularizer\")\n return model\n\n for layer in model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n\n # When we change the layers attributes, the change only happens in the model config file\n model_json = model.to_json()\n\n # Save the weights before reloading the model.\n tmp_weights_path = os.path.join(tempfile.gettempdir(), 'tmp_weights.h5')\n model.save_weights(tmp_weights_path)\n\n # load the model from the config\n model = tf.keras.models.model_from_json(model_json)\n\n # Reload the model weights\n model.load_weights(tmp_weights_path, by_name=True)\n return model\n\n\ndef partial_weight_transfer(child_layer, parent_weights, disp):\n child_weights = child_layer.get_weights()\n for i, child_weight in enumerate(child_weights):\n parent_weight = parent_weights[i]\n\n if disp:\n print('Transferring partial weights for layer {}: {} -> {}'.format(\n child_layer.name, parent_weight.shape, child_weight.shape))\n\n # CONVOLUTION\n if len(child_weight.shape) == 4:\n # (child kernel size, input channels 
child, output channels child), ...\n (kc, icc, occ), (kp, icp, ocp) = child_weight.shape[1:], parent_weight.shape[1:]\n\n if (icc > icp and occ > ocp) or (icc > icp and occ == ocp) or (\n icc == icp and occ > ocp):\n if kc == kp:\n child_weights[i][:, :, :icp, :ocp] = parent_weight\n elif kc < kp:\n p = (kp - kc) // 2 # pad\n child_weights[i][:, :, :icp, :ocp] = parent_weight[p:p + kc, p:p + kc, :, :]\n elif kc > kp:\n p = (kc - kp) // 2\n child_weights[i][p:p + kp, p:p + kp, :icp, :ocp] = parent_weight\n\n elif (icc < icp and occ > ocp) or (icc < icp and occ == ocp):\n if kc == kp:\n child_weights[i][:, :, :, :ocp] = parent_weight[:, :, :icc, :]\n elif kc < kp:\n p = (kp - kc) // 2 # pad\n child_weights[i][:, :, :, :ocp] = parent_weight[p:p + kc, p:p + kc, :icc, :]\n elif kc > kp:\n p = (kc - kp) // 2\n child_weights[i][p:p + kp, p:p + kp, :, :ocp] = parent_weight[:, :, :icc, :]\n\n elif (icc > icp and occ < ocp) or (icc == icp and occ < ocp):\n if kc == kp:\n child_weights[i][:, :, :icp, :] = parent_weight[:, :, :, :occ]\n elif kc < kp:\n p = (kp - kc) // 2 # pad\n child_weights[i][:, :, :icp, :] = parent_weight[p:p + kc, p:p + kc, :, :occ]\n elif kc > kp:\n p = (kc - kp) // 2\n child_weights[i][p:p + kp, p:p + kp, :icp, :] = parent_weight[:, :, :, :occ]\n\n elif icc < icp and occ < ocp:\n if kc == kp:\n child_weights[i] = parent_weight[:, :, :icc, :occ]\n elif kc < kp:\n p = (kp - kc) // 2 # pad\n child_weights[i] = parent_weight[p:p + kc, p:p + kc, :icc, :occ]\n elif kc > kp:\n p = (kc - kp) // 2\n child_weights[i][p:p + kp, p:p + kp, :, :] = parent_weight[:, :, :icc, :occ]\n\n # DENSE\n elif len(child_weight.shape) == 2:\n icc, icp = child_weight.shape[0], parent_weight.shape[0]\n if icc < icp:\n child_weights[i] = parent_weight[:icc, :]\n else:\n weight_filler = np.zeros((icc - icp, child_weight.shape[1]))\n child_weights[i] = np.concatenate((parent_weight, weight_filler), axis=0)\n\n # BATCH NORM\n elif len(child_weight.shape) == 1:\n icc, icp = child_weight.shape[0], parent_weight.shape[0]\n if icc < icp:\n child_weights[i] = parent_weight[:icc]\n else:\n weight_filler = np.zeros((icc - icp,))\n weight_filler[:] = np.mean(parent_weight)\n child_weights[i] = np.concatenate((parent_weight, weight_filler), axis=0)\n try:\n child_layer.set_weights(child_weights)\n except:\n\n print(\"Partial weight transfer failed for '{}'\".format(child_layer.name))\n\n\ndef get_models(run_dir):\n meta_files, saved_models, genotypes = [], [], {}\n gens = sorted(os.listdir(run_dir))\n gens = [g for g in gens if '.pkl' not in g]\n for g in gens:\n gen_models = sorted(os.listdir(osp.join(run_dir, g)))\n gen_meta = [m for m in gen_models if 'meta' in m]\n gen_models = [m for m in gen_models if '.h5' in m]\n meta_files.extend(gen_meta)\n saved_models.extend(gen_models)\n for m in gen_meta:\n meta_data = pickle.load(open(osp.join(run_dir, g, m), 'rb'))\n genotypes[np.int(m.split('_')[1])] = meta_data['config'].MODEL.GENOTYPE\n return meta_files, saved_models, genotypes\n" ]
[ [ "tensorflow.compat.v1.profiler.ProfileOptionBuilder.float_operation", "tensorflow.distribute.cluster_resolver.TPUClusterResolver", "numpy.concatenate", "numpy.mean", "tensorflow.Graph", "tensorflow.keras.regularizers.l2", "tensorflow.tpu.experimental.initialize_tpu_system", "tensorflow.compat.v1.profiler.profile", "numpy.zeros", "tensorflow.config.experimental.list_logical_devices", "tensorflow.graph_util.import_graph_def", "tensorflow.config.experimental_connect_to_cluster", "tensorflow.distribute.get_strategy", "tensorflow.compat.v1.RunMetadata", "tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2_as_graph", "tensorflow.distribute.TPUStrategy", "tensorflow.keras.models.model_from_json", "tensorflow.TensorSpec", "tensorflow.distribute.MirroredStrategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZHUXUHAN/reid-baseline
[ "43e8734be52a90d8131af8c4b43536ba6911bdaa" ]
[ "engine/trainer.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: sherlock\n@contact: [email protected]\n\"\"\"\n\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import ModelCheckpoint, Timer\nfrom ignite.metrics import RunningAverage\n\nfrom utils.reid_metric import R1_mAP\nfrom utils.reid_metric import R1_mAP_reranking_training\n\nfrom apex.parallel import DistributedDataParallel as DDP\nfrom apex.fp16_utils import *\nfrom apex import amp, optimizers\nfrom apex.multi_tensor_apply import multi_tensor_applier\n\nglobal ITER\nITER = 0\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn, aligned_train, pcb_train, mgn_train, new_pcb_train,\n device=None):\n \"\"\"\n Factory function for creating a trainer for supervised models\n\n Args:\n model (`torch.nn.Module`): the model to train\n optimizer (`torch.optim.Optimizer`): the optimizer to use\n loss_fn (torch.nn loss function): the loss function to use\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n\n Returns:\n Engine: a trainer engine with supervised update function\n \"\"\"\n if device:\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model.to(device)\n\n def _update(engine, batch):\n\n model.train()\n optimizer.zero_grad()\n img, target = batch\n img = img.to(device) if torch.cuda.device_count() >= 1 else img\n target = target.to(device) if torch.cuda.device_count() >= 1 else target\n if aligned_train:\n score, feat, local_feat = model(img)\n loss = loss_fn(score, feat, target, None, local_feat)\n elif pcb_train:\n score, feat, local_score, local_feat, res3_feat, res3_score = model(img, None)\n loss = loss_fn(score, feat, target, local_score, local_feat, res3_feat, res3_score)\n elif new_pcb_train:\n score, feat, local_score, local_feat = model(img, None)\n loss = loss_fn(score, feat, target, local_score, local_feat)\n elif mgn_train:\n score, feat, local_feat = model(img)\n loss = loss_fn(score, feat, target, None, local_feat)\n else:\n score, feat = model(img)\n loss = loss_fn(score, feat, target, None, None)\n loss.backward()\n optimizer.step()\n if type(score) == tuple:\n sum_score = 0\n for s in score:\n sum_score += (s.max(1)[1] == target).float().mean()\n acc = sum_score / len(score)\n else:\n acc = (score.max(1)[1] == target).float().mean()\n return loss.item(), acc.item()\n\n return Engine(_update)\n\n\ndef create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn,\n cetner_loss_weight,\n aligned_train, pcb_train, mgn_train, arc_train, new_pcb_train, device=None):\n \"\"\"\n Factory function for creating a trainer for supervised models\n\n Args:\n model (`torch.nn.Module`): the model to train\n optimizer (`torch.optim.Optimizer`): the optimizer to use\n loss_fn (torch.nn loss function): the loss function to use\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n\n Returns:\n Engine: a trainer engine with supervised update function\n \"\"\"\n if device:\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model.to(device)\n model, optimizer = amp.initialize(model, optimizer,opt_level='O1')\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n optimizer_center.zero_grad()\n img, target = batch\n img = img.to(device) if torch.cuda.device_count() >= 1 else img\n target = target.to(device) if torch.cuda.device_count() >= 1 else target\n\n if aligned_train:\n score, 
feat, local_feat = model(img)\n loss = loss_fn(score, feat, target, None, local_feat)\n elif pcb_train:\n if arc_train:\n score, feat, local_score, local_feat = model(img, target)\n else:\n score, feat, local_score, local_feat = model(img, None)\n\n loss = loss_fn(score, feat, target, local_score, local_feat, None, None)\n elif new_pcb_train:\n if arc_train:\n score, feat, local_score, local_feat = model(img, target)\n else:\n score, feat, local_score, local_feat, local_score_2, local_feat_2 = model(img, None)\n\n loss = loss_fn(score, feat, target, local_score, local_feat, local_score_2, local_feat_2)\n elif mgn_train:\n score, feat, local_feat = model(img)\n loss = loss_fn(score, feat, target, None, local_feat, None, None)\n else:\n score, feat = model(img)\n loss = loss_fn(score, feat, target, None, None)\n\n # print(\"Total loss is {}, center loss is {}\".format(loss, center_criterion(feat, target)))\n loss.backward()\n #if you use fp16 please use follwing codes\n# with amp.scale_loss(loss, optimizer) as scaled_loss:\n# scaled_loss.backward()\n optimizer.step()\n for param in center_criterion.parameters():\n param.grad.data *= (1. / cetner_loss_weight)\n optimizer_center.step()\n\n # compute acc\n if type(score) == tuple:\n sum_score = 0\n for s in score:\n sum_score += (s.max(1)[1] == target).float().mean()\n acc = sum_score / len(score)\n else:\n acc = (score.max(1)[1] == target).float().mean()\n\n return loss.item(), acc.item()\n\n return Engine(_update)\n\n\ndef create_supervised_evaluator(model, metrics,\n device=None):\n \"\"\"\n Factory function for creating an evaluator for supervised models\n\n Args:\n model (`torch.nn.Module`): the model to train\n metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n Returns:\n Engine: an evaluator engine with supervised inference function\n \"\"\"\n if device:\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n data, pids, camids, data_flip = batch\n data = data.to(device) if torch.cuda.device_count() >= 1 else data\n data_flip = data_flip.to(device) if torch.cuda.device_count() >= 1 else data_flip\n feat, local_feat = model(data, None)\n feat_flip, local_feat_flip = model(data_flip, None)\n return feat, local_feat, pids, camids, feat_flip, local_feat_flip\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n\n\ndef do_train(\n cfg,\n model,\n train_loader,\n val_loader,\n optimizer,\n scheduler,\n loss_fn,\n num_query,\n start_epoch\n):\n log_period = cfg.SOLVER.LOG_PERIOD\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n eval_period = cfg.SOLVER.EVAL_PERIOD\n output_dir = cfg.OUTPUT_DIR\n device = cfg.MODEL.DEVICE\n epochs = cfg.SOLVER.MAX_EPOCHS\n aligned_train = cfg.MODEL.ALIGNED\n pcb_train = cfg.MODEL.PCB\n mgn_train = cfg.MODEL.MGN\n new_pcb_train = cfg.MODEL.NEW_PCB\n\n logger = logging.getLogger(\"reid_baseline.train\")\n logger.info(\"Start training\")\n trainer = create_supervised_trainer(model, optimizer, loss_fn, aligned_train, pcb_train, mgn_train, new_pcb_train,\n device=device)\n # evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)\n checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, 
n_saved=10, require_empty=False)\n timer = Timer(average=True)\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model,\n 'optimizer': optimizer})\n timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,\n pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)\n\n # average metric to attach on trainer\n RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')\n RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')\n\n @trainer.on(Events.STARTED)\n def start_training(engine):\n engine.state.epoch = start_epoch\n\n @trainer.on(Events.EPOCH_STARTED)\n def adjust_learning_rate(engine):\n scheduler.step()\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training_loss(engine):\n global ITER\n ITER += 1\n\n if ITER % log_period == 0:\n logger.info(\"Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}\"\n .format(engine.state.epoch, ITER, len(train_loader),\n engine.state.metrics['avg_loss'], engine.state.metrics['avg_acc'],\n scheduler.get_lr()[0]))\n if len(train_loader) == ITER:\n ITER = 0\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EPOCH_COMPLETED)\n def print_times(engine):\n logger.info('Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]'\n .format(engine.state.epoch, timer.value() * timer.step_count,\n train_loader.batch_size / timer.value()))\n logger.info('-' * 10)\n timer.reset()\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n pass\n if engine.state.epoch % eval_period == 0:\n evaluator.run(val_loader)\n cmc, mAP = evaluator.state.metrics['r1_mAP']\n logger.info(\"Validation Results - Epoch: {}\".format(engine.state.epoch))\n logger.info(\"mAP: {:.1%}\".format(mAP))\n for r in [1, 5, 10]:\n logger.info(\"CMC curve, Rank-{:<3}:{:.1%}\".format(r, cmc[r - 1]))\n\n trainer.run(train_loader, max_epochs=epochs)\n\n\ndef do_train_with_center(\n cfg,\n model,\n center_criterion,\n train_loader,\n val_loader,\n optimizer,\n optimizer_center,\n scheduler,\n loss_fn,\n num_query,\n start_epoch,\n datasets\n\n):\n log_period = cfg.SOLVER.LOG_PERIOD\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n eval_period = cfg.SOLVER.EVAL_PERIOD\n output_dir = cfg.OUTPUT_DIR\n device = cfg.MODEL.DEVICE\n epochs = cfg.SOLVER.MAX_EPOCHS\n mgn_train = cfg.MODEL.MGN\n aligned_train = cfg.MODEL.ALIGNED\n pcb_train = cfg.MODEL.PCB\n arc_train = cfg.MODEL.ARC\n new_pcb_train = cfg.MODEL.NEW_PCB\n\n logger = logging.getLogger(\"reid_baseline.train\")\n logger.info(\"Start training\")\n trainer = create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn,\n cfg.SOLVER.CENTER_LOSS_WEIGHT, aligned_train, pcb_train, mgn_train,\n arc_train, new_pcb_train, device=device)\n evaluator = create_supervised_evaluator(model, metrics={\n 'r1_mAP': R1_mAP_reranking_training(num_query, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)\n checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, n_saved=10, require_empty=False)\n timer = Timer(average=True)\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model,\n 'optimizer': optimizer,\n 'center_param': center_criterion,\n 'optimizer_center': optimizer_center})\n\n timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,\n pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)\n\n # average metric to attach on trainer\n 
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')\n RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')\n\n @trainer.on(Events.STARTED)\n def start_training(engine):\n engine.state.epoch = start_epoch\n\n @trainer.on(Events.EPOCH_STARTED)\n def adjust_learning_rate(engine):\n scheduler.step()\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training_loss(engine):\n global ITER\n ITER += 1\n\n if ITER % log_period == 0:\n logger.info(\"Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}\"\n .format(engine.state.epoch, ITER, len(train_loader),\n engine.state.metrics['avg_loss'], engine.state.metrics['avg_acc'],\n scheduler.get_lr()[0]))\n if len(train_loader) == ITER:\n ITER = 0\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EPOCH_COMPLETED)\n def print_times(engine):\n logger.info('Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]'\n .format(engine.state.epoch, timer.value() * timer.step_count,\n train_loader.batch_size / timer.value()))\n logger.info('-' * 10)\n timer.reset()\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n pass\n if engine.state.epoch % eval_period == 0:\n evaluator.run(val_loader)\n cmc, mAP = evaluator.state.metrics['r1_mAP']\n logger.info(\"Validation Results - Epoch: {}\".format(engine.state.epoch))\n logger.info(\"mAP: {:.1%}\".format(mAP))\n for r in [1, 5, 10]:\n logger.info(\"CMC curve, Rank-{:<3}:{:.1%}\".format(r, cmc[r - 1]))\n\n trainer.run(train_loader, max_epochs=epochs)\n" ]
[ [ "torch.cuda.device_count", "torch.no_grad", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mcx/SMAC3
[ "863e4290054847ba2688521b8cc2e44c15a1493a" ]
[ "smac/optimizer/epm_configuration_chooser.py" ]
[ "import logging\nimport typing\n\nimport numpy as np\n\nfrom smac.configspace import Configuration\nfrom smac.configspace.util import convert_configurations_to_array\nfrom smac.epm.rf_with_instances import RandomForestWithInstances\nfrom smac.optimizer.acquisition import AbstractAcquisitionFunction\nfrom smac.optimizer.ei_optimization import AcquisitionFunctionMaximizer, \\\n RandomSearch\nfrom smac.optimizer.random_configuration_chooser import RandomConfigurationChooser, ChooserNoCoolDown\nfrom smac.runhistory.runhistory import RunHistory\nfrom smac.runhistory.runhistory2epm import AbstractRunHistory2EPM\nfrom smac.scenario.scenario import Scenario\nfrom smac.stats.stats import Stats\n\n__copyright__ = \"Copyright 2021, AutoML.org Freiburg-Hannover\"\n__license__ = \"3-clause BSD\"\n\n\nclass EPMChooser(object):\n \"\"\"\n Interface to train the EPM and generate next configurations\n\n Parameters\n ----------\n\n scenario: smac.scenario.scenario.Scenario\n Scenario object\n stats: smac.stats.stats.Stats\n statistics object with configuration budgets\n runhistory: smac.runhistory.runhistory.RunHistory\n runhistory with all runs so far\n model: smac.epm.rf_with_instances.RandomForestWithInstances\n empirical performance model (right now, we support only\n RandomForestWithInstances)\n acq_optimizer: smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer\n Optimizer of acquisition function.\n restore_incumbent: Configuration\n incumbent to be used from the start. ONLY used to restore states.\n rng: np.random.RandomState\n Random number generator\n random_configuration_chooser:\n Chooser for random configuration -- one of\n\n * ChooserNoCoolDown(modulus)\n * ChooserLinearCoolDown(start_modulus, modulus_increment, end_modulus)\n predict_x_best: bool\n Choose x_best for computing the acquisition function via the model instead of via the observations.\n min_samples_model: int\n Minimum number of samples to build a model\n \"\"\"\n def __init__(self,\n scenario: Scenario,\n stats: Stats,\n runhistory: RunHistory,\n runhistory2epm: AbstractRunHistory2EPM,\n model: RandomForestWithInstances,\n acq_optimizer: AcquisitionFunctionMaximizer,\n acquisition_func: AbstractAcquisitionFunction,\n rng: np.random.RandomState,\n restore_incumbent: Configuration = None,\n random_configuration_chooser: typing.Union[RandomConfigurationChooser] = ChooserNoCoolDown(2.0),\n predict_x_best: bool = True,\n min_samples_model: int = 1\n ):\n self.logger = logging.getLogger(\n self.__module__ + \".\" + self.__class__.__name__)\n self.incumbent = restore_incumbent\n\n self.scenario = scenario\n self.stats = stats\n self.runhistory = runhistory\n self.rh2EPM = runhistory2epm\n self.model = model\n self.acq_optimizer = acq_optimizer\n self.acquisition_func = acquisition_func\n self.rng = rng\n self.random_configuration_chooser = random_configuration_chooser\n\n self._random_search = RandomSearch(\n acquisition_func,\n self.scenario.cs, # type: ignore[attr-defined] # noqa F821\n rng,\n )\n\n self.initial_design_configs = [] # type: typing.List[Configuration]\n\n self.predict_x_best = predict_x_best\n\n self.min_samples_model = min_samples_model\n self.currently_considered_budgets = [0.0, ]\n\n def _collect_data_to_train_model(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:\n # if we use a float value as a budget, we want to train the model only on the highest budget\n available_budgets = []\n for run_key in self.runhistory.data.keys():\n available_budgets.append(run_key.budget)\n\n # Sort available budgets 
from highest to lowest budget\n available_budgets = sorted(list(set(available_budgets)), reverse=True)\n\n # Get #points per budget and if there are enough samples, then build a model\n for b in available_budgets:\n X, Y = self.rh2EPM.transform(self.runhistory, budget_subset=[b, ])\n if X.shape[0] >= self.min_samples_model:\n self.currently_considered_budgets = [b, ]\n configs_array = self.rh2EPM.get_configurations(\n self.runhistory, budget_subset=self.currently_considered_budgets)\n return X, Y, configs_array\n\n return np.empty(shape=[0, 0]), np.empty(shape=[0, ]), np.empty(shape=[0, 0])\n\n def _get_evaluated_configs(self) -> typing.List[Configuration]:\n return self.runhistory.get_all_configs_per_budget(budget_subset=self.currently_considered_budgets)\n\n def choose_next(self, incumbent_value: float = None) -> typing.Iterator[Configuration]:\n \"\"\"Choose next candidate solution with Bayesian optimization. The\n suggested configurations depend on the argument ``acq_optimizer`` to\n the ``SMBO`` class.\n\n Parameters\n ----------\n incumbent_value: float\n Cost value of incumbent configuration (required for acquisition function);\n If not given, it will be inferred from runhistory or predicted;\n if not given and runhistory is empty, it will raise a ValueError.\n\n Returns\n -------\n Iterator\n \"\"\"\n\n self.logger.debug(\"Search for next configuration\")\n X, Y, X_configurations = self._collect_data_to_train_model()\n\n if X.shape[0] == 0:\n # Only return a single point to avoid an overly high number of\n # random search iterations\n return self._random_search.maximize(\n runhistory=self.runhistory, stats=self.stats, num_points=1\n )\n self.model.train(X, Y)\n\n if incumbent_value is not None:\n best_observation = incumbent_value\n x_best_array = None # type: typing.Optional[np.ndarray]\n else:\n if self.runhistory.empty():\n raise ValueError(\"Runhistory is empty and the cost value of \"\n \"the incumbent is unknown.\")\n x_best_array, best_observation = self._get_x_best(self.predict_x_best, X_configurations)\n\n self.acquisition_func.update(\n model=self.model,\n eta=best_observation,\n incumbent_array=x_best_array,\n num_data=len(self._get_evaluated_configs()),\n X=X_configurations,\n )\n\n challengers = self.acq_optimizer.maximize(\n runhistory=self.runhistory,\n stats=self.stats,\n num_points=self.scenario.acq_opt_challengers, # type: ignore[attr-defined] # noqa F821\n random_configuration_chooser=self.random_configuration_chooser\n )\n return challengers\n\n def _get_x_best(self, predict: bool, X: np.ndarray) -> typing.Tuple[float, np.ndarray]:\n \"\"\"Get value, configuration, and array representation of the \"best\" configuration.\n\n The definition of best varies depending on the argument ``predict``. 
If set to ``True``,\n this function will return the stats of the best configuration as predicted by the model,\n otherwise it will return the stats for the best observed configuration.\n\n Parameters\n ----------\n predict : bool\n Whether to use the predicted or observed best.\n\n Returns\n -------\n float\n np.ndarry\n Configuration\n \"\"\"\n if predict:\n costs = list(map(\n lambda x: (\n self.model.predict_marginalized_over_instances(x.reshape((1, -1)))[0][0][0],\n x,\n ),\n X,\n ))\n costs = sorted(costs, key=lambda t: t[0])\n x_best_array = costs[0][1]\n best_observation = costs[0][0]\n # won't need log(y) if EPM was already trained on log(y)\n else:\n all_configs = self.runhistory.get_all_configs_per_budget(budget_subset=self.currently_considered_budgets)\n x_best = self.incumbent\n x_best_array = convert_configurations_to_array(all_configs)\n best_observation = self.runhistory.get_cost(x_best)\n best_observation_as_array = np.array(best_observation).reshape((1, 1))\n # It's unclear how to do this for inv scaling and potential future scaling.\n # This line should be changed if necessary\n best_observation = self.rh2EPM.transform_response_values(best_observation_as_array)\n best_observation = best_observation[0][0]\n\n return x_best_array, best_observation\n" ]
[ [ "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Codle/veGiantModel
[ "54df409d2c9e194a4e68ce7fe820bbd726525f90" ]
[ "src/veGiantModel/engine/module.py" ]
[ "# Copyright (c) 2021, ByteDance Inc. All rights reserved.\nimport os\nimport re as regex\nfrom functools import partial\nfrom math import floor\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom deepspeed.pipe import LayerSpec, PipelineModule, TiedLayerSpec\nfrom deepspeed.runtime import utils as ds_utils\nfrom deepspeed.runtime.activation_checkpointing import checkpointing\nfrom deepspeed.utils import logger\n\nfrom .topology import PipeDataParallelTopology, PipelineParallelGrid\n\n\nclass veGiantModule(PipelineModule):\n def __init__(self,\n layers,\n num_stages=None,\n topology=None,\n loss_fn=None,\n seed_layers=False,\n seed_fn=None,\n base_seed=1234,\n grid=None,\n partition_method='parameters',\n activation_checkpoint_interval=0,\n activation_checkpoint_func=checkpointing.checkpoint):\n \"\"\"Modules to be parallelized with pipeline parallelism.\n\n The key constraint that enables pipeline parallelism is the\n representation of the forward pass as a sequence of layers\n and the enforcement of a simple interface between them. The\n forward pass is implicitly defined by the module ``layers``. The key\n assumption is that the output of each layer can be directly fed as\n input to the next, like a ``torch.nn.Sequence``. The forward pass is\n implicitly:\n\n .. code-block:: python\n\n def forward(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x)\n return x\n\n Args:\n layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module.\n num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided.\n topology (``deepseed.pipe.ProcessTopology``, optional): Defines the axes of parallelism axes for training.\n Must be provided if ``num_stages`` is ``None``.\n loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)``\n base_seed (int, optional): [description]. Defaults to 1234.\n partition_method (str, optional): [description]. 
Defaults to 'parameters'.\n activation_checkpoint_interval (int, optional): The granularity activation checkpointing in terms of number of layers.\n 0 disables activation checkpointing.\n activation_checkpoint_func (callable, optional): The function to use for activation checkpointing.\n Defaults to ``deepspeed.checkpointing.checkpoint``.\n \"\"\"\n\n super(PipelineModule, self).__init__()\n\n # topology = grid.topology() if grid is not None else None\n\n if num_stages is None and topology is None:\n raise RuntimeError('must provide num_stages or topology')\n\n self.micro_offset = 0\n\n self.loss_fn = loss_fn\n\n self.seed_layers = seed_layers\n self.seed_fn = seed_fn\n self.base_seed = base_seed\n if dist.get_rank() == 0:\n try:\n seed_str = self.seed_fn.__name__\n except AttributeError:\n seed_str = None\n print(\n f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}'\n )\n\n # Setup world info\n self.world_group = dist.new_group(ranks=range(dist.get_world_size()))\n self.global_rank = dist.get_rank(group=self.world_group)\n self.world_size = dist.get_world_size(group=self.world_group)\n\n if topology:\n self._topo = topology\n self.num_stages = self._topo.get_dim('pipe')\n else:\n self.num_stages = num_stages\n if topology is None:\n if self.world_size % self.num_stages != 0:\n raise RuntimeError(\n f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})'\n )\n dp = self.world_size // num_stages\n topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp)\n self._topo = topology\n\n # Contruct communicators for pipeline topology\n self._grid = grid if grid is not None else PipelineParallelGrid(process_group=self.world_group, topology=self._topo)\n\n self.stage_id = self._topo.get_coord(self.global_rank).pipe\n\n # Initialize partition information\n self._layer_specs = list(layers)\n self._num_layers = len(self._layer_specs)\n self._local_start = 0\n self._local_stop = None\n self._partition_layers(method=partition_method)\n\n self.forward_funcs = []\n self.tied_modules = nn.ModuleDict()\n self.tied_weight_attrs = {}\n\n # Offset the random seed by the stage ID.\n # newseed = torch.cuda.initial_seed() + self._grid.get_stage_id()\n # ds_utils.set_random_seed(newseed)\n\n # with torch.random.fork_rng(devices=[torch.cuda.current_device()]):\n self._build()\n self.to('cuda')\n\n self.tied_comms = self._index_tied_modules()\n self._synchronize_tied_weights()\n\n self.activation_checkpoint_interval = activation_checkpoint_interval\n self.activation_checkpoint_func = activation_checkpoint_func\n\n def _build(self):\n specs = self._layer_specs\n\n for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]):\n layer_idx = local_idx + self._local_start\n if self.seed_layers:\n if self.seed_fn:\n self.seed_fn(self.base_seed + layer_idx)\n else:\n ds_utils.set_random_seed(self.base_seed + layer_idx)\n\n # Recursively build PipelineModule objects\n if isinstance(layer, PipelineModule):\n raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED')\n\n # LayerSpec objects contain an nn.Module that should be allocated now.\n elif isinstance(layer, nn.Module):\n name = str(layer_idx)\n self.forward_funcs.append(layer)\n self.add_module(name, layer)\n\n # TiedLayerSpec objects contain an nn.Module that should be allocated now.\n elif isinstance(layer, TiedLayerSpec):\n # Build and register the module if we haven't seen it before.\n if layer.key not in self.tied_modules:\n self.tied_modules[layer.key] = layer.build()\n 
self.tied_weight_attrs[layer.key] = layer.tied_weight_attr\n\n if layer.forward_fn is None:\n # Just use forward()\n self.forward_funcs.append(self.tied_modules[layer.key])\n else:\n # User specified fn with args (module, input)\n self.forward_funcs.append(\n partial(layer.forward_fn,\n self.tied_modules[layer.key]))\n\n # LayerSpec objects contain an nn.Module that should be allocated now.\n elif isinstance(layer, LayerSpec):\n module = layer.build()\n name = str(layer_idx)\n self.forward_funcs.append(module)\n self.add_module(name, module)\n\n # Last option: layer may be a functional (e.g., lambda). We do nothing in\n # that case and just use it in forward()\n else:\n self.forward_funcs.append(layer)\n\n # All pipeline parameters should be considered as model parallel in the context\n # of our FP16 optimizer\n for p in self.parameters():\n p.model_parallel = True\n\n def _count_layer_params(self):\n \"\"\"Count the trainable parameters in individual layers.\n\n This routine will only build one layer at a time.\n\n Returns:\n A list of the number of parameters in each layer.\n \"\"\"\n param_counts = [0] * len(self._layer_specs)\n for idx, layer in enumerate(self._layer_specs):\n if isinstance(layer, LayerSpec):\n layer_spec = layer.build()\n params = filter(lambda p: p.requires_grad, layer_spec.parameters())\n param_counts[idx] = sum(p.numel() for p in params)\n elif isinstance(layer, nn.Module):\n params = filter(lambda p: p.requires_grad, layer.parameters())\n param_counts[idx] = sum(p.numel() for p in params)\n return param_counts\n\n def _find_layer_type(self, layername):\n idxs = []\n typeregex = regex.compile(layername, regex.IGNORECASE)\n for idx, layer in enumerate(self._layer_specs):\n name = None\n if isinstance(layer, LayerSpec):\n name = layer.typename.__name__\n elif isinstance(layer, nn.Module):\n name = layer.__class__.__name__\n else:\n try:\n name = layer.__name__\n except AttributeError:\n continue\n if typeregex.search(name):\n idxs.append(idx)\n\n if len(idxs) == 0:\n raise RuntimeError(\n f\"Partitioning '{layername}' found no valid layers to partition.\")\n return idxs\n\n def forward(self, forward_input):\n # We need to offset the seed by the microbatch ID. Save it in a local var to\n # ensure it is preserved in the closure. 
Otherwise checkpointed forward funcs\n # will see a different offset.\n self.micro_offset += 1\n\n def exec_range_func(start, end):\n ''' Helper function to be used with checkpoint()\n Adapted from torch.utils.checkpoint:checkpoint_sequential()\n '''\n local_micro_offset = self.micro_offset + 1\n\n def exec_func(*inputs):\n # Single tensor inputs need to be unwrapped\n if len(inputs) == 1:\n inputs = inputs[0]\n for idx, layer in enumerate(self.forward_funcs[start:end]):\n self.curr_layer = idx + self._local_start\n if self.seed_layers:\n new_seed = (self.base_seed *\n local_micro_offset) + self.curr_layer\n if self.seed_fn:\n self.seed_fn(new_seed)\n else:\n ds_utils.set_random_seed(new_seed)\n\n inputs = layer(inputs)\n return inputs\n\n return exec_func\n\n if self.activation_checkpoint_interval == 0:\n func = exec_range_func(0, len(self.forward_funcs))\n x = func(forward_input)\n else:\n num_layers = len(self.forward_funcs)\n x = forward_input\n for start_idx in range(0, num_layers, self.activation_checkpoint_interval):\n end_idx = min(start_idx + self.activation_checkpoint_interval,\n num_layers)\n\n funcs = self.forward_funcs[start_idx:end_idx]\n # Since we either pass tensors or tuples of tensors without unpacking, we\n # need to be careful not to double-wrap tensors with tuple.\n if not isinstance(x, tuple):\n x = (x, )\n\n if self._is_checkpointable(funcs):\n x = self.activation_checkpoint_func(\n exec_range_func(start_idx,\n end_idx),\n *x)\n else:\n x = exec_range_func(start_idx, end_idx)(*x)\n return x\n\n def _partition_uniform(self, num_items, num_parts):\n # print(f'enter _partition_uniform', flush=True)\n parts = [0] * (num_parts + 1)\n if num_items <= num_parts:\n for p in range(num_parts + 1):\n parts[p] = min(p, num_items)\n return parts\n expected_chunksize = num_items / num_parts\n for p in range(num_parts):\n parts[p] = min(floor(expected_chunksize * p), num_items)\n parts[num_parts] = num_items\n return parts\n\n def _partition_balanced(self, weights, num_parts, eps=1e-3):\n num_items = len(weights)\n # First check for the trivial edge case\n if num_items <= num_parts:\n return self._partition_uniform(num_items, num_parts)\n\n weights_ = ds_utils.prefix_sum_inc(weights)\n\n # Find the smallest bottleneck (weight of heaviest partition)\n bottleneck = ds_utils._rb_partition_balanced(weights_, num_parts, eps=eps)\n\n # Now compute that partitioning\n parts, success = ds_utils._lprobe(weights_, num_parts, bottleneck)\n assert success\n\n return parts\n\n def _partition_layers(self, method='uniform'):\n num_stages = self._topo.get_dim('pipe')\n stage_id = self._topo.get_coord(self.global_rank).pipe\n\n if self.global_rank == 0:\n logger.info(f'Partitioning pipeline stages with method {method}')\n\n method = method.lower()\n\n # Each stage gets a simple uniform number of layers.\n if method == 'uniform':\n num_layers = len(self._layer_specs)\n self.parts = self._partition_uniform(num_items=num_layers,\n num_parts=num_stages)\n elif method == 'parameters':\n param_counts = self._count_layer_params()\n self.parts = self._partition_balanced(weights=param_counts,\n num_parts=num_stages)\n elif method.startswith('type:'):\n layertype = method.split(':')[1]\n binary_weights = [0] * len(self._layer_specs)\n for idx in self._find_layer_type(layertype):\n binary_weights[idx] = 1\n else:\n self.parts = self._partition_balanced(weights=binary_weights,\n num_parts=num_stages)\n elif method.startswith('manual:'):\n msplit = method.split(':')\n layernum = int(msplit[1])\n layerparts = 
msplit[2].split(',')\n assert len(self._layer_specs) == layernum # failsafe check for layer num\n assert num_stages == len(layerparts) - 1 # failsafe check for num stages\n self.parts = list(map(int, layerparts))\n elif method == 'profile':\n raise NotImplementedError(f'Partitioning method {method} not implemented.')\n else:\n raise NotImplementedError(f'Partitioning method {method} not implemented.')\n\n # Print some information on the partitioning.\n if self.global_rank == 0:\n for stage in range(num_stages):\n start = self.parts[stage]\n stop = self.parts[stage + 1]\n print(f'stage={stage} layers={stop - start}')\n for idx, layer in enumerate(self._layer_specs[start:stop]):\n name = str(layer)\n if isinstance(layer, LayerSpec):\n name = layer.typename.__name__\n if isinstance(layer, nn.Module):\n name = layer.__class__.__name__\n else:\n try:\n name = layer.__name__\n except AttributeError:\n pass\n print(f' {idx+start:2d}: {name}')\n if self.loss_fn:\n try:\n print(f' loss: {self.loss_fn.__name__}')\n except AttributeError:\n print(f' loss: {self.loss_fn.__class__.__name__}')\n\n self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1])\n\n def allreduce_tied_weight_gradients(self):\n '''All reduce the gradients of the tied weights between tied stages'''\n for key, comm in self.tied_comms.items():\n weight = getattr(self.tied_modules[key], comm['weight_attr'])\n dist.all_reduce(weight.grad, group=comm['group'])\n\n def _synchronize_tied_weights(self):\n for key, comm in self.tied_comms.items():\n dist.broadcast(\n getattr(comm['module'],\n comm['weight_attr']),\n src=min(comm['ranks']),\n group=comm['group'],\n )\n\n def _index_tied_modules(self):\n ''' Build communication structures for tied modules. '''\n tied_comms = {}\n if self._topo.get_dim('pipe') == 1:\n return tied_comms\n\n specs = self._layer_specs\n tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec))\n for key in tie_keys:\n # Find the layers that the tied module appears in\n tied_layers = []\n for idx, layer in enumerate(specs):\n if isinstance(layer, TiedLayerSpec) and layer.key == key:\n tied_layers.append(idx)\n # Find all stages with this tied module\n # TODO: Would be nice to remove the nested data/model parallelism loops and\n # TODO: instead generalize in some way, since we really just care about the\n # TODO: stage that owns the tied layer. 
Then loop over each (dp, mp, ...)\n # TODO: fiber to generate process groups.\n tied_stages = set(self.stage_owner(idx) for idx in tied_layers)\n for dp in range(self._grid.data_parallel_size):\n for mp in range(self._grid.model_parallel_size):\n tied_ranks = []\n for s in sorted(tied_stages):\n if self._grid.model_parallel_size > 1:\n tied_ranks.append(\n self._grid.stage_to_global(stage_id=s,\n data=dp,\n model=mp))\n else:\n tied_ranks.append(\n self._grid.stage_to_global(stage_id=s,\n data=dp))\n group = dist.new_group(ranks=tied_ranks)\n\n # Record this tied module if we own a local copy of it.\n if self.global_rank in tied_ranks:\n assert key in self.tied_modules\n if key in self.tied_modules:\n tied_comms[key] = {\n 'ranks': tied_ranks,\n 'group': group,\n 'weight_attr': self.tied_weight_attrs[key],\n 'module': self.tied_modules[key],\n }\n # Only count the tied module once in the eyes of the FP16 optimizer\n if self.global_rank != tied_ranks[0]:\n for p in self.tied_modules[key].parameters():\n p.model_parallel = False\n '''\n if len(tied_comms) > 0:\n print(f'RANK={self.global_rank} tied_comms={tied_comms}')\n '''\n\n return tied_comms\n\n def partitions(self):\n return self.parts\n\n def stage_owner(self, layer_idx):\n assert 0 <= layer_idx < self._num_layers\n for stage in range(self._topo.get_dim('pipe')):\n if self.parts[stage] <= layer_idx < self.parts[stage + 1]:\n return stage\n raise RuntimeError(f'Layer {layer_idx} not owned? parts={self.parts}')\n\n def _set_bounds(self, start=None, stop=None):\n \"\"\"Manually define the range of layers that will be built on this process.\n\n These boundaries are treated as list slices and so start is inclusive and stop is\n exclusive. The default of None for both results in all layers being built\n locally.\n \"\"\"\n self._local_start = start\n self._local_stop = stop\n\n def set_checkpoint_interval(self, interval):\n assert interval >= 0\n self.checkpoint_interval = interval\n\n def topology(self):\n \"\"\" ProcessTopology object to query process mappings. \"\"\"\n return self._topo\n\n def mpu(self):\n return self._grid\n\n def num_pipeline_stages(self):\n return self._topo.get_dim('pipe')\n\n def ckpt_prefix(self, checkpoints_path, tag):\n \"\"\"Build a prefix for all checkpoint files written by this module. \"\"\"\n # All checkpoint files start with this\n rank_name = 'module'\n\n # Data parallelism is omitted from the naming convention because we are agnostic\n # to this in the checkpoint.\n omit_dims = frozenset(['data'])\n axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims]\n for dim in axes:\n rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim)\n rank_name += f'-{dim}_{rank:02d}'\n\n ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name)\n return ckpt_name\n\n def ckpt_layer_path(self, ckpt_dir, local_layer_idx):\n \"\"\"Customize a prefix for a specific pipeline module layer. 
\"\"\"\n idx = local_layer_idx + self._local_start\n layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}')\n rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank)\n if rank_repr != '':\n layer_ckpt_path += f'-{rank_repr}'\n layer_ckpt_path += '-model_states.pt'\n return layer_ckpt_path\n\n def save_state_dict(self, save_dir):\n if self._grid.data_parallel_id != 0:\n return\n\n os.makedirs(save_dir, exist_ok=True)\n # layer_offset = self._local_start\n for idx, layer in enumerate(self.forward_funcs):\n model_ckpt_path = self.ckpt_layer_path(save_dir, idx)\n if not hasattr(layer, 'state_dict'):\n continue\n torch.save(layer.state_dict(), model_ckpt_path)\n\n def load_state_dir(self, load_dir, strict=True):\n # rank = dist.get_rank()\n\n layer_offset = self._local_start\n for idx, layer in enumerate(self.forward_funcs):\n # Functions, etc. will not have state_dicts\n if not hasattr(layer, 'load_state_dict'):\n continue\n\n model_ckpt_path = self.ckpt_layer_path(load_dir, idx)\n layer.load_state_dict(torch.load(model_ckpt_path,\n map_location=lambda storage,\n loc: storage),\n strict=strict)\n if self._grid.data_parallel_id == 0:\n logger.info(\n f'RANK={self.global_rank} Loaded layer={idx+layer_offset} file={model_ckpt_path}'\n )\n\n self._synchronize_tied_weights()\n\n def _is_checkpointable(self, funcs):\n if self.__class__.__name__ == 'GPT2ModelPipe':\n return all('ParallelTransformerLayerPipe' in f.__class__.__name__\n for f in funcs)\n\n params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]\n return any(len(list(p)) > 0 for p in params)\n" ]
[ [ "torch.load", "torch.nn.ModuleDict", "torch.distributed.new_group", "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.distributed.all_reduce" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
huiguoo/benchmark
[ "0c34e941193c3294bdf16fa57bcd788145f672af" ]
[ "torchbenchmark/models/maml/meta.py" ]
[ "import torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch import optim\nimport numpy as np\n\nfrom .learner import Learner\nfrom copy import deepcopy\n\n\n\nclass Meta(nn.Module):\n \"\"\"\n Meta Learner\n \"\"\"\n def __init__(self, args, config):\n \"\"\"\n\n :param args:\n \"\"\"\n super(Meta, self).__init__()\n\n self.update_lr = args.update_lr\n self.meta_lr = args.meta_lr\n self.n_way = args.n_way\n self.k_spt = args.k_spt\n self.k_qry = args.k_qry\n self.task_num = args.task_num\n self.update_step = args.update_step\n self.update_step_test = args.update_step_test\n\n\n self.net = Learner(config, args.imgc, args.imgsz)\n self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)\n\n\n\n\n def clip_grad_by_norm_(self, grad, max_norm):\n \"\"\"\n in-place gradient clipping.\n :param grad: list of gradients\n :param max_norm: maximum norm allowable\n :return:\n \"\"\"\n\n total_norm = 0\n counter = 0\n for g in grad:\n param_norm = g.data.norm(2)\n total_norm += param_norm.item() ** 2\n counter += 1\n total_norm = total_norm ** (1. / 2)\n\n clip_coef = max_norm / (total_norm + 1e-6)\n if clip_coef < 1:\n for g in grad:\n g.data.mul_(clip_coef)\n\n return total_norm/counter\n\n def forward(self, x_spt, y_spt, x_qry, y_qry):\n if self.training:\n return self.forward_train(x_spt, y_spt, x_qry, y_qry)\n else:\n return self.finetunning(x_spt[0], y_spt[0], x_qry[0], y_qry[0])\n\n def forward_train(self, x_spt, y_spt, x_qry, y_qry):\n \"\"\"\n\n :param x_spt: [b, setsz, c_, h, w]\n :param y_spt: [b, setsz]\n :param x_qry: [b, querysz, c_, h, w]\n :param y_qry: [b, querysz]\n :return:\n \"\"\"\n task_num, setsz, c_, h, w = x_spt.size()\n querysz = x_qry.size(1)\n\n losses_q = [0 for _ in range(self.update_step + 1)] # losses_q[i] is the loss on step i\n corrects = [0 for _ in range(self.update_step + 1)]\n\n\n for i in range(task_num):\n\n # 1. run the i-th task and compute loss for k=0\n logits = self.net(x_spt[i], vars=None, bn_training=True)\n loss = F.cross_entropy(logits, y_spt[i])\n grad = torch.autograd.grad(loss, self.net.parameters())\n fast_weights = list([p[1] - self.update_lr * p[0]for p in zip(grad, self.net.parameters())])\n\n # this is the loss and accuracy before first update\n with torch.no_grad():\n # [setsz, nway]\n logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i])\n losses_q[0] += loss_q\n\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i]).sum().item()\n corrects[0] = corrects[0] + correct\n\n # this is the loss and accuracy after the first update\n with torch.no_grad():\n # [setsz, nway]\n logits_q = self.net(x_qry[i], fast_weights, bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i])\n losses_q[1] += loss_q\n # [setsz]\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i]).sum().item()\n corrects[1] = corrects[1] + correct\n\n for k in range(1, self.update_step):\n # 1. run the i-th task and compute loss for k=1~K-1\n logits = self.net(x_spt[i], fast_weights, bn_training=True)\n loss = F.cross_entropy(logits, y_spt[i])\n # 2. compute grad on theta_pi\n grad = torch.autograd.grad(loss, fast_weights)\n # 3. 
theta_pi = theta_pi - train_lr * grad\n fast_weights = [p[1] - self.update_lr * p[0] for p in zip(grad, fast_weights)]\n\n logits_q = self.net(x_qry[i], fast_weights, bn_training=True)\n # loss_q will be overwritten and just keep the loss_q on last update step.\n loss_q = F.cross_entropy(logits_q, y_qry[i])\n losses_q[k + 1] += loss_q\n\n with torch.no_grad():\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i]).sum().item() # convert to numpy\n corrects[k + 1] = corrects[k + 1] + correct\n\n\n\n # end of all tasks\n # sum over all losses on query set across all tasks\n loss_q = losses_q[-1] / task_num\n\n # optimize theta parameters\n self.meta_optim.zero_grad()\n loss_q.backward()\n # print('meta update')\n # for p in self.net.parameters()[:5]:\n # \tprint(torch.norm(p).item())\n self.meta_optim.step()\n\n\n accs = torch.tensor(corrects) / (querysz * task_num)\n\n return accs\n\n\n def finetunning(self, x_spt, y_spt, x_qry, y_qry):\n \"\"\"\n\n :param x_spt: [setsz, c_, h, w]\n :param y_spt: [setsz]\n :param x_qry: [querysz, c_, h, w]\n :param y_qry: [querysz]\n :return:\n \"\"\"\n assert len(x_spt.shape) == 4\n\n querysz = x_qry.size(0)\n\n corrects = [0 for _ in range(self.update_step_test + 1)]\n\n # in order to not ruin the state of running_mean/variance and bn_weight/bias\n # we finetunning on the copied model instead of self.net\n net = deepcopy(self.net)\n\n # 1. run the i-th task and compute loss for k=0\n logits = net(x_spt)\n loss = F.cross_entropy(logits, y_spt)\n grad = torch.autograd.grad(loss, net.parameters())\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))\n\n # this is the loss and accuracy before first update\n with torch.no_grad():\n # [setsz, nway]\n logits_q = net(x_qry, net.parameters(), bn_training=True)\n # [setsz]\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n # scalar\n correct = torch.eq(pred_q, y_qry).sum().item()\n corrects[0] = corrects[0] + correct\n\n # this is the loss and accuracy after the first update\n with torch.no_grad():\n # [setsz, nway]\n logits_q = net(x_qry, fast_weights, bn_training=True)\n # [setsz]\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n # scalar\n correct = torch.eq(pred_q, y_qry).sum().item()\n corrects[1] = corrects[1] + correct\n\n for k in range(1, self.update_step_test):\n # 1. run the i-th task and compute loss for k=1~K-1\n logits = net(x_spt, fast_weights, bn_training=True)\n loss = F.cross_entropy(logits, y_spt)\n # 2. compute grad on theta_pi\n grad = torch.autograd.grad(loss, fast_weights)\n # 3. theta_pi = theta_pi - train_lr * grad\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))\n\n logits_q = net(x_qry, fast_weights, bn_training=True)\n # loss_q will be overwritten and just keep the loss_q on last update step.\n loss_q = F.cross_entropy(logits_q, y_qry)\n\n with torch.no_grad():\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry).sum().item() # convert to numpy\n corrects[k + 1] = corrects[k + 1] + correct\n\n\n del net\n\n accs = torch.tensor(corrects) / querysz\n\n return accs\n\n\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.functional.softmax", "torch.eq", "torch.nn.functional.cross_entropy", "torch.tensor", "torch.no_grad", "torch.autograd.grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sakibguy/keras
[ "ea93c46545089efc1405c8c88e32b21129b24188" ]
[ "keras/tests/tracking_util_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport functools\n\nimport tensorflow.compat.v2 as tf\nimport os\nimport weakref\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import test_util\nfrom keras import combinations\nfrom keras import keras_parameterized\nfrom keras import testing_utils\nfrom keras.engine import input_layer\nfrom keras.engine import sequential\nfrom keras.engine import training\nfrom keras.layers import core\nfrom keras.layers import reshaping\nfrom keras.optimizer_v2 import adam\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import util as trackable_utils\n\n\n# pylint: disable=not-callable\nclass MyModel(training.Model):\n \"\"\"A concrete Model for testing.\"\"\"\n\n def __init__(self):\n super(MyModel, self).__init__()\n self._named_dense = core.Dense(1, use_bias=True)\n self._second = core.Dense(1, use_bias=False)\n # We can still track Trackables which aren't Layers.\n self._non_layer = NonLayerTrackable()\n\n def call(self, values):\n ret = self._second(self._named_dense(values))\n return ret\n\n\nclass NonLayerTrackable(tf.Module):\n\n def __init__(self):\n super(NonLayerTrackable, self).__init__()\n self.a_variable = trackable_utils.add_variable(\n self, name=\"a_variable\", shape=[])\n\n\nclass InterfaceTests(tf.test.TestCase):\n\n def testLayerDeduplication(self):\n model = training.Model()\n layer_one = core.Dense(1)\n layer_two = core.Dense(1)\n model.other_path = [layer_one, layer_two]\n model.l2 = layer_two\n model.l1 = layer_one\n self.assertEqual([layer_one, layer_two], model.layers)\n\n def testSaveWithOnlyKerasSession(self):\n\n with tf.Graph().as_default(), self.cached_session():\n inp = input_layer.Input([1])\n dense = core.Dense(1)(inp)\n model = training.Model(inp, dense)\n model.compile(optimizer=\"sgd\", loss=\"mse\")\n model.fit([1.], [2.])\n checkpoint = tf.train.Checkpoint(model=model)\n checkpoint.save(os.path.join(self.get_temp_dir(), \"ckpt\"))\n\n\nclass CheckpointingTests(keras_parameterized.TestCase):\n\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\n def testNamingWithOptimizer(self):\n input_value = tf.constant([[3.]])\n model = MyModel()\n # A nuisance Model using the same optimizer. 
Its slot variables should not\n # go in the checkpoint, since it is never depended on.\n other_model = MyModel()\n optimizer = adam.Adam(0.001)\n step = tf.compat.v1.train.get_or_create_global_step()\n root_trackable = tf.train.Checkpoint(\n optimizer=optimizer, model=model, step=step)\n\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n train_op = tf.group(\n optimizer.apply_gradients(zip(gradients, variables)),\n step.assign_add(1))\n\n with tf.GradientTape() as tape:\n loss = other_model(input_value)\n variables = other_model.trainable_variables\n gradients = tape.gradient(loss, variables)\n optimizer.apply_gradients(zip(gradients, variables))\n\n self.evaluate(trackable_utils.gather_initializers(\n root_trackable))\n self.evaluate(train_op)\n named_variables, serialized_graph, _ = tf.__internal__.tracking.ObjectGraphView(\n root_trackable).serialize_object_graph()\n expected_slot_keys = (\n \"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m\",\n \"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v\",\n \"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m\",\n \"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v\",\n \"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m\",\n \"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v\",\n )\n expected_checkpoint_names = (\n # Created in the root node, so no prefix.\n \"step\",\n \"model/_second/kernel\",\n \"model/_named_dense/kernel\",\n \"model/_named_dense/bias\",\n # non-Layer dependency of the model\n \"model/_non_layer/a_variable\",\n \"optimizer/learning_rate\",\n \"optimizer/beta_1\",\n \"optimizer/beta_2\",\n \"optimizer/iter\",\n \"optimizer/decay\",\n ) + expected_slot_keys\n suffix = \"/.ATTRIBUTES/VARIABLE_VALUE\"\n expected_checkpoint_names = [\n name + suffix for name in expected_checkpoint_names]\n named_variables = {v.name: v for v in named_variables}\n self.assertEqual(len(expected_checkpoint_names),\n len(named_variables.keys()))\n # Check that we've mapped to the right variable objects (not exhaustive)\n self.assertEqual(\n \"global_step\",\n named_variables[\"step\" + suffix].full_name)\n self.assertEqual(\n \"my_model/dense_1/kernel\",\n named_variables[\"model/_second/kernel\" + suffix].full_name)\n self.assertEqual(\n \"my_model/dense/kernel\",\n named_variables[\"model/_named_dense/kernel\" + suffix].full_name)\n self.assertEqual(\"Adam/beta_1\",\n named_variables[\"optimizer/beta_1\" + suffix].full_name)\n self.assertEqual(\"Adam/beta_2\",\n named_variables[\"optimizer/beta_2\" + suffix].full_name)\n # Spot check the generated protocol buffers.\n self.assertEqual(\"optimizer\",\n serialized_graph.nodes[0].children[1].local_name)\n optimizer_node = serialized_graph.nodes[\n serialized_graph.nodes[0].children[1].node_id]\n children = [node.local_name for node in optimizer_node.children]\n self.assertEqual(\n # hyper variable dependencies\n len([\"beta_1\", \"beta_2\", \"iter\", \"decay\", \"learning_rate\"]),\n len(children))\n serialized_slot_keys = []\n for slot in optimizer_node.slot_variables:\n for attribute in (\n serialized_graph.nodes[slot.slot_variable_node_id].attributes):\n serialized_slot_keys.append(attribute.checkpoint_key)\n self.assertEqual(\n len([key + suffix for key in expected_slot_keys]),\n len(serialized_slot_keys))\n\n @combinations.generate(combinations.combine(mode=[\"graph\", \"eager\"]))\n def testSaveRestore(self):\n with self.test_session():\n model = MyModel()\n optimizer = 
adam.Adam(0.001)\n root_trackable = tf.train.Checkpoint(\n optimizer=optimizer, model=model)\n input_value = tf.constant([[3.]])\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n train_op = optimizer.apply_gradients(zip(gradients, variables))\n self.assertFalse(root_trackable.save_counter.trainable)\n self.evaluate(trackable_utils.gather_initializers(\n root_trackable))\n self.evaluate(train_op)\n prefix = os.path.join(self.get_temp_dir(), \"ckpt\")\n self.evaluate(tf.compat.v1.assign(model._named_dense.variables[1], [42.]))\n m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], \"m\")\n self.evaluate(tf.compat.v1.assign(m_bias_slot, [1.5]))\n save_path = root_trackable.save(file_prefix=prefix)\n self.evaluate(tf.compat.v1.assign(model._named_dense.variables[1], [43.]))\n self.evaluate(tf.compat.v1.assign(root_trackable.save_counter, 3))\n optimizer_variables = self.evaluate(\n sorted(optimizer.variables(), key=lambda v: v.name))\n self.evaluate(tf.compat.v1.assign(m_bias_slot, [-2.]))\n # Immediate restoration\n status = root_trackable.restore(save_path=save_path).assert_consumed()\n status.run_restore_ops()\n self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))\n self.assertAllEqual(1, self.evaluate(root_trackable.save_counter))\n self.assertAllEqual([1.5], self.evaluate(m_bias_slot))\n if not tf.executing_eagerly():\n return # Restore-on-create is only supported when executing eagerly\n on_create_model = MyModel()\n on_create_optimizer = adam.Adam(0.001)\n on_create_root = tf.train.Checkpoint(\n optimizer=on_create_optimizer, model=on_create_model)\n # Deferred restoration\n status = on_create_root.restore(save_path=save_path)\n status.assert_nontrivial_match()\n status.assert_existing_objects_matched()\n with self.assertRaises(AssertionError):\n status.assert_consumed()\n on_create_model(tf.constant([[3.]])) # create variables\n self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))\n self.assertAllEqual([42.],\n self.evaluate(\n on_create_model._named_dense.variables[1]))\n on_create_m_bias_slot = on_create_optimizer.get_slot(\n on_create_model._named_dense.variables[1], \"m\")\n status.assert_existing_objects_matched()\n if not tf.executing_eagerly():\n with self.assertRaises(AssertionError):\n status.assert_consumed()\n # Optimizer slot variables are created when the original variable is\n # restored.\n self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))\n dummy_var = tf.Variable([1.])\n on_create_optimizer.minimize(loss=dummy_var.read_value,\n var_list=[dummy_var])\n status.assert_existing_objects_matched()\n status.assert_consumed()\n self.assertAllEqual(\n optimizer_variables,\n # Creation order is different, so .variables() needs to be re-sorted.\n self.evaluate(sorted(optimizer.variables(), key=lambda v: v.name)))\n\n # TODO(allenl): Debug garbage created by this test in python3.\n def testDeferredRestorationUsageEager(self):\n \"\"\"An idiomatic eager execution example.\"\"\"\n num_training_steps = 10\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n for training_continuation in range(3):\n model = MyModel()\n optimizer = adam.Adam(0.001)\n root = tf.train.Checkpoint(\n optimizer=optimizer, model=model)\n root.restore(tf.train.latest_checkpoint(\n checkpoint_directory))\n for _ in range(num_training_steps):\n # TODO(allenl): Use a Dataset and 
serialize/checkpoint it.\n input_value = tf.constant([[3.]])\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n optimizer.apply_gradients(zip(gradients, variables))\n root.save(file_prefix=checkpoint_prefix)\n self.assertEqual((training_continuation + 1) * num_training_steps,\n root.optimizer.iterations.numpy())\n\n def testUsageGraph(self):\n \"\"\"Expected usage when graph building.\"\"\"\n with context.graph_mode():\n num_training_steps = 10\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n for training_continuation in range(3):\n with tf.Graph().as_default():\n model = MyModel()\n optimizer = adam.Adam(0.001)\n root = tf.compat.v1.train.Checkpoint(\n optimizer=optimizer, model=model)\n input_value = tf.constant([[3.]])\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n train_op = optimizer.apply_gradients(zip(gradients, variables))\n\n checkpoint_path = tf.train.latest_checkpoint(\n checkpoint_directory)\n with self.session(graph=tf.compat.v1.get_default_graph()) as session:\n status = root.restore(save_path=checkpoint_path)\n status.initialize_or_restore(session=session)\n if checkpoint_path is None:\n self.assertEqual(0, training_continuation)\n with self.assertRaises(AssertionError):\n status.assert_consumed()\n with self.assertRaises(AssertionError):\n status.assert_existing_objects_matched()\n else:\n status.assert_consumed()\n status.assert_existing_objects_matched()\n for _ in range(num_training_steps):\n session.run(train_op)\n root.save(file_prefix=checkpoint_prefix, session=session)\n self.assertEqual((training_continuation + 1) * num_training_steps,\n session.run(root.optimizer.iterations))\n self.assertEqual(training_continuation + 1,\n session.run(root.save_counter))\n\n @combinations.generate(combinations.combine(mode=[\"graph\", \"eager\"]))\n def testAgnosticUsage(self):\n \"\"\"Graph/eager agnostic usage.\"\"\"\n # Does create garbage when executing eagerly due to ops.Graph() creation.\n with self.test_session():\n num_training_steps = 10\n checkpoint_directory = self.get_temp_dir()\n optimizer = adam.Adam(0.001)\n def _train_fn(model, input_value):\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n return optimizer.apply_gradients(zip(gradients, variables))\n for training_continuation in range(3):\n with testing_utils.device(should_use_gpu=True):\n model = MyModel()\n root = tf.train.Checkpoint(\n optimizer=optimizer, model=model)\n manager = tf.train.CheckpointManager(\n root, checkpoint_directory, max_to_keep=1)\n status = root.restore(save_path=manager.latest_checkpoint)\n input_value = tf.constant([[3.]])\n train_fn = functools.partial(_train_fn, model, input_value)\n if not tf.executing_eagerly():\n train_fn = functools.partial(self.evaluate, train_fn())\n status.initialize_or_restore()\n for _ in range(num_training_steps):\n train_fn()\n manager.save()\n self.assertEqual((training_continuation + 1) * num_training_steps,\n self.evaluate(root.optimizer.iterations))\n self.assertEqual(training_continuation + 1,\n self.evaluate(root.save_counter))\n\n @combinations.generate(combinations.combine(mode=[\"eager\"]))\n def testPartialRestoreWarningObject(self):\n optimizer = adam.Adam(0.0)\n original_root = 
tf.train.Checkpoint(v1=tf.Variable(2.),\n v2=tf.Variable(3.),\n optimizer=optimizer)\n # Create a slot variable to save\n optimizer.minimize(original_root.v1.read_value, [original_root.v1])\n prefix = os.path.join(self.get_temp_dir(), \"ckpt\")\n save_path = original_root.save(prefix)\n partial_root = tf.train.Checkpoint(v1=tf.Variable(0.))\n weak_partial_root = weakref.ref(partial_root)\n weak_v1 = weakref.ref(partial_root.v1)\n partial_root.restore(save_path)\n self.assertEqual(2., partial_root.v1.numpy())\n with tf.compat.v1.test.mock.patch.object(logging, \"warning\") as mock_log:\n del partial_root\n self.assertIsNone(weak_partial_root())\n self.assertIsNone(weak_v1())\n messages = str(mock_log.call_args_list)\n self.assertIn(\"(root).v2'\", messages)\n self.assertIn(\"(root).optimizer's state 'm' for (root).v1\", messages)\n self.assertNotIn(\"(root).v1'\", messages)\n self.assertIn(\"expect_partial()\", messages)\n\n # pylint: disable=cell-var-from-loop\n @combinations.generate(combinations.combine(mode=[\"graph\", \"eager\"]))\n def testWithDefun(self):\n with self.test_session():\n num_training_steps = 2\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n for training_continuation in range(3):\n with testing_utils.device(should_use_gpu=True):\n model = MyModel()\n # Don't actually train so we can test variable values\n optimizer = adam.Adam(0.)\n root = tf.train.Checkpoint(\n optimizer=optimizer, model=model)\n checkpoint_path = tf.train.latest_checkpoint(\n checkpoint_directory)\n status = root.restore(save_path=checkpoint_path)\n def train_fn():\n @tf.function\n def _call_model(x):\n return model(x)\n with tf.GradientTape() as tape:\n loss = _call_model(tf.constant([[3.]]))\n gradients = tape.gradient(loss, model.variables)\n return optimizer.apply_gradients(zip(gradients, model.variables))\n if not tf.executing_eagerly():\n train_fn = functools.partial(\n self.evaluate, train_fn())\n status.initialize_or_restore()\n for _ in range(num_training_steps):\n train_fn()\n if training_continuation > 0:\n status.assert_consumed()\n self.assertAllClose([[42.]], self.evaluate(model.variables[0]))\n else:\n self.evaluate(model.variables[0].assign([[42.]]))\n root.save(file_prefix=checkpoint_prefix)\n self.assertEqual((training_continuation + 1) * num_training_steps,\n self.evaluate(optimizer.iterations))\n self.assertEqual(training_continuation + 1,\n self.evaluate(root.save_counter))\n # pylint: enable=cell-var-from-loop\n\n @combinations.generate(combinations.combine(mode=[\"eager\"]))\n def testAnonymousVarsInInit(self):\n\n class Model(training.Model):\n\n def __init__(self):\n super(Model, self).__init__()\n self.w = tf.Variable(0.0)\n self.b = tf.Variable(0.0)\n self.vars = [self.w, self.b]\n\n def call(self, x):\n return x * self.w + self.b\n\n model = Model()\n optimizer = adam.Adam(learning_rate=0.05)\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n checkpoint = tf.train.Checkpoint(\n model=model, optimizer=optimizer)\n for _ in range(2):\n checkpoint.save(checkpoint_prefix)\n with tf.GradientTape() as tape:\n loss = (tf.constant(1.)\n - model(tf.constant(1.))) ** 2\n grad = tape.gradient(loss, model.vars)\n optimizer.apply_gradients(\n [(g, v) for g, v in zip(grad, model.vars)])\n\n @combinations.generate(combinations.combine(mode=[\"graph\", \"eager\"]))\n def testDeferredSlotRestoration(self):\n with self.test_session():\n checkpoint_directory = 
self.get_temp_dir()\n\n root = tf.train.Checkpoint()\n root.var = trackable_utils.add_variable(\n root, name=\"var\", initializer=0.)\n optimizer = adam.Adam(0.1)\n variables = [root.var]\n gradients = [1.]\n train_op = optimizer.apply_gradients(zip(gradients, variables))\n # Note that `optimizer` has not been added as a dependency of\n # `root`. Create a one-off grouping so that slot variables for `root.var`\n # get initialized too.\n self.evaluate(trackable_utils.gather_initializers(\n tf.train.Checkpoint(root=root, optimizer=optimizer)))\n self.evaluate(train_op)\n self.evaluate(tf.compat.v1.assign(root.var, 12.))\n no_slots_path = root.save(os.path.join(checkpoint_directory, \"no_slots\"))\n root.optimizer = optimizer\n self.evaluate(tf.compat.v1.assign(root.var, 13.))\n self.evaluate(tf.compat.v1.assign(\n optimizer.get_slot(slot_name=\"m\", var=root.var),\n 14.))\n slots_path = root.save(os.path.join(checkpoint_directory, \"with_slots\"))\n new_root = tf.train.Checkpoint()\n # Load the slot-containing checkpoint (deferred), then immediately\n # overwrite the non-slot variable (also deferred).\n slot_status = new_root.restore(slots_path)\n no_slot_status = new_root.restore(no_slots_path)\n with self.assertRaises(AssertionError):\n no_slot_status.assert_consumed()\n new_root.var = trackable_utils.add_variable(\n new_root, name=\"var\", shape=[])\n no_slot_status.assert_consumed()\n no_slot_status.run_restore_ops()\n self.assertEqual(12., self.evaluate(new_root.var))\n new_root.optimizer = adam.Adam(0.1)\n slot_status.assert_existing_objects_matched()\n if not tf.executing_eagerly():\n with self.assertRaisesRegex(AssertionError, \"Unresolved object\"):\n slot_status.assert_consumed()\n self.assertEqual(12., self.evaluate(new_root.var))\n if tf.executing_eagerly():\n # Slot variables are only created with restoring initializers when\n # executing eagerly.\n self.assertEqual(14., self.evaluate(\n new_root.optimizer.get_slot(slot_name=\"m\", var=new_root.var)))\n else:\n # Slot variables are not created eagerly when graph building.\n with self.assertRaises(KeyError):\n new_root.optimizer.get_slot(slot_name=\"m\", var=new_root.var)\n variables = [new_root.var]\n gradients = [1.]\n train_op = new_root.optimizer.apply_gradients(zip(gradients, variables))\n # The slot variable now exists; restore() didn't create it, but we should\n # now have a restore op for it.\n slot_status.run_restore_ops()\n if not tf.executing_eagerly():\n # The train op hasn't run when graph building, so the slot variable has\n # its restored value. 
It has run in eager, so the value will\n # be different.\n self.assertEqual(14., self.evaluate(\n new_root.optimizer.get_slot(slot_name=\"m\", var=new_root.var)))\n self.evaluate(train_op)\n slot_status.assert_consumed()\n\n def testManySavesGraph(self):\n \"\"\"Saves after the first should not modify the graph.\"\"\"\n with context.graph_mode():\n graph = tf.Graph()\n with graph.as_default(), self.session(graph):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n obj = tf.train.Checkpoint()\n obj.var = tf.Variable(0., name=\"v\")\n obj.opt = adam.Adam(0.1)\n variables = [obj.var]\n gradients = [1.]\n obj.opt.apply_gradients(zip(gradients, variables))\n self.evaluate(trackable_utils.gather_initializers(obj))\n obj.save(checkpoint_prefix)\n graph.finalize()\n obj.save(checkpoint_prefix)\n\n def testManyRestoresGraph(self):\n \"\"\"Restores after the first should not modify the graph.\"\"\"\n with context.graph_mode():\n graph = tf.Graph()\n with graph.as_default(), self.session(graph):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n obj = tf.train.Checkpoint()\n obj.var = tf.Variable(0., name=\"v\")\n obj.opt = adam.Adam(0.1)\n variables = [obj.var]\n gradients = [1.]\n obj.opt.apply_gradients(zip(gradients, variables))\n self.evaluate(trackable_utils.gather_initializers(obj))\n save_path = obj.save(checkpoint_prefix)\n obj.restore(save_path)\n graph.finalize()\n obj.restore(save_path)\n\n @combinations.generate(combinations.combine(mode=[\"graph\", \"eager\"]))\n def test_sequential(self):\n with self.test_session():\n model = sequential.Sequential()\n checkpoint = tf.train.Checkpoint(model=model)\n model.add(core.Dense(4))\n second_dense = core.Dense(5)\n model.add(second_dense)\n model(tf.constant([[1.]]))\n checkpoint.restore(None).initialize_or_restore()\n self.evaluate(second_dense.bias.assign(\n tf.constant([1., 2., 3., 4., 5.])))\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n save_path = checkpoint.save(checkpoint_prefix)\n self.evaluate(second_dense.bias.assign(\n tf.constant([5., 6., 7., 8., 9.])))\n checkpoint.restore(save_path).assert_consumed().run_restore_ops()\n self.assertAllEqual([1., 2., 3., 4., 5.],\n self.evaluate(second_dense.bias))\n\n deferred_sequential = sequential.Sequential()\n deferred_sequential_checkpoint = tf.train.Checkpoint(\n model=deferred_sequential)\n status = deferred_sequential_checkpoint.restore(save_path)\n deferred_sequential.add(core.Dense(4))\n deferred_second_dense = core.Dense(5)\n deferred_sequential.add(deferred_second_dense)\n deferred_sequential(tf.constant([[1.]]))\n status.run_restore_ops()\n self.assertAllEqual([1., 2., 3., 4., 5.],\n self.evaluate(deferred_second_dense.bias))\n\n @combinations.generate(combinations.combine(mode=[\"graph\", \"eager\"]))\n def test_initialize_if_not_restoring(self):\n with self.test_session():\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n optimizer_only_prefix = os.path.join(checkpoint_directory, \"opt\")\n with testing_utils.device(should_use_gpu=True):\n model = MyModel()\n optimizer = adam.Adam(0.001)\n root = tf.train.Checkpoint(\n model=model) # Do not save the optimizer with the checkpoint.\n optimizer_checkpoint = tf.train.Checkpoint(\n optimizer=optimizer)\n\n checkpoint_path = tf.train.latest_checkpoint(\n checkpoint_directory)\n status = 
root.restore(save_path=checkpoint_path)\n input_value = tf.constant([[3.]])\n def train_fn():\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n return optimizer.apply_gradients(zip(gradients, variables))\n if not tf.executing_eagerly():\n train_fn = functools.partial(self.evaluate, train_fn())\n status.initialize_or_restore()\n # TODO(tanzheny): Add hyper variables to .variables(), and set them with\n # set_weights etc.\n variables_not_in_the_variables_property = [\n obj for obj in optimizer._hyper.values()\n if isinstance(obj, tf.Variable)]\n self.evaluate([v.initializer for v\n in optimizer.variables()\n + variables_not_in_the_variables_property])\n train_fn()\n model_save_path = root.save(file_prefix=checkpoint_prefix)\n self.evaluate(optimizer.beta_1.assign(42.))\n optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)\n del train_fn\n\n # Restore into a graph with the optimizer\n with testing_utils.device(should_use_gpu=True):\n model = MyModel()\n optimizer = adam.Adam(0.001)\n root = tf.train.Checkpoint(\n optimizer=optimizer, model=model)\n status = root.restore(save_path=model_save_path)\n input_value = tf.constant([[3.]])\n def train_fn1():\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n return optimizer.apply_gradients(zip(gradients, variables))\n if not tf.executing_eagerly():\n train_fn1 = functools.partial(self.evaluate, train_fn1())\n status.initialize_or_restore()\n train_fn1()\n with self.assertRaises(AssertionError):\n status.assert_existing_objects_matched()\n with self.assertRaises(AssertionError):\n status.assert_consumed()\n del train_fn1\n\n # Make sure initialization doesn't clobber later restores\n with testing_utils.device(should_use_gpu=True):\n model = MyModel()\n optimizer = adam.Adam(0.001, beta_1=1.0)\n root = tf.train.Checkpoint(\n optimizer=optimizer, model=model)\n opt_root = tf.train.Checkpoint(\n optimizer=optimizer)\n status = root.restore(save_path=model_save_path)\n init_only_optimizer_status = opt_root.restore(save_path=None)\n optimizer_status = opt_root.restore(save_path=optimizer_save_path)\n input_value = tf.constant([[3.]])\n def train_fn2():\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n return optimizer.apply_gradients(zip(gradients, variables))\n if not tf.executing_eagerly():\n train_fn2 = functools.partial(self.evaluate, train_fn2())\n optimizer_status.run_restore_ops()\n status.initialize_or_restore()\n init_only_optimizer_status.initialize_or_restore()\n train_fn2()\n self.assertEqual(42., self.evaluate(optimizer.beta_1))\n\n\nclass _ManualScope(tf.Module):\n\n def __call__(self):\n with tf.compat.v1.variable_scope(\"ManualScope\") as vs:\n self.variable_scope = vs\n with trackable_utils.capture_dependencies(template=self):\n return self._build()\n\n def _build(self):\n return tf.compat.v1.get_variable(name=\"in_manual_scope\", shape=[])\n\n\[email protected](combinations.combine(mode=[\"graph\", \"eager\"]))\nclass TemplateTests(keras_parameterized.TestCase):\n\n def test_trackable_save_restore(self):\n with self.test_session():\n def _templated():\n v = tf.compat.v1.get_variable(\n \"v\", shape=[1], initializer=tf.compat.v1.zeros_initializer(),\n use_resource=True)\n v2 = tf.compat.v1.get_variable(\n \"v2\", shape=[1], 
initializer=tf.compat.v1.zeros_initializer(),\n use_resource=True)\n manual = _ManualScope()\n return v, v + 1., v2, manual, manual()\n\n save_template = tf.compat.v1.make_template(\"s1\", _templated)\n v1_save, _, v2_save, manual_scope, manual_scope_v = save_template()\n self.assertEqual(\n set([id(v1_save), id(v2_save), id(manual_scope),\n id(manual_scope_v), id(save_template)]),\n set(map(id, trackable_utils.list_objects(save_template))))\n self.assertDictEqual({\"in_manual_scope\": manual_scope_v},\n manual_scope._trackable_children())\n optimizer = adam.Adam(0.0)\n save_root = tf.train.Checkpoint(\n my_template=save_template, optimizer=optimizer)\n optimizer.minimize(v1_save.read_value,\n var_list=[v1_save])\n self.evaluate([v.initializer for v in save_template.variables])\n optimizer_variables = optimizer.variables() + list(\n optimizer._hyper.values())\n self.evaluate([v.initializer for v in optimizer_variables])\n self.evaluate(v1_save.assign([12.]))\n self.evaluate(v2_save.assign([14.]))\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n save_path = save_root.save(checkpoint_prefix)\n\n load_template = tf.compat.v1.make_template(\"s2\", _templated)\n load_optimizer = adam.Adam(0.0)\n load_root = tf.train.Checkpoint(\n my_template=load_template, optimizer=load_optimizer)\n status = load_root.restore(save_path)\n var, var_plus_one, var2, _, _ = load_template()\n load_optimizer.minimize(var.read_value, var_list=[var])\n\n children = load_template._trackable_children()\n self.assertEqual({\"v\", \"v2\", \"ManualScope\"}, children.keys())\n status.assert_consumed().run_restore_ops()\n self.assertAllEqual([12.], self.evaluate(var))\n self.assertAllEqual([13.], self.evaluate(var_plus_one))\n self.assertAllEqual([14.], self.evaluate(var2))\n\n\nclass CheckpointCompatibilityTests(keras_parameterized.TestCase):\n\n def _initialized_model(self):\n input_value = tf.constant([[3.]])\n model = MyModel()\n optimizer = adam.Adam(0.001)\n root_trackable = tf.train.Checkpoint(\n optimizer=optimizer, model=model)\n with tf.GradientTape() as tape:\n loss = model(input_value)\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n train_op = optimizer.apply_gradients(zip(gradients, variables))\n self.evaluate(trackable_utils.gather_initializers(\n root_trackable))\n self.evaluate(train_op)\n # A regular variable, a slot variable, and a non-slot Optimizer variable\n # with known values to check when loading.\n self.evaluate(model._named_dense.bias.assign([1.]))\n self.evaluate(optimizer.get_slot(\n var=model._named_dense.bias, slot_name=\"m\").assign([2.]))\n self.evaluate(optimizer.beta_1.assign(3.))\n return root_trackable\n\n def _set_sentinels(self, root_trackable):\n self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))\n self.evaluate(\n root_trackable.optimizer.get_slot(\n var=root_trackable.model._named_dense.bias, slot_name=\"m\")\n .assign([102.]))\n self.evaluate(root_trackable.optimizer.beta_1.assign(103.))\n\n def _check_sentinels(self, root_trackable):\n self.assertAllEqual(\n [1.], self.evaluate(root_trackable.model._named_dense.bias))\n self.assertAllEqual([2.], self.evaluate(\n root_trackable.optimizer.get_slot(\n var=root_trackable.model._named_dense.bias, slot_name=\"m\")))\n self.assertAllEqual(3.,\n self.evaluate(root_trackable.optimizer.beta_1))\n\n def _write_name_based_checkpoint(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = 
os.path.join(checkpoint_directory, \"ckpt\")\n with context.graph_mode():\n save_graph = tf.Graph()\n with save_graph.as_default(), self.session(\n graph=save_graph) as session:\n root = self._initialized_model()\n name_saver = tf.compat.v1.train.Saver()\n return name_saver.save(\n sess=session,\n save_path=checkpoint_prefix,\n global_step=root.optimizer.iterations)\n\n @combinations.generate(combinations.combine(mode=[\"graph\", \"eager\"]))\n def testLoadFromNameBasedSaver(self):\n \"\"\"Save a name-based checkpoint, load it using the object-based API.\"\"\"\n with testing_utils.device(should_use_gpu=True):\n with self.test_session():\n save_path = self._write_name_based_checkpoint()\n root = self._initialized_model()\n self._set_sentinels(root)\n with self.assertRaises(AssertionError):\n self._check_sentinels(root)\n object_saver = tf.__internal__.tracking.TrackableSaver(\n tf.__internal__.tracking.ObjectGraphView(root))\n self._set_sentinels(root)\n status = object_saver.restore(save_path)\n if tf.executing_eagerly():\n self._check_sentinels(root)\n if tf.executing_eagerly():\n status.assert_consumed()\n status.assert_existing_objects_matched()\n status.assert_nontrivial_match()\n else:\n # When graph building, we haven't read any keys, so we don't know\n # whether the restore will be complete.\n with self.assertRaisesRegex(AssertionError, \"not restored\"):\n status.assert_consumed()\n with self.assertRaisesRegex(AssertionError, \"not restored\"):\n status.assert_existing_objects_matched()\n with self.assertRaisesRegex(AssertionError, \"not restored\"):\n status.assert_nontrivial_match()\n status.run_restore_ops()\n self._check_sentinels(root)\n self._set_sentinels(root)\n status = object_saver.restore(save_path)\n status.initialize_or_restore()\n status.assert_nontrivial_match()\n self._check_sentinels(root)\n # Check that there is no error when keys are missing from the name-based\n # checkpoint.\n root.not_in_name_checkpoint = tf.Variable([1.])\n status = object_saver.restore(save_path)\n with self.assertRaises(AssertionError):\n status.assert_existing_objects_matched()\n\n def testSaveGraphLoadEager(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n with context.graph_mode():\n save_graph = tf.Graph()\n with save_graph.as_default(), self.session(\n graph=save_graph):\n root = self._initialized_model()\n save_path = root.save(file_prefix=checkpoint_prefix)\n with tf.__internal__.eager_context.eager_mode():\n root = self._initialized_model()\n self._set_sentinels(root)\n root.restore(save_path).assert_consumed()\n self._check_sentinels(root)\n\n def testSaveEagerLoadGraph(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n with tf.__internal__.eager_context.eager_mode():\n root = self._initialized_model()\n save_path = root.save(file_prefix=checkpoint_prefix)\n with context.graph_mode():\n save_graph = tf.Graph()\n with save_graph.as_default(), self.session(\n graph=save_graph):\n root = self._initialized_model()\n self._set_sentinels(root)\n root.restore(save_path).assert_consumed().run_restore_ops()\n self._check_sentinels(root)\n\n def testIgnoreSaveCounter(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n with self.cached_session() as session:\n # Create and save a model using Saver() before using a Checkpoint. 
This\n # generates a snapshot without the Checkpoint's `save_counter`.\n model = sequential.Sequential()\n model.add(reshaping.Flatten(input_shape=(1,)))\n model.add(core.Dense(1))\n name_saver = tf.compat.v1.train.Saver(model.trainable_variables)\n save_path = name_saver.save(\n sess=session, save_path=checkpoint_prefix, global_step=1)\n # Checkpoint.restore must successfully load that checkpoint.\n ckpt = tf.train.Checkpoint(model=model)\n status = ckpt.restore(save_path)\n status.assert_existing_objects_matched()\n # It should, however, refuse to load a checkpoint where an unrelated\n # `save_counter` variable is missing.\n model.layers[1].var = tf.Variable(0., name=\"save_counter\")\n status = ckpt.restore(save_path)\n with self.assertRaises(AssertionError):\n status.assert_existing_objects_matched()\n\n\nif __name__ == \"__main__\":\n tf.compat.v1.enable_eager_execution()\n tf.test.main()\n" ]
[ [ "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.train.CheckpointManager", "tensorflow.python.training.tracking.util.list_objects", "tensorflow.compat.v2.compat.v1.make_template", "tensorflow.compat.v2.__internal__.tracking.ObjectGraphView", "tensorflow.python.training.tracking.util.add_variable", "tensorflow.compat.v2.compat.v1.variable_scope", "tensorflow.compat.v2.__internal__.eager_context.eager_mode", "tensorflow.python.training.tracking.util.capture_dependencies", "tensorflow.python.training.tracking.util.gather_initializers", "tensorflow.compat.v2.compat.v1.get_variable", "tensorflow.compat.v2.Graph", "tensorflow.python.eager.context.graph_mode", "tensorflow.compat.v2.train.Checkpoint", "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.compat.v1.assign", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.train.latest_checkpoint", "tensorflow.compat.v2.compat.v1.get_default_graph", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.compat.v1.test.mock.patch.object", "tensorflow.compat.v2.compat.v1.zeros_initializer", "tensorflow.compat.v2.compat.v1.train.Saver", "tensorflow.compat.v2.compat.v1.enable_eager_execution", "tensorflow.compat.v2.compat.v1.train.Checkpoint", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.compat.v1.train.get_or_create_global_step" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
snth/pandas
[ "49a15b1a80656be0cad351d77c7e0c045fd7cf05" ]
[ "pandas/tests/test_series.py" ]
[ "# coding=utf-8\n# pylint: disable-msg=E1101,W0612\n\nimport re\nimport sys\nfrom datetime import datetime, timedelta\nimport operator\nimport string\nfrom inspect import getargspec\nfrom itertools import product, starmap\nfrom distutils.version import LooseVersion\nimport warnings\nimport random\n\nimport nose\n\nfrom numpy import nan, inf\nimport numpy as np\nimport numpy.ma as ma\nimport pandas as pd\n\nfrom pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,\n date_range, period_range, timedelta_range, _np_version_under1p8)\nfrom pandas.core.index import MultiIndex\nfrom pandas.core.indexing import IndexingError\nfrom pandas.tseries.period import PeriodIndex\nfrom pandas.tseries.index import Timestamp, DatetimeIndex\nfrom pandas.tseries.tdi import Timedelta, TimedeltaIndex\nimport pandas.core.common as com\nimport pandas.core.config as cf\nimport pandas.lib as lib\n\nimport pandas.core.datetools as datetools\nimport pandas.core.nanops as nanops\n\nfrom pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long\nfrom pandas import compat\nfrom pandas.util.testing import (assert_series_equal,\n assert_almost_equal,\n assert_frame_equal,\n assert_index_equal,\n ensure_clean)\nimport pandas.util.testing as tm\n\n\n#------------------------------------------------------------------------------\n# Series test cases\n\nJOIN_TYPES = ['inner', 'outer', 'left', 'right']\n\n\nclass CheckNameIntegration(object):\n\n _multiprocess_can_split_ = True\n\n def test_scalarop_preserve_name(self):\n result = self.ts * 2\n self.assertEqual(result.name, self.ts.name)\n\n def test_copy_name(self):\n result = self.ts.copy()\n self.assertEqual(result.name, self.ts.name)\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n\n self.ts.index.name = None\n self.assertIsNone(self.ts.index.name)\n self.assertIs(self.ts, self.ts)\n\n cp = self.ts.copy()\n cp.index.name = 'foo'\n com.pprint_thing(self.ts.index.name)\n self.assertIsNone(self.ts.index.name)\n\n def test_append_preserve_name(self):\n result = self.ts[:5].append(self.ts[5:])\n self.assertEqual(result.name, self.ts.name)\n\n def test_dt_namespace_accessor(self):\n\n # GH 7207\n # test .dt namespace accessor\n\n ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq','days_in_month','daysinmonth']\n ok_for_period = ok_for_base + ['qyear']\n ok_for_period_methods = ['strftime']\n ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',\n 'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']\n ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert', 'normalize', 'strftime']\n ok_for_td = ['days','seconds','microseconds','nanoseconds']\n ok_for_td_methods = ['components','to_pytimedelta','total_seconds']\n\n def get_expected(s, name):\n result = getattr(Index(s._values),prop)\n if isinstance(result, np.ndarray):\n if com.is_integer_dtype(result):\n result = result.astype('int64')\n elif not com.is_list_like(result):\n return result\n return Series(result,index=s.index)\n\n def compare(s, name):\n a = getattr(s.dt,prop)\n b = get_expected(s,prop)\n if not (com.is_list_like(a) and com.is_list_like(b)):\n self.assertEqual(a,b)\n else:\n tm.assert_series_equal(a,b)\n\n # datetimeindex\n for s in [Series(date_range('20130101',periods=5)),\n Series(date_range('20130101',periods=5,freq='s')),\n 
Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:\n for prop in ok_for_dt:\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_dt_methods:\n getattr(s.dt, prop)\n\n result = s.dt.to_pydatetime()\n self.assertIsInstance(result,np.ndarray)\n self.assertTrue(result.dtype == object)\n\n result = s.dt.tz_localize('US/Eastern')\n expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index)\n tm.assert_series_equal(result, expected)\n\n tz_result = result.dt.tz\n self.assertEqual(str(tz_result), 'US/Eastern')\n freq_result = s.dt.freq\n self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)\n\n # let's localize, then convert\n result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')\n expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index)\n tm.assert_series_equal(result, expected)\n\n # datetimeindex with tz\n s = Series(date_range('20130101',periods=5,tz='US/Eastern'))\n for prop in ok_for_dt:\n\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_dt_methods:\n getattr(s.dt,prop)\n\n result = s.dt.to_pydatetime()\n self.assertIsInstance(result,np.ndarray)\n self.assertTrue(result.dtype == object)\n\n result = s.dt.tz_convert('CET')\n expected = Series(s._values.tz_convert('CET'),index=s.index)\n tm.assert_series_equal(result, expected)\n\n tz_result = result.dt.tz\n self.assertEqual(str(tz_result), 'CET')\n freq_result = s.dt.freq\n self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)\n\n # timedeltaindex\n for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')),\n Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),\n Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]:\n for prop in ok_for_td:\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_td_methods:\n getattr(s.dt, prop)\n\n result = s.dt.components\n self.assertIsInstance(result,DataFrame)\n tm.assert_index_equal(result.index,s.index)\n\n result = s.dt.to_pytimedelta()\n self.assertIsInstance(result,np.ndarray)\n self.assertTrue(result.dtype == object)\n\n result = s.dt.total_seconds()\n self.assertIsInstance(result,pd.Series)\n self.assertTrue(result.dtype == 'float64')\n\n freq_result = s.dt.freq\n self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)\n\n # both\n index = date_range('20130101',periods=3,freq='D')\n s = Series(date_range('20140204',periods=3,freq='s'),index=index)\n tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index))\n tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index))\n tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))\n tm.assert_series_equal(s.dt.normalize(), pd.Series([s[0]] * 3, index=index))\n\n # periodindex\n for s in [Series(period_range('20130101',periods=5,freq='D'))]:\n for prop in ok_for_period:\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_period_methods:\n getattr(s.dt, prop)\n\n freq_result = s.dt.freq\n self.assertEqual(freq_result, PeriodIndex(s.values).freq)\n\n # test limited display api\n def get_dir(s):\n results = [ r for r in s.dt.__dir__() if not r.startswith('_') ]\n return list(sorted(set(results)))\n\n s = Series(date_range('20130101',periods=5,freq='D'))\n results = get_dir(s)\n tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + 
ok_for_dt_methods))))\n\n s = Series(period_range('20130101',periods=5,freq='D').asobject)\n results = get_dir(s)\n tm.assert_almost_equal(results, list(sorted(set(ok_for_period + ok_for_period_methods))))\n\n # 11295\n # ambiguous time error on the conversions\n s = Series(pd.date_range('2015-01-01', '2016-01-01', freq='T'))\n s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago')\n results = get_dir(s)\n tm.assert_almost_equal(results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))\n expected = Series(pd.date_range('2015-01-01',\n '2016-01-01',\n freq='T',\n tz='UTC').tz_convert('America/Chicago'))\n tm.assert_series_equal(s, expected)\n\n # no setting allowed\n s = Series(date_range('20130101',periods=5,freq='D'))\n with tm.assertRaisesRegexp(ValueError, \"modifications\"):\n s.dt.hour = 5\n\n # trying to set a copy\n with pd.option_context('chained_assignment','raise'):\n def f():\n s.dt.hour[0] = 5\n self.assertRaises(com.SettingWithCopyError, f)\n\n def test_strftime(self):\n # GH 10086\n s = Series(date_range('20130101', periods=5))\n result = s.dt.strftime('%Y/%m/%d')\n expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])\n tm.assert_series_equal(result, expected)\n\n s = Series(date_range('2015-02-03 11:22:33.4567', periods=5))\n result = s.dt.strftime('%Y/%m/%d %H-%M-%S')\n expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33', '2015/02/05 11-22-33',\n '2015/02/06 11-22-33', '2015/02/07 11-22-33'])\n tm.assert_series_equal(result, expected)\n\n s = Series(period_range('20130101', periods=5))\n result = s.dt.strftime('%Y/%m/%d')\n expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])\n tm.assert_series_equal(result, expected)\n\n s = Series(period_range('2015-02-03 11:22:33.4567', periods=5, freq='s'))\n result = s.dt.strftime('%Y/%m/%d %H-%M-%S')\n expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34', '2015/02/03 11-22-35',\n '2015/02/03 11-22-36', '2015/02/03 11-22-37'])\n tm.assert_series_equal(result, expected)\n\n s = Series(date_range('20130101', periods=5))\n s.iloc[0] = pd.NaT\n result = s.dt.strftime('%Y/%m/%d')\n expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])\n tm.assert_series_equal(result, expected)\n\n datetime_index = date_range('20150301', periods=5)\n result = datetime_index.strftime(\"%Y/%m/%d\")\n expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object)\n self.assert_numpy_array_equal(result, expected)\n\n period_index = period_range('20150301', periods=5)\n result = period_index.strftime(\"%Y/%m/%d\")\n expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object)\n self.assert_numpy_array_equal(result, expected)\n\n s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)])\n result = s.dt.strftime('%Y-%m-%d %H:%M:%S')\n expected = Series([\"2013-01-01 02:32:59\", \"2013-01-02 14:32:01\"])\n tm.assert_series_equal(result, expected)\n\n s = Series(period_range('20130101', periods=4, freq='H'))\n result = s.dt.strftime('%Y/%m/%d %H:%M:%S')\n expected = Series([\"2013/01/01 00:00:00\", \"2013/01/01 01:00:00\",\n \"2013/01/01 02:00:00\", \"2013/01/01 03:00:00\"])\n\n s = Series(period_range('20130101', periods=4, freq='L'))\n result = s.dt.strftime('%Y/%m/%d %H:%M:%S.%l')\n expected = Series([\"2013/01/01 00:00:00.000\", \"2013/01/01 00:00:00.001\",\n \"2013/01/01 00:00:00.002\", \"2013/01/01 00:00:00.003\"])\n 
tm.assert_series_equal(result, expected)\n\n def test_valid_dt_with_missing_values(self):\n\n from datetime import date, time\n\n # GH 8689\n s = Series(date_range('20130101',periods=5,freq='D'))\n s.iloc[2] = pd.NaT\n\n for attr in ['microsecond','nanosecond','second','minute','hour','day']:\n expected = getattr(s.dt,attr).copy()\n expected.iloc[2] = np.nan\n result = getattr(s.dt,attr)\n tm.assert_series_equal(result, expected)\n\n result = s.dt.date\n expected = Series([date(2013,1,1),date(2013,1,2),np.nan,date(2013,1,4),date(2013,1,5)],dtype='object')\n tm.assert_series_equal(result, expected)\n\n result = s.dt.time\n expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object')\n tm.assert_series_equal(result, expected)\n\n def test_dt_accessor_api(self):\n # GH 9322\n from pandas.tseries.common import (CombinedDatetimelikeProperties,\n DatetimeProperties)\n self.assertIs(Series.dt, CombinedDatetimelikeProperties)\n\n s = Series(date_range('2000-01-01', periods=3))\n self.assertIsInstance(s.dt, DatetimeProperties)\n\n for s in [Series(np.arange(5)),\n Series(list('abcde')),\n Series(np.random.randn(5))]:\n with tm.assertRaisesRegexp(AttributeError,\n \"only use .dt accessor\"):\n s.dt\n self.assertFalse(hasattr(s, 'dt'))\n\n def test_tab_completion(self):\n # GH 9910\n s = Series(list('abcd'))\n # Series of str values should have .str but not .dt/.cat in __dir__\n self.assertTrue('str' in dir(s))\n self.assertTrue('dt' not in dir(s))\n self.assertTrue('cat' not in dir(s))\n\n # similiarly for .dt\n s = Series(date_range('1/1/2015', periods=5))\n self.assertTrue('dt' in dir(s))\n self.assertTrue('str' not in dir(s))\n self.assertTrue('cat' not in dir(s))\n\n # similiarly for .cat\n s = Series(list('abbcd'), dtype=\"category\")\n self.assertTrue('cat' in dir(s))\n self.assertTrue('str' not in dir(s))\n self.assertTrue('dt' not in dir(s))\n\n def test_binop_maybe_preserve_name(self):\n # names match, preserve\n result = self.ts * self.ts\n self.assertEqual(result.name, self.ts.name)\n result = self.ts.mul(self.ts)\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts * self.ts[:-2]\n self.assertEqual(result.name, self.ts.name)\n\n # names don't match, don't preserve\n cp = self.ts.copy()\n cp.name = 'something else'\n result = self.ts + cp\n self.assertIsNone(result.name)\n result = self.ts.add(cp)\n self.assertIsNone(result.name)\n\n ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow']\n ops = ops + ['r' + op for op in ops]\n for op in ops:\n # names match, preserve\n s = self.ts.copy()\n result = getattr(s, op)(s)\n self.assertEqual(result.name, self.ts.name)\n\n # names don't match, don't preserve\n cp = self.ts.copy()\n cp.name = 'changed'\n result = getattr(s, op)(cp)\n self.assertIsNone(result.name)\n\n def test_combine_first_name(self):\n result = self.ts.combine_first(self.ts[:5])\n self.assertEqual(result.name, self.ts.name)\n\n def test_combine_first_dt64(self):\n from pandas.tseries.tools import to_datetime\n s0 = to_datetime(Series([\"2010\", np.NaN]))\n s1 = to_datetime(Series([np.NaN, \"2011\"]))\n rs = s0.combine_first(s1)\n xp = to_datetime(Series(['2010', '2011']))\n assert_series_equal(rs, xp)\n\n s0 = to_datetime(Series([\"2010\", np.NaN]))\n s1 = Series([np.NaN, \"2011\"])\n rs = s0.combine_first(s1)\n xp = Series([datetime(2010, 1, 1), '2011'])\n assert_series_equal(rs, xp)\n\n def test_get(self):\n\n # GH 6383\n s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,\n 45, 51, 39, 55, 43, 54, 52, 51, 54]))\n\n 
result = s.get(25, 0)\n expected = 0\n self.assertEqual(result,expected)\n\n s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,\n 45, 51, 39, 55, 43, 54, 52, 51, 54]),\n index=pd.Float64Index([25.0, 36.0, 49.0, 64.0, 81.0, 100.0,\n 121.0, 144.0, 169.0, 196.0, 1225.0,\n 1296.0, 1369.0, 1444.0, 1521.0, 1600.0,\n 1681.0, 1764.0, 1849.0, 1936.0],\n dtype='object'))\n\n result = s.get(25, 0)\n expected = 43\n self.assertEqual(result,expected)\n\n # GH 7407\n # with a boolean accessor\n df = pd.DataFrame({'i':[0]*3, 'b':[False]*3})\n vc = df.i.value_counts()\n result = vc.get(99,default='Missing')\n self.assertEqual(result,'Missing')\n\n vc = df.b.value_counts()\n result = vc.get(False,default='Missing')\n self.assertEqual(result,3)\n\n result = vc.get(True,default='Missing')\n self.assertEqual(result,'Missing')\n\n def test_delitem(self):\n\n # GH 5542\n # should delete the item inplace\n s = Series(lrange(5))\n del s[0]\n\n expected = Series(lrange(1,5),index=lrange(1,5))\n assert_series_equal(s, expected)\n\n del s[1]\n expected = Series(lrange(2,5),index=lrange(2,5))\n assert_series_equal(s, expected)\n\n # empty\n s = Series()\n def f():\n del s[0]\n self.assertRaises(KeyError, f)\n\n # only 1 left, del, add, del\n s = Series(1)\n del s[0]\n assert_series_equal(s, Series(dtype='int64'))\n s[0] = 1\n assert_series_equal(s, Series(1))\n del s[0]\n assert_series_equal(s, Series(dtype='int64'))\n\n def test_getitem_preserve_name(self):\n result = self.ts[self.ts > 0]\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts[[0, 2, 4]]\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts[5:10]\n self.assertEqual(result.name, self.ts.name)\n\n def test_getitem_setitem_ellipsis(self):\n s = Series(np.random.randn(10))\n\n np.fix(s)\n\n result = s[...]\n assert_series_equal(result, s)\n\n s[...] 
= 5\n self.assertTrue((result == 5).all())\n\n def test_getitem_negative_out_of_bounds(self):\n s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))\n\n self.assertRaises(IndexError, s.__getitem__, -11)\n self.assertRaises(IndexError, s.__setitem__, -11, 'foo')\n\n def test_multilevel_name_print(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n s = Series(lrange(0, len(index)), index=index, name='sth')\n expected = [\"first second\",\n \"foo one 0\",\n \" two 1\",\n \" three 2\",\n \"bar one 3\",\n \" two 4\",\n \"baz two 5\",\n \" three 6\",\n \"qux one 7\",\n \" two 8\",\n \" three 9\",\n \"Name: sth, dtype: int64\"]\n expected = \"\\n\".join(expected)\n self.assertEqual(repr(s), expected)\n\n def test_multilevel_preserve_name(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n s = Series(np.random.randn(len(index)), index=index, name='sth')\n\n result = s['foo']\n result2 = s.ix['foo']\n self.assertEqual(result.name, s.name)\n self.assertEqual(result2.name, s.name)\n\n def test_name_printing(self):\n # test small series\n s = Series([0, 1, 2])\n s.name = \"test\"\n self.assertIn(\"Name: test\", repr(s))\n s.name = None\n self.assertNotIn(\"Name:\", repr(s))\n # test big series (diff code path)\n s = Series(lrange(0, 1000))\n s.name = \"test\"\n self.assertIn(\"Name: test\", repr(s))\n s.name = None\n self.assertNotIn(\"Name:\", repr(s))\n\n s = Series(index=date_range('20010101', '20020101'), name='test')\n self.assertIn(\"Name: test\", repr(s))\n\n def test_pickle_preserve_name(self):\n unpickled = self._pickle_roundtrip_name(self.ts)\n self.assertEqual(unpickled.name, self.ts.name)\n\n def _pickle_roundtrip_name(self, obj):\n\n with ensure_clean() as path:\n obj.to_pickle(path)\n unpickled = pd.read_pickle(path)\n return unpickled\n\n def test_argsort_preserve_name(self):\n result = self.ts.argsort()\n self.assertEqual(result.name, self.ts.name)\n\n def test_sort_index_name(self):\n result = self.ts.sort_index(ascending=False)\n self.assertEqual(result.name, self.ts.name)\n\n def test_to_sparse_pass_name(self):\n result = self.ts.to_sparse()\n self.assertEqual(result.name, self.ts.name)\n\n\nclass TestNanops(tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def test_comparisons(self):\n left = np.random.randn(10)\n right = np.random.randn(10)\n left[:3] = np.nan\n\n result = nanops.nangt(left, right)\n expected = (left > right).astype('O')\n expected[:3] = np.nan\n\n assert_almost_equal(result, expected)\n\n s = Series(['a', 'b', 'c'])\n s2 = Series([False, True, False])\n\n # it works!\n s == s2\n s2 == s\n\n def test_sum_zero(self):\n arr = np.array([])\n self.assertEqual(nanops.nansum(arr), 0)\n\n arr = np.empty((10, 0))\n self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())\n\n # GH #844\n s = Series([], index=[])\n self.assertEqual(s.sum(), 0)\n\n df = DataFrame(np.empty((10, 0)))\n self.assertTrue((df.sum(1) == 0).all())\n\n def test_nansum_buglet(self):\n s = Series([1.0, np.nan], index=[0, 1])\n result = np.nansum(s)\n assert_almost_equal(result, 1)\n\n def test_overflow(self):\n # GH 6915\n # overflowing on the smaller int dtypes\n for dtype in ['int32','int64']:\n v = np.arange(5000000,dtype=dtype)\n s = Series(v)\n\n # no bottleneck\n result = 
s.sum(skipna=False)\n self.assertEqual(int(result),v.sum(dtype='int64'))\n result = s.min(skipna=False)\n self.assertEqual(int(result),0)\n result = s.max(skipna=False)\n self.assertEqual(int(result),v[-1])\n\n # use bottleneck if available\n result = s.sum()\n self.assertEqual(int(result),v.sum(dtype='int64'))\n result = s.min()\n self.assertEqual(int(result),0)\n result = s.max()\n self.assertEqual(int(result),v[-1])\n\n for dtype in ['float32', 'float64']:\n v = np.arange(5000000, dtype=dtype)\n s = Series(v)\n\n # no bottleneck\n result = s.sum(skipna=False)\n self.assertEqual(result, v.sum(dtype=dtype))\n result = s.min(skipna=False)\n self.assertTrue(np.allclose(float(result), 0.0))\n result = s.max(skipna=False)\n self.assertTrue(np.allclose(float(result), v[-1]))\n\n # use bottleneck if available\n result = s.sum()\n self.assertEqual(result, v.sum(dtype=dtype))\n result = s.min()\n self.assertTrue(np.allclose(float(result), 0.0))\n result = s.max()\n self.assertTrue(np.allclose(float(result), v[-1]))\n\nclass SafeForSparse(object):\n pass\n\n_ts = tm.makeTimeSeries()\n\nclass TestSeries(tm.TestCase, CheckNameIntegration):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n import warnings\n\n self.ts = _ts.copy()\n self.ts.name = 'ts'\n\n self.series = tm.makeStringSeries()\n self.series.name = 'series'\n\n self.objSeries = tm.makeObjectSeries()\n self.objSeries.name = 'objects'\n\n self.empty = Series([], index=[])\n\n def test_scalar_conversion(self):\n\n # Pass in scalar is disabled\n scalar = Series(0.5)\n self.assertNotIsInstance(scalar, float)\n\n # coercion\n self.assertEqual(float(Series([1.])), 1.0)\n self.assertEqual(int(Series([1.])), 1)\n self.assertEqual(long(Series([1.])), 1)\n\n\n def test_astype(self):\n s = Series(np.random.randn(5),name='foo')\n\n for dtype in ['float32','float64','int64','int32']:\n astyped = s.astype(dtype)\n self.assertEqual(astyped.dtype, dtype)\n self.assertEqual(astyped.name, s.name)\n\n def test_TimeSeries_deprecation(self):\n\n # deprecation TimeSeries, #10890\n with tm.assert_produces_warning(FutureWarning):\n pd.TimeSeries(1,index=date_range('20130101',periods=3))\n\n def test_constructor(self):\n # Recognize TimeSeries\n with tm.assert_produces_warning(FutureWarning):\n self.assertTrue(self.ts.is_time_series)\n self.assertTrue(self.ts.index.is_all_dates)\n\n # Pass in Series\n derived = Series(self.ts)\n with tm.assert_produces_warning(FutureWarning):\n self.assertTrue(derived.is_time_series)\n self.assertTrue(derived.index.is_all_dates)\n\n self.assertTrue(tm.equalContents(derived.index, self.ts.index))\n # Ensure new index is not created\n self.assertEqual(id(self.ts.index), id(derived.index))\n\n # Mixed type Series\n mixed = Series(['hello', np.NaN], index=[0, 1])\n self.assertEqual(mixed.dtype, np.object_)\n self.assertIs(mixed[1], np.NaN)\n\n with tm.assert_produces_warning(FutureWarning):\n self.assertFalse(self.empty.is_time_series)\n self.assertFalse(self.empty.index.is_all_dates)\n with tm.assert_produces_warning(FutureWarning):\n self.assertFalse(Series({}).is_time_series)\n self.assertFalse(Series({}).index.is_all_dates)\n self.assertRaises(Exception, Series, np.random.randn(3, 3),\n index=np.arange(3))\n\n mixed.name = 'Series'\n rs = Series(mixed).name\n xp = 'Series'\n self.assertEqual(rs, xp)\n\n # raise on MultiIndex GH4187\n m = MultiIndex.from_arrays([[1, 2], [3, 4]])\n self.assertRaises(NotImplementedError, Series, m)\n\n def test_constructor_empty(self):\n empty = Series()\n empty2 = Series([])\n 
assert_series_equal(empty, empty2)\n\n empty = Series(index=lrange(10))\n empty2 = Series(np.nan, index=lrange(10))\n assert_series_equal(empty, empty2)\n\n def test_constructor_series(self):\n index1 = ['d', 'b', 'a', 'c']\n index2 = sorted(index1)\n s1 = Series([4, 7, -5, 3], index=index1)\n s2 = Series(s1, index=index2)\n\n assert_series_equal(s2, s1.sort_index())\n\n def test_constructor_iterator(self):\n\n expected = Series(list(range(10)),dtype='int64')\n result = Series(range(10),dtype='int64')\n assert_series_equal(result, expected)\n\n def test_constructor_generator(self):\n gen = (i for i in range(10))\n\n result = Series(gen)\n exp = Series(lrange(10))\n assert_series_equal(result, exp)\n\n gen = (i for i in range(10))\n result = Series(gen, index=lrange(10, 20))\n exp.index = lrange(10, 20)\n assert_series_equal(result, exp)\n\n def test_constructor_map(self):\n # GH8909\n m = map(lambda x: x, range(10))\n\n result = Series(m)\n exp = Series(lrange(10))\n assert_series_equal(result, exp)\n\n m = map(lambda x: x, range(10))\n result = Series(m, index=lrange(10, 20))\n exp.index = lrange(10, 20)\n assert_series_equal(result, exp)\n\n def test_constructor_categorical(self):\n cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True)\n res = Series(cat)\n self.assertTrue(res.values.equals(cat))\n\n def test_constructor_maskedarray(self):\n data = ma.masked_all((3,), dtype=float)\n result = Series(data)\n expected = Series([nan, nan, nan])\n assert_series_equal(result, expected)\n\n data[0] = 0.0\n data[2] = 2.0\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([0.0, nan, 2.0], index=index)\n assert_series_equal(result, expected)\n\n data[1] = 1.0\n result = Series(data, index=index)\n expected = Series([0.0, 1.0, 2.0], index=index)\n assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype=int)\n result = Series(data)\n expected = Series([nan, nan, nan], dtype=float)\n assert_series_equal(result, expected)\n\n data[0] = 0\n data[2] = 2\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([0, nan, 2], index=index, dtype=float)\n assert_series_equal(result, expected)\n\n data[1] = 1\n result = Series(data, index=index)\n expected = Series([0, 1, 2], index=index, dtype=int)\n assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype=bool)\n result = Series(data)\n expected = Series([nan, nan, nan], dtype=object)\n assert_series_equal(result, expected)\n\n data[0] = True\n data[2] = False\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([True, nan, False], index=index, dtype=object)\n assert_series_equal(result, expected)\n\n data[1] = True\n result = Series(data, index=index)\n expected = Series([True, True, False], index=index, dtype=bool)\n assert_series_equal(result, expected)\n\n from pandas import tslib\n data = ma.masked_all((3,), dtype='M8[ns]')\n result = Series(data)\n expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT], dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n data[0] = datetime(2001, 1, 1)\n data[2] = datetime(2001, 1, 3)\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([datetime(2001, 1, 1), tslib.iNaT,\n datetime(2001, 1, 3)], index=index, dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n data[1] = datetime(2001, 1, 2)\n result = Series(data, index=index)\n expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),\n datetime(2001, 1, 3)], index=index, 
dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n def test_constructor_default_index(self):\n s = Series([0, 1, 2])\n assert_almost_equal(s.index, np.arange(3))\n\n def test_constructor_corner(self):\n df = tm.makeTimeDataFrame()\n objs = [df, df]\n s = Series(objs, index=[0, 1])\n tm.assertIsInstance(s, Series)\n\n def test_constructor_sanitize(self):\n s = Series(np.array([1., 1., 8.]), dtype='i8')\n self.assertEqual(s.dtype, np.dtype('i8'))\n\n s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')\n self.assertEqual(s.dtype, np.dtype('f8'))\n\n def test_constructor_pass_none(self):\n s = Series(None, index=lrange(5))\n self.assertEqual(s.dtype, np.float64)\n\n s = Series(None, index=lrange(5), dtype=object)\n self.assertEqual(s.dtype, np.object_)\n\n # GH 7431\n # inference on the index\n s = Series(index=np.array([None]))\n expected = Series(index=Index([None]))\n assert_series_equal(s,expected)\n\n def test_constructor_cast(self):\n self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)\n\n def test_constructor_dtype_nocast(self):\n # 1572\n s = Series([1, 2, 3])\n\n s2 = Series(s, dtype=np.int64)\n\n s2[1] = 5\n self.assertEqual(s[1], 5)\n\n def test_constructor_datelike_coercion(self):\n\n # GH 9477\n # incorrectly infering on dateimelike looking when object dtype is specified\n s = Series([Timestamp('20130101'),'NOV'],dtype=object)\n self.assertEqual(s.iloc[0],Timestamp('20130101'))\n self.assertEqual(s.iloc[1],'NOV')\n self.assertTrue(s.dtype == object)\n\n # the dtype was being reset on the slicing and re-inferred to datetime even\n # thought the blocks are mixed\n belly = '216 3T19'.split()\n wing1 = '2T15 4H19'.split()\n wing2 = '416 4T20'.split()\n mat = pd.to_datetime('2016-01-22 2019-09-07'.split())\n df = pd.DataFrame({'wing1':wing1, 'wing2':wing2, 'mat':mat}, index=belly)\n\n result = df.loc['3T19']\n self.assertTrue(result.dtype == object)\n result = df.loc['216']\n self.assertTrue(result.dtype == object)\n\n def test_constructor_dtype_datetime64(self):\n import pandas.tslib as tslib\n\n s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))\n self.assertTrue(isnull(s).all())\n\n # in theory this should be all nulls, but since\n # we are not specifying a dtype is ambiguous\n s = Series(tslib.iNaT, index=lrange(5))\n self.assertFalse(isnull(s).all())\n\n s = Series(nan, dtype='M8[ns]', index=lrange(5))\n self.assertTrue(isnull(s).all())\n\n s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]')\n self.assertTrue(isnull(s[1]))\n self.assertEqual(s.dtype, 'M8[ns]')\n\n s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')\n self.assertTrue(isnull(s[1]))\n self.assertEqual(s.dtype, 'M8[ns]')\n\n # GH3416\n dates = [\n np.datetime64(datetime(2013, 1, 1)),\n np.datetime64(datetime(2013, 1, 2)),\n np.datetime64(datetime(2013, 1, 3)),\n ]\n\n s = Series(dates)\n self.assertEqual(s.dtype, 'M8[ns]')\n\n s.ix[0] = np.nan\n self.assertEqual(s.dtype, 'M8[ns]')\n\n # invalid astypes\n for t in ['s', 'D', 'us', 'ms']:\n self.assertRaises(TypeError, s.astype, 'M8[%s]' % t)\n\n # GH3414 related\n self.assertRaises(TypeError, lambda x: Series(\n Series(dates).astype('int') / 1000000, dtype='M8[ms]'))\n self.assertRaises(\n TypeError, lambda x: Series(dates, dtype='datetime64'))\n\n # invalid dates can be help as object\n result = Series([datetime(2,1,1)])\n self.assertEqual(result[0], datetime(2,1,1,0,0))\n\n result = Series([datetime(3000,1,1)])\n self.assertEqual(result[0], datetime(3000,1,1,0,0))\n\n # don't mix types\n result = 
Series([ Timestamp('20130101'), 1],index=['a','b'])\n self.assertEqual(result['a'], Timestamp('20130101'))\n self.assertEqual(result['b'], 1)\n\n # GH6529\n # coerce datetime64 non-ns properly\n dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')\n values2 = dates.view(np.ndarray).astype('datetime64[ns]')\n expected = Series(values2, dates)\n\n for dtype in ['s', 'D', 'ms', 'us', 'ns']:\n values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))\n result = Series(values1, dates)\n assert_series_equal(result,expected)\n\n # leave datetime.date alone\n dates2 = np.array([d.date() for d in dates.to_pydatetime()],\n dtype=object)\n series1 = Series(dates2, dates)\n self.assert_numpy_array_equal(series1.values,dates2)\n self.assertEqual(series1.dtype,object)\n\n # these will correctly infer a datetime\n s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n\n # tz-aware (UTC and other tz's)\n # GH 8411\n dr = date_range('20130101',periods=3)\n self.assertTrue(Series(dr).iloc[0].tz is None)\n dr = date_range('20130101',periods=3,tz='UTC')\n self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')\n dr = date_range('20130101',periods=3,tz='US/Eastern')\n self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')\n\n # non-convertible\n s = Series([1479596223000, -1479590, pd.NaT])\n self.assertTrue(s.dtype == 'object')\n self.assertTrue(s[2] is pd.NaT)\n self.assertTrue('NaT' in str(s))\n\n # if we passed a NaT it remains\n s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])\n self.assertTrue(s.dtype == 'object')\n self.assertTrue(s[2] is pd.NaT)\n self.assertTrue('NaT' in str(s))\n\n # if we passed a nan it remains\n s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])\n self.assertTrue(s.dtype == 'object')\n self.assertTrue(s[2] is np.nan)\n self.assertTrue('NaN' in str(s))\n\n def test_constructor_with_datetime_tz(self):\n\n # 8260\n # support datetime64 with tz\n\n dr = date_range('20130101',periods=3,tz='US/Eastern')\n s = Series(dr)\n self.assertTrue(s.dtype.name == 'datetime64[ns, US/Eastern]')\n self.assertTrue(s.dtype == 'datetime64[ns, US/Eastern]')\n self.assertTrue(com.is_datetime64tz_dtype(s.dtype))\n self.assertTrue('datetime64[ns, US/Eastern]' in str(s))\n\n # export\n result = s.values\n self.assertIsInstance(result, np.ndarray)\n self.assertTrue(result.dtype == 'datetime64[ns]')\n self.assertTrue(dr.equals(pd.DatetimeIndex(result).tz_localize('UTC').tz_convert(tz=s.dt.tz)))\n\n # indexing\n result = s.iloc[0]\n self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))\n result = s[0]\n self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))\n\n result = s[Series([True,True,False],index=s.index)]\n assert_series_equal(result,s[0:2])\n\n result = s.iloc[0:1]\n assert_series_equal(result,Series(dr[0:1]))\n\n # concat\n result = pd.concat([s.iloc[0:1],s.iloc[1:]])\n assert_series_equal(result,s)\n\n # astype\n result = s.astype(object)\n expected = Series(DatetimeIndex(s._values).asobject)\n assert_series_equal(result, expected)\n\n result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz)\n 
assert_series_equal(result, s)\n\n # astype - datetime64[ns, tz]\n result = Series(s.values).astype('datetime64[ns, US/Eastern]')\n assert_series_equal(result, s)\n\n result = Series(s.values).astype(s.dtype)\n assert_series_equal(result, s)\n\n result = s.astype('datetime64[ns, CET]')\n expected = Series(date_range('20130101 06:00:00',periods=3,tz='CET'))\n assert_series_equal(result, expected)\n\n # short str\n self.assertTrue('datetime64[ns, US/Eastern]' in str(s))\n\n # formatting with NaT\n result = s.shift()\n self.assertTrue('datetime64[ns, US/Eastern]' in str(result))\n self.assertTrue('NaT' in str(result))\n\n # long str\n t = Series(date_range('20130101',periods=1000,tz='US/Eastern'))\n self.assertTrue('datetime64[ns, US/Eastern]' in str(t))\n\n result = pd.DatetimeIndex(s,freq='infer')\n tm.assert_index_equal(result, dr)\n\n # inference\n s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])\n self.assertTrue(s.dtype == 'datetime64[ns, US/Pacific]')\n self.assertTrue(lib.infer_dtype(s) == 'datetime64')\n\n s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])\n self.assertTrue(s.dtype == 'object')\n self.assertTrue(lib.infer_dtype(s) == 'datetime')\n\n def test_constructor_periodindex(self):\n # GH7932\n # converting a PeriodIndex when put in a Series\n\n pi = period_range('20130101',periods=5,freq='D')\n s = Series(pi)\n expected = Series(pi.asobject)\n assert_series_equal(s, expected)\n\n def test_constructor_dict(self):\n d = {'a': 0., 'b': 1., 'c': 2.}\n result = Series(d, index=['b', 'c', 'd', 'a'])\n expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])\n assert_series_equal(result, expected)\n\n pidx = tm.makePeriodIndex(100)\n d = {pidx[0]: 0, pidx[1]: 1}\n result = Series(d, index=pidx)\n expected = Series(np.nan, pidx)\n expected.ix[0] = 0\n expected.ix[1] = 1\n assert_series_equal(result, expected)\n\n def test_constructor_dict_multiindex(self):\n check = lambda result, expected: tm.assert_series_equal(\n result, expected, check_dtype=True, check_index_type=True,\n check_series_type=True)\n d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}\n _d = sorted(d.items())\n ser = Series(d)\n expected = Series([x[1] for x in _d],\n index=MultiIndex.from_tuples([x[0] for x in _d]))\n check(ser, expected)\n\n d['z'] = 111.\n _d.insert(0, ('z', d['z']))\n ser = Series(d)\n expected = Series(\n [x[1] for x in _d],\n index=Index([x[0] for x in _d], tupleize_cols=False))\n ser = ser.reindex(index=expected.index)\n check(ser, expected)\n\n def test_constructor_subclass_dict(self):\n data = tm.TestSubDict((x, 10.0 * x) for x in range(10))\n series = Series(data)\n refseries = Series(dict(compat.iteritems(data)))\n assert_series_equal(refseries, series)\n\n def test_constructor_dict_datetime64_index(self):\n # GH 9456\n\n dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']\n values = [42544017.198965244, 1234565, 40512335.181958228, -1]\n\n def create_data(constructor):\n return dict(zip((constructor(x) for x in dates_as_str), values))\n\n data_datetime64 = create_data(np.datetime64)\n data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))\n data_Timestamp = create_data(Timestamp)\n\n expected = Series(values, (Timestamp(x) for x in dates_as_str))\n\n result_datetime64 = Series(data_datetime64)\n result_datetime = Series(data_datetime)\n result_Timestamp = Series(data_Timestamp)\n\n 
assert_series_equal(result_datetime64, expected)\n assert_series_equal(result_datetime, expected)\n assert_series_equal(result_Timestamp, expected)\n\n def test_orderedDict_ctor(self):\n # GH3283\n import pandas\n import random\n data = OrderedDict([('col%s' % i, random.random()) for i in range(12)])\n s = pandas.Series(data)\n self.assertTrue(all(s.values == list(data.values())))\n\n def test_orderedDict_subclass_ctor(self):\n # GH3283\n import pandas\n import random\n\n class A(OrderedDict):\n pass\n data = A([('col%s' % i, random.random()) for i in range(12)])\n s = pandas.Series(data)\n self.assertTrue(all(s.values == list(data.values())))\n\n def test_constructor_list_of_tuples(self):\n data = [(1, 1), (2, 2), (2, 3)]\n s = Series(data)\n self.assertEqual(list(s), data)\n\n def test_constructor_tuple_of_tuples(self):\n data = ((1, 1), (2, 2), (2, 3))\n s = Series(data)\n self.assertEqual(tuple(s), data)\n\n def test_constructor_set(self):\n values = set([1, 2, 3, 4, 5])\n self.assertRaises(TypeError, Series, values)\n values = frozenset(values)\n self.assertRaises(TypeError, Series, values)\n\n def test_fromDict(self):\n data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}\n\n series = Series(data)\n self.assertTrue(tm.is_sorted(series.index))\n\n data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}\n series = Series(data)\n self.assertEqual(series.dtype, np.object_)\n\n data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}\n series = Series(data)\n self.assertEqual(series.dtype, np.object_)\n\n data = {'a': '0', 'b': '1'}\n series = Series(data, dtype=float)\n self.assertEqual(series.dtype, np.float64)\n\n def test_setindex(self):\n # wrong type\n series = self.series.copy()\n self.assertRaises(TypeError, setattr, series, 'index', None)\n\n # wrong length\n series = self.series.copy()\n self.assertRaises(Exception, setattr, series, 'index',\n np.arange(len(series) - 1))\n\n # works\n series = self.series.copy()\n series.index = np.arange(len(series))\n tm.assertIsInstance(series.index, Index)\n\n def test_array_finalize(self):\n pass\n\n def test_pop(self):\n # GH 6600\n df = DataFrame({\n 'A': 0,\n 'B': np.arange(5,dtype='int64'),\n 'C': 0,\n })\n k = df.iloc[4]\n\n result = k.pop('B')\n self.assertEqual(result, 4)\n\n expected = Series([0, 0], index=['A', 'C'], name=4)\n assert_series_equal(k, expected)\n\n def test_not_hashable(self):\n s_empty = Series()\n s = Series([1])\n self.assertRaises(TypeError, hash, s_empty)\n self.assertRaises(TypeError, hash, s)\n\n def test_fromValue(self):\n\n nans = Series(np.NaN, index=self.ts.index)\n self.assertEqual(nans.dtype, np.float_)\n self.assertEqual(len(nans), len(self.ts))\n\n strings = Series('foo', index=self.ts.index)\n self.assertEqual(strings.dtype, np.object_)\n self.assertEqual(len(strings), len(self.ts))\n\n d = datetime.now()\n dates = Series(d, index=self.ts.index)\n self.assertEqual(dates.dtype, 'M8[ns]')\n self.assertEqual(len(dates), len(self.ts))\n\n def test_contains(self):\n tm.assert_contains_all(self.ts.index, self.ts)\n\n def test_pickle(self):\n unp_series = self._pickle_roundtrip(self.series)\n unp_ts = self._pickle_roundtrip(self.ts)\n assert_series_equal(unp_series, self.series)\n assert_series_equal(unp_ts, self.ts)\n\n def _pickle_roundtrip(self, obj):\n\n with ensure_clean() as path:\n obj.to_pickle(path)\n unpickled = pd.read_pickle(path)\n return unpickled\n\n def test_getitem_get(self):\n idx1 = self.series.index[5]\n idx2 = self.objSeries.index[5]\n\n self.assertEqual(self.series[idx1], self.series.get(idx1))\n 
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))\n\n self.assertEqual(self.series[idx1], self.series[5])\n self.assertEqual(self.objSeries[idx2], self.objSeries[5])\n\n self.assertEqual(\n self.series.get(-1), self.series.get(self.series.index[-1]))\n self.assertEqual(self.series[5], self.series.get(self.series.index[5]))\n\n # missing\n d = self.ts.index[0] - datetools.bday\n self.assertRaises(KeyError, self.ts.__getitem__, d)\n\n # None\n # GH 5652\n for s in [Series(), Series(index=list('abc'))]:\n result = s.get(None)\n self.assertIsNone(result)\n\n def test_iget(self):\n\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n\n # 10711, deprecated\n with tm.assert_produces_warning(FutureWarning):\n s.iget(1)\n\n # 10711, deprecated\n with tm.assert_produces_warning(FutureWarning):\n s.irow(1)\n\n # 10711, deprecated\n with tm.assert_produces_warning(FutureWarning):\n s.iget_value(1)\n\n for i in range(len(s)):\n result = s.iloc[i]\n exp = s[s.index[i]]\n assert_almost_equal(result, exp)\n\n # pass a slice\n result = s.iloc[slice(1, 3)]\n expected = s.ix[2:4]\n assert_series_equal(result, expected)\n\n # test slice is a view\n result[:] = 0\n self.assertTrue((s[1:3] == 0).all())\n\n # list of integers\n result = s.iloc[[0, 2, 3, 4, 5]]\n expected = s.reindex(s.index[[0, 2, 3, 4, 5]])\n assert_series_equal(result, expected)\n\n def test_iget_nonunique(self):\n s = Series([0, 1, 2], index=[0, 1, 0])\n self.assertEqual(s.iloc[2], 2)\n\n def test_getitem_regression(self):\n s = Series(lrange(5), index=lrange(5))\n result = s[lrange(5)]\n assert_series_equal(result, s)\n\n def test_getitem_setitem_slice_bug(self):\n s = Series(lrange(10), lrange(10))\n result = s[-12:]\n assert_series_equal(result, s)\n\n result = s[-7:]\n assert_series_equal(result, s[3:])\n\n result = s[:-12]\n assert_series_equal(result, s[:0])\n\n s = Series(lrange(10), lrange(10))\n s[-12:] = 0\n self.assertTrue((s == 0).all())\n\n s[:-12] = 5\n self.assertTrue((s == 0).all())\n\n def test_getitem_int64(self):\n idx = np.int64(5)\n self.assertEqual(self.ts[idx], self.ts[5])\n\n def test_getitem_fancy(self):\n slice1 = self.series[[1, 2, 3]]\n slice2 = self.objSeries[[1, 2, 3]]\n self.assertEqual(self.series.index[2], slice1.index[1])\n self.assertEqual(self.objSeries.index[2], slice2.index[1])\n self.assertEqual(self.series[2], slice1[1])\n self.assertEqual(self.objSeries[2], slice2[1])\n\n def test_getitem_boolean(self):\n s = self.series\n mask = s > s.median()\n\n # passing list is OK\n result = s[list(mask)]\n expected = s[mask]\n assert_series_equal(result, expected)\n self.assert_numpy_array_equal(result.index, s.index[mask])\n\n def test_getitem_boolean_empty(self):\n s = Series([], dtype=np.int64)\n s.index.name = 'index_name'\n s = s[s.isnull()]\n self.assertEqual(s.index.name, 'index_name')\n self.assertEqual(s.dtype, np.int64)\n\n # GH5877\n # indexing with empty series\n s = Series(['A', 'B'])\n expected = Series(np.nan,index=['C'],dtype=object)\n result = s[Series(['C'], dtype=object)]\n assert_series_equal(result, expected)\n\n s = Series(['A', 'B'])\n expected = Series(dtype=object)\n result = s[Series([], dtype=object)]\n assert_series_equal(result, expected)\n\n # invalid because of the boolean indexer\n # that's empty or not-aligned\n def f():\n s[Series([], dtype=bool)]\n self.assertRaises(IndexingError, f)\n\n def f():\n s[Series([True], dtype=bool)]\n self.assertRaises(IndexingError, f)\n\n def test_getitem_generator(self):\n gen = (x > 0 for x in self.series)\n result = 
self.series[gen]\n result2 = self.series[iter(self.series > 0)]\n expected = self.series[self.series > 0]\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n def test_getitem_boolean_object(self):\n # using column from DataFrame\n\n s = self.series\n mask = s > s.median()\n omask = mask.astype(object)\n\n # getitem\n result = s[omask]\n expected = s[mask]\n assert_series_equal(result, expected)\n\n # setitem\n s2 = s.copy()\n cop = s.copy()\n cop[omask] = 5\n s2[mask] = 5\n assert_series_equal(cop, s2)\n\n # nans raise exception\n omask[5:10] = np.nan\n self.assertRaises(Exception, s.__getitem__, omask)\n self.assertRaises(Exception, s.__setitem__, omask, 5)\n\n def test_getitem_setitem_boolean_corner(self):\n ts = self.ts\n mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median()\n\n # these used to raise...??\n\n self.assertRaises(Exception, ts.__getitem__, mask_shifted)\n self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)\n #ts[mask_shifted]\n #ts[mask_shifted] = 1\n\n self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)\n self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)\n #ts.ix[mask_shifted]\n #ts.ix[mask_shifted] = 2\n\n def test_getitem_setitem_slice_integers(self):\n s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])\n\n result = s[:4]\n expected = s.reindex([2, 4, 6, 8])\n assert_series_equal(result, expected)\n\n s[:4] = 0\n self.assertTrue((s[:4] == 0).all())\n self.assertTrue(not (s[4:] == 0).any())\n\n def test_getitem_out_of_bounds(self):\n # don't segfault, GH #495\n self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))\n\n # GH #917\n s = Series([])\n self.assertRaises(IndexError, s.__getitem__, -1)\n\n def test_getitem_setitem_integers(self):\n # caused bug without test\n s = Series([1, 2, 3], ['a', 'b', 'c'])\n\n self.assertEqual(s.ix[0], s['a'])\n s.ix[0] = 5\n self.assertAlmostEqual(s['a'], 5)\n\n def test_getitem_box_float64(self):\n value = self.ts[5]\n tm.assertIsInstance(value, np.float64)\n\n def test_getitem_ambiguous_keyerror(self):\n s = Series(lrange(10), index=lrange(0, 20, 2))\n self.assertRaises(KeyError, s.__getitem__, 1)\n self.assertRaises(KeyError, s.ix.__getitem__, 1)\n\n def test_getitem_unordered_dup(self):\n obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])\n self.assertTrue(np.isscalar(obj['c']))\n self.assertEqual(obj['c'], 0)\n\n def test_getitem_dups_with_missing(self):\n\n # breaks reindex, so need to use .ix internally\n # GH 4246\n s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])\n expected = s.ix[['foo', 'bar', 'bah', 'bam']]\n result = s[['foo', 'bar', 'bah', 'bam']]\n assert_series_equal(result, expected)\n\n def test_getitem_dups(self):\n s = Series(range(5),index=['A','A','B','C','C'],dtype=np.int64)\n expected = Series([3,4],index=['C','C'],dtype=np.int64)\n result = s['C']\n assert_series_equal(result, expected)\n\n def test_getitem_dataframe(self):\n rng = list(range(10))\n s = pd.Series(10, index=rng)\n df = pd.DataFrame(rng, index=rng)\n self.assertRaises(TypeError, s.__getitem__, df>5)\n\n def test_setitem_ambiguous_keyerror(self):\n s = Series(lrange(10), index=lrange(0, 20, 2))\n\n # equivalent of an append\n s2 = s.copy()\n s2[1] = 5\n expected = s.append(Series([5],index=[1]))\n assert_series_equal(s2,expected)\n\n s2 = s.copy()\n s2.ix[1] = 5\n expected = s.append(Series([5],index=[1]))\n assert_series_equal(s2,expected)\n\n def test_setitem_float_labels(self):\n # note labels are floats\n s = Series(['a', 'b', 
'c'], index=[0, 0.5, 1])\n tmp = s.copy()\n\n s.ix[1] = 'zoo'\n tmp.iloc[2] = 'zoo'\n\n assert_series_equal(s, tmp)\n\n def test_slice(self):\n numSlice = self.series[10:20]\n numSliceEnd = self.series[-10:]\n objSlice = self.objSeries[10:20]\n\n self.assertNotIn(self.series.index[9], numSlice.index)\n self.assertNotIn(self.objSeries.index[9], objSlice.index)\n\n self.assertEqual(len(numSlice), len(numSlice.index))\n self.assertEqual(self.series[numSlice.index[0]],\n numSlice[numSlice.index[0]])\n\n self.assertEqual(numSlice.index[1], self.series.index[11])\n\n self.assertTrue(tm.equalContents(numSliceEnd,\n np.array(self.series)[-10:]))\n\n # test return view\n sl = self.series[10:20]\n sl[:] = 0\n self.assertTrue((self.series[10:20] == 0).all())\n\n def test_slice_can_reorder_not_uniquely_indexed(self):\n s = Series(1, index=['a', 'a', 'b', 'b', 'c'])\n result = s[::-1] # it works!\n\n def test_slice_float_get_set(self):\n\n self.assertRaises(TypeError, lambda : self.ts[4.0:10.0])\n def f():\n self.ts[4.0:10.0] = 0\n self.assertRaises(TypeError, f)\n\n self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))\n self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)\n\n def test_slice_floats2(self):\n s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))\n\n self.assertEqual(len(s.ix[12.0:]), 8)\n self.assertEqual(len(s.ix[12.5:]), 7)\n\n i = np.arange(10, 20, dtype=float)\n i[2] = 12.2\n s.index = i\n self.assertEqual(len(s.ix[12.0:]), 8)\n self.assertEqual(len(s.ix[12.5:]), 7)\n\n def test_slice_float64(self):\n\n values = np.arange(10., 50., 2)\n index = Index(values)\n\n start, end = values[[5, 15]]\n\n s = Series(np.random.randn(20), index=index)\n\n result = s[start:end]\n expected = s.iloc[5:16]\n assert_series_equal(result, expected)\n\n result = s.loc[start:end]\n assert_series_equal(result, expected)\n\n df = DataFrame(np.random.randn(20, 3), index=index)\n\n result = df[start:end]\n expected = df.iloc[5:16]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[start:end]\n tm.assert_frame_equal(result, expected)\n\n def test_setitem(self):\n self.ts[self.ts.index[5]] = np.NaN\n self.ts[[1, 2, 17]] = np.NaN\n self.ts[6] = np.NaN\n self.assertTrue(np.isnan(self.ts[6]))\n self.assertTrue(np.isnan(self.ts[2]))\n self.ts[np.isnan(self.ts)] = 5\n self.assertFalse(np.isnan(self.ts[2]))\n\n # caught this bug when writing tests\n series = Series(tm.makeIntIndex(20).astype(float),\n index=tm.makeIntIndex(20))\n\n series[::2] = 0\n self.assertTrue((series[::2] == 0).all())\n\n # set item that's not contained\n s = self.series.copy()\n s['foobar'] = 1\n\n app = Series([1], index=['foobar'], name='series')\n expected = self.series.append(app)\n assert_series_equal(s, expected)\n\n # Test for issue #10193\n key = pd.Timestamp('2012-01-01')\n series = pd.Series()\n series[key] = 47\n expected = pd.Series(47, [key])\n assert_series_equal(series, expected)\n\n series = pd.Series([], pd.DatetimeIndex([], freq='D'))\n series[key] = 47\n expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))\n assert_series_equal(series, expected)\n\n def test_setitem_dtypes(self):\n\n # change dtypes\n # GH 4463\n expected = Series([np.nan,2,3])\n\n s = Series([1,2,3])\n s.iloc[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([1,2,3])\n s.loc[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([1,2,3])\n s[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([False])\n s.loc[0] = np.nan\n assert_series_equal(s,Series([np.nan]))\n\n 
s = Series([False,True])\n s.loc[0] = np.nan\n assert_series_equal(s,Series([np.nan,1.0]))\n\n def test_set_value(self):\n idx = self.ts.index[10]\n res = self.ts.set_value(idx, 0)\n self.assertIs(res, self.ts)\n self.assertEqual(self.ts[idx], 0)\n\n # equiv\n s = self.series.copy()\n res = s.set_value('foobar', 0)\n self.assertIs(res, s)\n self.assertEqual(res.index[-1], 'foobar')\n self.assertEqual(res['foobar'], 0)\n\n s = self.series.copy()\n s.loc['foobar'] = 0\n self.assertEqual(s.index[-1], 'foobar')\n self.assertEqual(s['foobar'], 0)\n\n def test_setslice(self):\n sl = self.ts[5:20]\n self.assertEqual(len(sl), len(sl.index))\n self.assertTrue(sl.index.is_unique)\n\n def test_basic_getitem_setitem_corner(self):\n # invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]\n with tm.assertRaisesRegexp(ValueError, 'tuple-index'):\n self.ts[:, 2]\n with tm.assertRaisesRegexp(ValueError, 'tuple-index'):\n self.ts[:, 2] = 2\n\n # weird lists. [slice(0, 5)] will work but not two slices\n result = self.ts[[slice(None, 5)]]\n expected = self.ts[:5]\n assert_series_equal(result, expected)\n\n # OK\n self.assertRaises(Exception, self.ts.__getitem__,\n [5, slice(None, None)])\n self.assertRaises(Exception, self.ts.__setitem__,\n [5, slice(None, None)], 2)\n\n def test_reshape_non_2d(self):\n # GH 4554\n x = Series(np.random.random(201), name='x')\n self.assertTrue(x.reshape(x.shape,) is x)\n\n # GH 2719\n a = Series([1, 2, 3, 4])\n result = a.reshape(2, 2)\n expected = a.values.reshape(2, 2)\n tm.assert_numpy_array_equal(result, expected)\n self.assertTrue(type(result) is type(expected))\n\n def test_reshape_2d_return_array(self):\n x = Series(np.random.random(201), name='x')\n result = x.reshape((-1, 1))\n self.assertNotIsInstance(result, Series)\n\n result2 = np.reshape(x, (-1, 1))\n self.assertNotIsInstance(result2, Series)\n\n result = x[:, None]\n expected = x.reshape((-1, 1))\n assert_almost_equal(result, expected)\n\n def test_basic_getitem_with_labels(self):\n indices = self.ts.index[[5, 10, 15]]\n\n result = self.ts[indices]\n expected = self.ts.reindex(indices)\n assert_series_equal(result, expected)\n\n result = self.ts[indices[0]:indices[2]]\n expected = self.ts.ix[indices[0]:indices[2]]\n assert_series_equal(result, expected)\n\n # integer indexes, be careful\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n inds = [0, 2, 5, 7, 8]\n arr_inds = np.array([0, 2, 5, 7, 8])\n result = s[inds]\n expected = s.reindex(inds)\n assert_series_equal(result, expected)\n\n result = s[arr_inds]\n expected = s.reindex(arr_inds)\n assert_series_equal(result, expected)\n\n def test_basic_setitem_with_labels(self):\n indices = self.ts.index[[5, 10, 15]]\n\n cp = self.ts.copy()\n exp = self.ts.copy()\n cp[indices] = 0\n exp.ix[indices] = 0\n assert_series_equal(cp, exp)\n\n cp = self.ts.copy()\n exp = self.ts.copy()\n cp[indices[0]:indices[2]] = 0\n exp.ix[indices[0]:indices[2]] = 0\n assert_series_equal(cp, exp)\n\n # integer indexes, be careful\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n inds = [0, 4, 6]\n arr_inds = np.array([0, 4, 6])\n\n cp = s.copy()\n exp = s.copy()\n s[inds] = 0\n s.ix[inds] = 0\n assert_series_equal(cp, exp)\n\n cp = s.copy()\n exp = s.copy()\n s[arr_inds] = 0\n s.ix[arr_inds] = 0\n assert_series_equal(cp, exp)\n\n inds_notfound = [0, 4, 5, 6]\n arr_inds_notfound = np.array([0, 4, 5, 6])\n self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)\n self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)\n\n def test_ix_getitem(self):\n 
inds = self.series.index[[3, 4, 7]]\n assert_series_equal(self.series.ix[inds], self.series.reindex(inds))\n assert_series_equal(self.series.ix[5::2], self.series[5::2])\n\n # slice with indices\n d1, d2 = self.ts.index[[5, 15]]\n result = self.ts.ix[d1:d2]\n expected = self.ts.truncate(d1, d2)\n assert_series_equal(result, expected)\n\n # boolean\n mask = self.series > self.series.median()\n assert_series_equal(self.series.ix[mask], self.series[mask])\n\n # ask for index value\n self.assertEqual(self.ts.ix[d1], self.ts[d1])\n self.assertEqual(self.ts.ix[d2], self.ts[d2])\n\n def test_ix_getitem_not_monotonic(self):\n d1, d2 = self.ts.index[[5, 15]]\n\n ts2 = self.ts[::2][[1, 2, 0]]\n\n self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))\n self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)\n\n def test_ix_getitem_setitem_integer_slice_keyerrors(self):\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n\n # this is OK\n cp = s.copy()\n cp.ix[4:10] = 0\n self.assertTrue((cp.ix[4:10] == 0).all())\n\n # so is this\n cp = s.copy()\n cp.ix[3:11] = 0\n self.assertTrue((cp.ix[3:11] == 0).values.all())\n\n result = s.ix[4:10]\n result2 = s.ix[3:11]\n expected = s.reindex([4, 6, 8, 10])\n\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]\n self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))\n self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)\n\n def test_ix_getitem_iterator(self):\n idx = iter(self.series.index[:10])\n result = self.series.ix[idx]\n assert_series_equal(result, self.series[:10])\n\n def test_where(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.where(cond).dropna()\n rs2 = s[cond]\n assert_series_equal(rs, rs2)\n\n rs = s.where(cond, -s)\n assert_series_equal(rs, s.abs())\n\n rs = s.where(cond)\n assert(s.shape == rs.shape)\n assert(rs is not s)\n\n # test alignment\n cond = Series([True,False,False,True,False],index=s.index)\n s2 = -(s.abs())\n\n expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)\n rs = s2.where(cond[:3])\n assert_series_equal(rs, expected)\n\n expected = s2.abs()\n expected.ix[0] = s2[0]\n rs = s2.where(cond[:3], -s2)\n assert_series_equal(rs, expected)\n\n self.assertRaises(ValueError, s.where, 1)\n self.assertRaises(ValueError, s.where, cond[:3].values, -s)\n\n # GH 2745\n s = Series([1, 2])\n s[[True, False]] = [0, 1]\n expected = Series([0, 2])\n assert_series_equal(s, expected)\n\n # failures\n self.assertRaises(\n ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3])\n self.assertRaises(\n ValueError, s.__setitem__, tuple([[[True, False]]]), [])\n\n # unsafe dtype changes\n for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n s[mask] = lrange(2, 7)\n expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n # these are allowed operations, but are upcasted\n for dtype in [np.int64, np.float64]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n values = [2.5, 3.5, 4.5, 5.5, 6.5]\n s[mask] = values\n expected = Series(values + lrange(5, 10), dtype='float64')\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n # GH 9731\n s = Series(np.arange(10), dtype='int64')\n mask = s > 5\n values = [2.5, 3.5, 4.5, 5.5]\n s[mask] = values\n expected = 
Series(lrange(6) + values, dtype='float64')\n assert_series_equal(s, expected)\n\n # can't do these as we are forced to change the itemsize of the input\n # to something we cannot\n for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n values = [2.5, 3.5, 4.5, 5.5, 6.5]\n self.assertRaises(Exception, s.__setitem__, tuple(mask), values)\n\n # GH3235\n s = Series(np.arange(10), dtype='int64')\n mask = s < 5\n s[mask] = lrange(2, 7)\n expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n s = Series(np.arange(10), dtype='int64')\n mask = s > 5\n s[mask] = [0] * 4\n expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')\n assert_series_equal(s, expected)\n\n s = Series(np.arange(10))\n mask = s > 5\n def f():\n s[mask] = [5,4,3,2,1]\n self.assertRaises(ValueError, f)\n def f():\n s[mask] = [0] * 5\n self.assertRaises(ValueError, f)\n\n # dtype changes\n s = Series([1,2,3,4])\n result = s.where(s>2,np.nan)\n expected = Series([np.nan,np.nan,3,4])\n assert_series_equal(result, expected)\n\n # GH 4667\n # setting with None changes dtype\n s = Series(range(10)).astype(float)\n s[8] = None\n result = s[8]\n self.assertTrue(isnull(result))\n\n s = Series(range(10)).astype(float)\n s[s > 8] = None\n result = s[isnull(s)]\n expected = Series(np.nan,index=[9])\n assert_series_equal(result, expected)\n\n def test_where_setitem_invalid(self):\n\n # GH 2702\n # make sure correct exceptions are raised on invalid list assignment\n\n # slice\n s = Series(list('abc'))\n def f():\n s[0:3] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s[0:3] = list(range(3))\n expected = Series([0,1,2])\n assert_series_equal(s.astype(np.int64), expected, )\n\n # slice with step\n s = Series(list('abcdef'))\n def f():\n s[0:4:2] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s = Series(list('abcdef'))\n s[0:4:2] = list(range(2))\n expected = Series([0,'b',1,'d','e','f'])\n assert_series_equal(s, expected)\n\n # neg slices\n s = Series(list('abcdef'))\n def f():\n s[:-1] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s[-3:-1] = list(range(2))\n expected = Series(['a','b','c',0,1,'f'])\n assert_series_equal(s, expected)\n\n # list\n s = Series(list('abc'))\n def f():\n s[[0,1,2]] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s = Series(list('abc'))\n def f():\n s[[0,1,2]] = list(range(2))\n self.assertRaises(ValueError, f)\n\n # scalar\n s = Series(list('abc'))\n s[0] = list(range(10))\n expected = Series([list(range(10)),'b','c'])\n assert_series_equal(s, expected)\n\n def test_where_broadcast(self):\n # Test a variety of differently sized series\n for size in range(2, 6):\n # Test a variety of boolean indices\n for selection in [np.resize([True, False, False, False, False], size), # First element should be set\n # Set alternating elements]\n np.resize([True, False], size),\n np.resize([False], size)]: # No element should be set\n # Test a variety of different numbers as content\n for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:\n # Test numpy arrays, lists and tuples as the input to be\n # broadcast\n for arr in [np.array([item]), [item], (item,)]:\n data = np.arange(size, dtype=float)\n s = Series(data)\n s[selection] = arr\n # Construct the expected series by taking the source\n # data or item based on the selection\n expected = Series([item if use_item else data[i]\n for i, use_item in 
enumerate(selection)])\n assert_series_equal(s, expected)\n\n s = Series(data)\n result = s.where(~selection, arr)\n assert_series_equal(result, expected)\n\n def test_where_inplace(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.copy()\n\n rs.where(cond, inplace=True)\n assert_series_equal(rs.dropna(), s[cond])\n assert_series_equal(rs, s.where(cond))\n\n rs = s.copy()\n rs.where(cond, -s, inplace=True)\n assert_series_equal(rs, s.where(cond, -s))\n\n def test_where_dups(self):\n # GH 4550\n # where crashes with dups in index\n s1 = Series(list(range(3)))\n s2 = Series(list(range(3)))\n comb = pd.concat([s1,s2])\n result = comb.where(comb < 2)\n expected = Series([0,1,np.nan,0,1,np.nan],index=[0,1,2,0,1,2])\n assert_series_equal(result, expected)\n\n # GH 4548\n # inplace updating not working with dups\n comb[comb<1] = 5\n expected = Series([5,1,2,5,1,2],index=[0,1,2,0,1,2])\n assert_series_equal(comb, expected)\n\n comb[comb<2] += 10\n expected = Series([5,11,2,5,11,2],index=[0,1,2,0,1,2])\n assert_series_equal(comb, expected)\n\n def test_where_datetime(self):\n s = Series(date_range('20130102', periods=2))\n expected = Series([10, 10], dtype='datetime64[ns]')\n mask = np.array([False, False])\n\n rs = s.where(mask, [10, 10])\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10)\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10.0)\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, 10.0])\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, np.nan])\n expected = Series([10, None], dtype='datetime64[ns]')\n assert_series_equal(rs, expected)\n\n def test_where_timedelta(self):\n s = Series([1, 2], dtype='timedelta64[ns]')\n expected = Series([10, 10], dtype='timedelta64[ns]')\n mask = np.array([False, False])\n\n rs = s.where(mask, [10, 10])\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10)\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10.0)\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, 10.0])\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, np.nan])\n expected = Series([10, None], dtype='timedelta64[ns]')\n assert_series_equal(rs, expected)\n\n def test_mask(self):\n # compare with tested results in test_where\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.where(~cond, np.nan)\n assert_series_equal(rs, s.mask(cond))\n\n rs = s.where(~cond)\n rs2 = s.mask(cond)\n assert_series_equal(rs, rs2)\n\n rs = s.where(~cond, -s)\n rs2 = s.mask(cond, -s)\n assert_series_equal(rs, rs2)\n\n cond = Series([True, False, False, True, False], index=s.index)\n s2 = -(s.abs())\n rs = s2.where(~cond[:3])\n rs2 = s2.mask(cond[:3])\n assert_series_equal(rs, rs2)\n\n rs = s2.where(~cond[:3], -s2)\n rs2 = s2.mask(cond[:3], -s2)\n assert_series_equal(rs, rs2)\n\n self.assertRaises(ValueError, s.mask, 1)\n self.assertRaises(ValueError, s.mask, cond[:3].values, -s)\n\n # dtype changes\n s = Series([1,2,3,4])\n result = s.mask(s>2, np.nan)\n expected = Series([1, 2, np.nan, np.nan])\n assert_series_equal(result, expected)\n\n def test_mask_broadcast(self):\n # GH 8801\n # copied from test_where_broadcast\n for size in range(2, 6):\n for selection in [np.resize([True, False, False, False, False], size), # First element should be set\n # Set alternating elements]\n np.resize([True, False], size),\n np.resize([False], size)]: # No element should be set\n for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:\n for arr in [np.array([item]), [item], (item,)]:\n 
data = np.arange(size, dtype=float)\n s = Series(data)\n result = s.mask(selection, arr)\n expected = Series([item if use_item else data[i]\n for i, use_item in enumerate(selection)])\n assert_series_equal(result, expected)\n\n def test_mask_inplace(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.copy()\n rs.mask(cond, inplace=True)\n assert_series_equal(rs.dropna(), s[~cond])\n assert_series_equal(rs, s.mask(cond))\n\n rs = s.copy()\n rs.mask(cond, -s, inplace=True)\n assert_series_equal(rs, s.mask(cond, -s))\n\n def test_drop(self):\n\n # unique\n s = Series([1,2],index=['one','two'])\n expected = Series([1],index=['one'])\n result = s.drop(['two'])\n assert_series_equal(result,expected)\n result = s.drop('two', axis='rows')\n assert_series_equal(result,expected)\n\n # non-unique\n # GH 5248\n s = Series([1,1,2],index=['one','two','one'])\n expected = Series([1,2],index=['one','one'])\n result = s.drop(['two'], axis=0)\n assert_series_equal(result,expected)\n result = s.drop('two')\n assert_series_equal(result,expected)\n\n expected = Series([1],index=['two'])\n result = s.drop(['one'])\n assert_series_equal(result,expected)\n result = s.drop('one')\n assert_series_equal(result,expected)\n\n # single string/tuple-like\n s = Series(range(3),index=list('abc'))\n self.assertRaises(ValueError, s.drop, 'bc')\n self.assertRaises(ValueError, s.drop, ('a',))\n\n # errors='ignore'\n s = Series(range(3),index=list('abc'))\n result = s.drop('bc', errors='ignore')\n assert_series_equal(result, s)\n result = s.drop(['a', 'd'], errors='ignore')\n expected = s.ix[1:]\n assert_series_equal(result, expected)\n\n # bad axis\n self.assertRaises(ValueError, s.drop, 'one', axis='columns')\n\n # GH 8522\n s = Series([2,3], index=[True, False])\n self.assertTrue(s.index.is_object())\n result = s.drop(True)\n expected = Series([3],index=[False])\n assert_series_equal(result,expected)\n\n def test_ix_setitem(self):\n inds = self.series.index[[3, 4, 7]]\n\n result = self.series.copy()\n result.ix[inds] = 5\n\n expected = self.series.copy()\n expected[[3, 4, 7]] = 5\n assert_series_equal(result, expected)\n\n result.ix[5:10] = 10\n expected[5:10] = 10\n assert_series_equal(result, expected)\n\n # set slice with indices\n d1, d2 = self.series.index[[5, 15]]\n result.ix[d1:d2] = 6\n expected[5:16] = 6 # because it's inclusive\n assert_series_equal(result, expected)\n\n # set index value\n self.series.ix[d1] = 4\n self.series.ix[d2] = 6\n self.assertEqual(self.series[d1], 4)\n self.assertEqual(self.series[d2], 6)\n\n def test_where_numeric_with_string(self):\n # GH 9280\n s = pd.Series([1, 2, 3])\n w = s.where(s>1, 'X')\n\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n w = s.where(s>1, ['X', 'Y', 'Z'])\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n w = s.where(s>1, np.array(['X', 'Y', 'Z']))\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n def test_setitem_boolean(self):\n mask = self.series > self.series.median()\n\n # similiar indexed series\n result = self.series.copy()\n result[mask] = self.series * 2\n expected = self.series * 2\n 
assert_series_equal(result[mask], expected[mask])\n\n # needs alignment\n result = self.series.copy()\n result[mask] = (self.series * 2)[0:5]\n expected = (self.series * 2)[0:5].reindex_like(self.series)\n expected[-mask] = self.series[mask]\n assert_series_equal(result[mask], expected[mask])\n\n def test_ix_setitem_boolean(self):\n mask = self.series > self.series.median()\n\n result = self.series.copy()\n result.ix[mask] = 0\n expected = self.series\n expected[mask] = 0\n assert_series_equal(result, expected)\n\n def test_ix_setitem_corner(self):\n inds = list(self.series.index[[5, 8, 12]])\n self.series.ix[inds] = 5\n self.assertRaises(Exception, self.series.ix.__setitem__,\n inds + ['foo'], 5)\n\n def test_get_set_boolean_different_order(self):\n ordered = self.series.sort_values()\n\n # setting\n copy = self.series.copy()\n copy[ordered > 0] = 0\n\n expected = self.series.copy()\n expected[expected > 0] = 0\n\n assert_series_equal(copy, expected)\n\n # getting\n sel = self.series[ordered > 0]\n exp = self.series[self.series > 0]\n assert_series_equal(sel, exp)\n\n def test_repr(self):\n str(self.ts)\n str(self.series)\n str(self.series.astype(int))\n str(self.objSeries)\n\n str(Series(tm.randn(1000), index=np.arange(1000)))\n str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))\n\n # empty\n str(self.empty)\n\n # with NaNs\n self.series[5:7] = np.NaN\n str(self.series)\n\n # with Nones\n ots = self.ts.astype('O')\n ots[::2] = None\n repr(ots)\n\n # various names\n for name in ['', 1, 1.2, 'foo', u('\\u03B1\\u03B2\\u03B3'),\n 'loooooooooooooooooooooooooooooooooooooooooooooooooooong',\n ('foo', 'bar', 'baz'),\n (1, 2),\n ('foo', 1, 2.3),\n (u('\\u03B1'), u('\\u03B2'), u('\\u03B3')),\n (u('\\u03B1'), 'bar')]:\n self.series.name = name\n repr(self.series)\n\n biggie = Series(tm.randn(1000), index=np.arange(1000),\n name=('foo', 'bar', 'baz'))\n repr(biggie)\n\n # 0 as name\n ser = Series(np.random.randn(100), name=0)\n rep_str = repr(ser)\n self.assertIn(\"Name: 0\", rep_str)\n\n # tidy repr\n ser = Series(np.random.randn(1001), name=0)\n rep_str = repr(ser)\n self.assertIn(\"Name: 0\", rep_str)\n\n ser = Series([\"a\\n\\r\\tb\"], name=[\"a\\n\\r\\td\"], index=[\"a\\n\\r\\tf\"])\n self.assertFalse(\"\\t\" in repr(ser))\n self.assertFalse(\"\\r\" in repr(ser))\n self.assertFalse(\"a\\n\" in repr(ser))\n\n # with empty series (#4651)\n s = Series([], dtype=np.int64, name='foo')\n self.assertEqual(repr(s), 'Series([], Name: foo, dtype: int64)')\n\n s = Series([], dtype=np.int64, name=None)\n self.assertEqual(repr(s), 'Series([], dtype: int64)')\n\n def test_tidy_repr(self):\n a = Series([u(\"\\u05d0\")] * 1000)\n a.name = 'title1'\n repr(a) # should not raise exception\n\n def test_repr_bool_fails(self):\n s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])\n\n import sys\n\n buf = StringIO()\n tmp = sys.stderr\n sys.stderr = buf\n try:\n # it works (with no Cython exception barf)!\n repr(s)\n finally:\n sys.stderr = tmp\n self.assertEqual(buf.getvalue(), '')\n\n def test_repr_name_iterable_indexable(self):\n s = Series([1, 2, 3], name=np.int64(3))\n\n # it works!\n repr(s)\n\n s.name = (u(\"\\u05d0\"),) * 2\n repr(s)\n\n def test_repr_should_return_str(self):\n # http://docs.python.org/py3k/reference/datamodel.html#object.__repr__\n # http://docs.python.org/reference/datamodel.html#object.__repr__\n # ...The return value must be a string object.\n\n # (str on py2.x, str (unicode) on py3)\n\n data = [8, 5, 3, 5]\n index1 = [u(\"\\u03c3\"), u(\"\\u03c4\"), 
u(\"\\u03c5\"), u(\"\\u03c6\")]\n df = Series(data, index=index1)\n self.assertTrue(type(df.__repr__() == str)) # both py2 / 3\n\n def test_repr_max_rows(self):\n # GH 6863\n with pd.option_context('max_rows', None):\n str(Series(range(1001))) # should not raise exception\n\n def test_unicode_string_with_unicode(self):\n df = Series([u(\"\\u05d0\")], name=u(\"\\u05d1\"))\n if compat.PY3:\n str(df)\n else:\n compat.text_type(df)\n\n def test_bytestring_with_unicode(self):\n df = Series([u(\"\\u05d0\")], name=u(\"\\u05d1\"))\n if compat.PY3:\n bytes(df)\n else:\n str(df)\n\n def test_timeseries_repr_object_dtype(self):\n index = Index([datetime(2000, 1, 1) + timedelta(i)\n for i in range(1000)], dtype=object)\n ts = Series(np.random.randn(len(index)), index)\n repr(ts)\n\n ts = tm.makeTimeSeries(1000)\n self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))\n\n ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]\n repr(ts2).splitlines()[-1]\n\n def test_timeseries_periodindex(self):\n # GH2891\n from pandas import period_range\n prng = period_range('1/1/2011', '1/1/2012', freq='M')\n ts = Series(np.random.randn(len(prng)), prng)\n new_ts = self.round_trip_pickle(ts)\n self.assertEqual(new_ts.index.freq, 'M')\n\n def test_iter(self):\n for i, val in enumerate(self.series):\n self.assertEqual(val, self.series[i])\n\n for i, val in enumerate(self.ts):\n self.assertEqual(val, self.ts[i])\n\n def test_keys(self):\n # HACK: By doing this in two stages, we avoid 2to3 wrapping the call\n # to .keys() in a list()\n getkeys = self.ts.keys\n self.assertIs(getkeys(), self.ts.index)\n\n def test_values(self):\n self.assert_numpy_array_equal(self.ts, self.ts.values)\n\n def test_iteritems(self):\n for idx, val in compat.iteritems(self.series):\n self.assertEqual(val, self.series[idx])\n\n for idx, val in compat.iteritems(self.ts):\n self.assertEqual(val, self.ts[idx])\n\n # assert is lazy (genrators don't define reverse, lists do)\n self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum, check_allna=True)\n\n def test_sum_inf(self):\n import pandas.core.nanops as nanops\n\n s = Series(np.random.randn(10))\n s2 = s.copy()\n\n s[5:8] = np.inf\n s2[5:8] = np.nan\n\n self.assertTrue(np.isinf(s.sum()))\n\n arr = np.random.randn(100, 100).astype('f4')\n arr[:, 2] = np.inf\n\n with cf.option_context(\"mode.use_inf_as_null\", True):\n assert_almost_equal(s.sum(), s2.sum())\n\n res = nanops.nansum(arr, axis=1)\n self.assertTrue(np.isinf(res).all())\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean)\n\n def test_median(self):\n self._check_stat_op('median', np.median)\n\n # test with integers, test failure\n int_ts = Series(np.ones(10, dtype=int), index=lrange(10))\n self.assertAlmostEqual(np.median(int_ts), int_ts.median())\n\n def test_mode(self):\n s = Series([12, 12, 11, 10, 19, 11])\n exp = Series([11, 12])\n assert_series_equal(s.mode(), exp)\n\n assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype='int64'))\n\n lst = [5] * 20 + [1] * 10 + [6] * 25\n np.random.shuffle(lst)\n s = Series(lst)\n assert_series_equal(s.mode(), Series([6]))\n\n s = Series([5] * 10)\n assert_series_equal(s.mode(), Series([5]))\n\n s = Series(lst)\n s[0] = np.nan\n assert_series_equal(s.mode(), Series([6.]))\n\n s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))\n assert_series_equal(s.mode(), Series(['e']))\n\n s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')\n assert_series_equal(s.mode(), Series([], 
dtype=\"M8[ns]\"))\n s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',\n '2013-01-02'], dtype='M8[ns]')\n assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],\n dtype='M8[ns]'))\n\n def test_prod(self):\n self._check_stat_op('prod', np.prod)\n\n def test_min(self):\n self._check_stat_op('min', np.min, check_objects=True)\n\n def test_max(self):\n self._check_stat_op('max', np.max, check_objects=True)\n\n def test_var_std(self):\n alt = lambda x: np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n alt = lambda x: np.var(x, ddof=1)\n self._check_stat_op('var', alt)\n\n result = self.ts.std(ddof=4)\n expected = np.std(self.ts.values, ddof=4)\n assert_almost_equal(result, expected)\n\n result = self.ts.var(ddof=4)\n expected = np.var(self.ts.values, ddof=4)\n assert_almost_equal(result, expected)\n\n # 1 - element series with ddof=1\n s = self.ts.iloc[[0]]\n result = s.var(ddof=1)\n self.assertTrue(isnull(result))\n\n result = s.std(ddof=1)\n self.assertTrue(isnull(result))\n\n def test_sem(self):\n alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))\n self._check_stat_op('sem', alt)\n\n result = self.ts.sem(ddof=4)\n expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values))\n assert_almost_equal(result, expected)\n\n # 1 - element series with ddof=1\n s = self.ts.iloc[[0]]\n result = s.sem(ddof=1)\n self.assertTrue(isnull(result))\n\n def test_skew(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import skew\n alt = lambda x: skew(x, bias=False)\n self._check_stat_op('skew', alt)\n\n # test corner cases, skew() returns NaN unless there's at least 3 values\n min_N = 3\n for i in range(1, min_N + 1):\n s = Series(np.ones(i))\n df = DataFrame(np.ones((i, i)))\n if i < min_N:\n self.assertTrue(np.isnan(s.skew()))\n self.assertTrue(np.isnan(df.skew()).all())\n else:\n self.assertEqual(0, s.skew())\n self.assertTrue((df.skew() == 0).all())\n\n def test_kurt(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import kurtosis\n alt = lambda x: kurtosis(x, bias=False)\n self._check_stat_op('kurt', alt)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar'])\n\n # test corner cases, kurt() returns NaN unless there's at least 4 values\n min_N = 4\n for i in range(1, min_N + 1):\n s = Series(np.ones(i))\n df = DataFrame(np.ones((i, i)))\n if i < min_N:\n self.assertTrue(np.isnan(s.kurt()))\n self.assertTrue(np.isnan(df.kurt()).all())\n else:\n self.assertEqual(0, s.kurt())\n self.assertTrue((df.kurt() == 0).all())\n\n def test_argsort(self):\n self._check_accum_op('argsort')\n argsorted = self.ts.argsort()\n self.assertTrue(issubclass(argsorted.dtype.type, np.integer))\n\n # GH 2967 (introduced bug in 0.11-dev I think)\n s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])\n self.assertEqual(s.dtype, 'datetime64[ns]')\n shifted = s.shift(-1)\n self.assertEqual(shifted.dtype, 'datetime64[ns]')\n self.assertTrue(isnull(shifted[4]))\n\n result = s.argsort()\n expected = Series(lrange(5), dtype='int64')\n assert_series_equal(result, expected)\n\n result = shifted.argsort()\n expected = Series(lrange(4) + [-1], dtype='int64')\n assert_series_equal(result, expected)\n\n def test_argsort_stable(self):\n s = Series(np.random.randint(0, 100, size=10000))\n mindexer = s.argsort(kind='mergesort')\n qindexer = s.argsort()\n\n mexpected = np.argsort(s.values, 
kind='mergesort')\n qexpected = np.argsort(s.values, kind='quicksort')\n\n self.assert_numpy_array_equal(mindexer, mexpected)\n self.assert_numpy_array_equal(qindexer, qexpected)\n self.assertFalse(np.array_equal(qindexer, mindexer))\n\n def test_reorder_levels(self):\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]],\n names=['L0', 'L1', 'L2'])\n s = Series(np.arange(6), index=index)\n\n # no change, position\n result = s.reorder_levels([0, 1, 2])\n assert_series_equal(s, result)\n\n # no change, labels\n result = s.reorder_levels(['L0', 'L1', 'L2'])\n assert_series_equal(s, result)\n\n # rotate, position\n result = s.reorder_levels([1, 2, 0])\n e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0]],\n names=['L1', 'L2', 'L0'])\n expected = Series(np.arange(6), index=e_idx)\n assert_series_equal(result, expected)\n\n result = s.reorder_levels([0, 0, 0])\n e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n names=['L0', 'L0', 'L0'])\n expected = Series(range(6), index=e_idx)\n assert_series_equal(result, expected)\n\n result = s.reorder_levels(['L0', 'L0', 'L0'])\n assert_series_equal(result, expected)\n\n def test_cumsum(self):\n self._check_accum_op('cumsum')\n\n def test_cumprod(self):\n self._check_accum_op('cumprod')\n\n def test_cummin(self):\n self.assert_numpy_array_equal(self.ts.cummin(),\n np.minimum.accumulate(np.array(self.ts)))\n ts = self.ts.copy()\n ts[::2] = np.NaN\n result = ts.cummin()[1::2]\n expected = np.minimum.accumulate(ts.valid())\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_cummax(self):\n self.assert_numpy_array_equal(self.ts.cummax(),\n np.maximum.accumulate(np.array(self.ts)))\n ts = self.ts.copy()\n ts[::2] = np.NaN\n result = ts.cummax()[1::2]\n expected = np.maximum.accumulate(ts.valid())\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_cummin_datetime64(self):\n s = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-1']))\n result = s.cummin(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1']))\n result = s.cummin(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_cummax_datetime64(self):\n s = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-2', 'NaT', '2000-1-3']))\n result = s.cummax(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3']))\n result = s.cummax(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_cummin_timedelta64(self):\n s = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '1 min', ]))\n result = s.cummin(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', '2 min', '1 min', '1 min', '1 min', ]))\n result = s.cummin(skipna=False)\n 
self.assert_series_equal(expected, result)\n\n def test_cummax_timedelta64(self):\n s = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '2 min', 'NaT', '3 min', ]))\n result = s.cummax(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', '2 min', '2 min', '2 min', '3 min', ]))\n result = s.cummax(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_npdiff(self):\n raise nose.SkipTest(\"skipping due to Series no longer being an \"\n \"ndarray\")\n\n # no longer works as the return type of np.diff is now nd.array\n s = Series(np.arange(5))\n\n r = np.diff(s)\n assert_series_equal(Series([nan, 0, 0, 0, nan]), r)\n\n def _check_stat_op(self, name, alternate, check_objects=False, check_allna=False):\n import pandas.core.nanops as nanops\n\n def testit():\n f = getattr(Series, name)\n\n # add some NaNs\n self.series[5:15] = np.NaN\n\n # idxmax, idxmin, min, and max are valid for dates\n if name not in ['max','min']:\n ds = Series(date_range('1/1/2001', periods=10))\n self.assertRaises(TypeError, f, ds)\n\n # skipna or no\n self.assertTrue(notnull(f(self.series)))\n self.assertTrue(isnull(f(self.series, skipna=False)))\n\n # check the result is correct\n nona = self.series.dropna()\n assert_almost_equal(f(nona), alternate(nona.values))\n assert_almost_equal(f(self.series), alternate(nona.values))\n\n allna = self.series * nan\n\n if check_allna:\n # xref 9422\n # bottleneck >= 1.0 give 0.0 for an allna Series sum\n try:\n self.assertTrue(nanops._USE_BOTTLENECK)\n import bottleneck as bn\n self.assertTrue(bn.__version__ >= LooseVersion('1.0'))\n self.assertEqual(f(allna),0.0)\n except:\n self.assertTrue(np.isnan(f(allna)))\n\n # dtype=object with None, it works!\n s = Series([1, 2, 3, None, 5])\n f(s)\n\n # 2888\n l = [0]\n l.extend(lrange(2 ** 40, 2 ** 40+1000))\n s = Series(l, dtype='int64')\n assert_almost_equal(float(f(s)), float(alternate(s.values)))\n\n # check date range\n if check_objects:\n s = Series(bdate_range('1/1/2000', periods=10))\n res = f(s)\n exp = alternate(s)\n self.assertEqual(res, exp)\n\n # check on string data\n if name not in ['sum','min','max']:\n self.assertRaises(TypeError, f, Series(list('abc')))\n\n # Invalid axis.\n self.assertRaises(ValueError, f, self.series, axis=1)\n\n # Unimplemented numeric_only parameter.\n if 'numeric_only' in getargspec(f).args:\n self.assertRaisesRegexp(NotImplementedError, name, f,\n self.series, numeric_only=True)\n\n testit()\n\n try:\n import bottleneck as bn\n nanops._USE_BOTTLENECK = False\n testit()\n nanops._USE_BOTTLENECK = True\n except ImportError:\n pass\n\n def _check_accum_op(self, name):\n func = getattr(np, name)\n self.assert_numpy_array_equal(func(self.ts), func(np.array(self.ts)))\n\n # with missing values\n ts = self.ts.copy()\n ts[::2] = np.NaN\n\n result = func(ts)[1::2]\n expected = func(np.array(ts.valid()))\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_round(self):\n # numpy.round doesn't preserve metadata, probably a numpy bug,\n # re: GH #314\n result = np.round(self.ts, 2)\n expected = Series(np.round(self.ts.values, 2), index=self.ts.index,\n name='ts')\n assert_series_equal(result, expected)\n self.assertEqual(result.name, self.ts.name)\n\n def test_prod_numpy16_bug(self):\n s = Series([1., 1., 1.], index=lrange(3))\n result = s.prod()\n self.assertNotIsInstance(result, Series)\n\n def test_quantile(self):\n 
from numpy import percentile\n\n q = self.ts.quantile(0.1)\n self.assertEqual(q, percentile(self.ts.valid(), 10))\n\n q = self.ts.quantile(0.9)\n self.assertEqual(q, percentile(self.ts.valid(), 90))\n\n # object dtype\n q = Series(self.ts,dtype=object).quantile(0.9)\n self.assertEqual(q, percentile(self.ts.valid(), 90))\n\n # datetime64[ns] dtype\n dts = self.ts.index.to_series()\n q = dts.quantile(.2)\n self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))\n\n # timedelta64[ns] dtype\n tds = dts.diff()\n q = tds.quantile(.25)\n self.assertEqual(q, pd.to_timedelta('24:00:00'))\n\n # GH7661\n result = Series([np.timedelta64('NaT')]).sum()\n self.assertTrue(result is pd.NaT)\n\n msg = 'percentiles should all be in the interval \\\\[0, 1\\\\]'\n for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:\n with tm.assertRaisesRegexp(ValueError, msg):\n self.ts.quantile(invalid)\n\n def test_quantile_multi(self):\n from numpy import percentile\n\n qs = [.1, .9]\n result = self.ts.quantile(qs)\n expected = pd.Series([percentile(self.ts.valid(), 10),\n percentile(self.ts.valid(), 90)],\n index=qs, name=self.ts.name)\n assert_series_equal(result, expected)\n\n dts = self.ts.index.to_series()\n dts.name = 'xxx'\n result = dts.quantile((.2, .2))\n expected = Series([Timestamp('2000-01-10 19:12:00'),\n Timestamp('2000-01-10 19:12:00')],\n index=[.2, .2], name='xxx')\n assert_series_equal(result, expected)\n\n result = self.ts.quantile([])\n expected = pd.Series([], name=self.ts.name)\n assert_series_equal(result, expected)\n\n def test_append(self):\n appendedSeries = self.series.append(self.objSeries)\n for idx, value in compat.iteritems(appendedSeries):\n if idx in self.series.index:\n self.assertEqual(value, self.series[idx])\n elif idx in self.objSeries.index:\n self.assertEqual(value, self.objSeries[idx])\n else:\n self.fail(\"orphaned index!\")\n\n self.assertRaises(ValueError, self.ts.append, self.ts,\n verify_integrity=True)\n\n def test_append_many(self):\n pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]\n\n result = pieces[0].append(pieces[1:])\n assert_series_equal(result, self.ts)\n\n def test_all_any(self):\n ts = tm.makeTimeSeries()\n bool_series = ts > 0\n self.assertFalse(bool_series.all())\n self.assertTrue(bool_series.any())\n\n # Alternative types, with implicit 'object' dtype.\n s = Series(['abc', True])\n self.assertEqual('abc', s.any()) # 'abc' || True => 'abc'\n\n def test_all_any_params(self):\n # Check skipna, with implicit 'object' dtype.\n s1 = Series([np.nan, True])\n s2 = Series([np.nan, False])\n self.assertTrue(s1.all(skipna=False)) # nan && True => True\n self.assertTrue(s1.all(skipna=True))\n self.assertTrue(np.isnan(s2.any(skipna=False))) # nan || False => nan\n self.assertFalse(s2.any(skipna=True))\n\n # Check level.\n s = pd.Series([False, False, True, True, False, True],\n index=[0, 0, 1, 1, 2, 2])\n assert_series_equal(s.all(level=0), Series([False, True, False]))\n assert_series_equal(s.any(level=0), Series([False, True, True]))\n\n # bool_only is not implemented with level option.\n self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)\n self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)\n\n # bool_only is not implemented alone.\n self.assertRaises(NotImplementedError, s.any, bool_only=True)\n self.assertRaises(NotImplementedError, s.all, bool_only=True)\n\n def test_op_method(self):\n def check(series, other, check_reverse=False):\n simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']\n if not compat.PY3:\n 
simple_ops.append('div')\n\n for opname in simple_ops:\n op = getattr(Series, opname)\n\n if op == 'div':\n alt = operator.truediv\n else:\n alt = getattr(operator, opname)\n\n result = op(series, other)\n expected = alt(series, other)\n tm.assert_almost_equal(result, expected)\n if check_reverse:\n rop = getattr(Series, \"r\" + opname)\n result = rop(series, other)\n expected = alt(other, series)\n tm.assert_almost_equal(result, expected)\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts[::2])\n check(self.ts, 5, check_reverse=True)\n check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)\n\n def test_neg(self):\n assert_series_equal(-self.series, -1 * self.series)\n\n def test_invert(self):\n assert_series_equal(-(self.series < 0), ~(self.series < 0))\n\n def test_modulo(self):\n\n # GH3590, modulo as ints\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})\n result = p['first'] % p['second']\n expected = Series(p['first'].values %\n p['second'].values, dtype='float64')\n expected.iloc[0:3] = np.nan\n assert_series_equal(result, expected)\n\n result = p['first'] % 0\n expected = Series(np.nan, index=p.index, name='first')\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] % p['second']\n expected = Series(p['first'].values % p['second'].values)\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] % p['second']\n result2 = p['second'] % p['first']\n self.assertFalse(np.array_equal(result, result2))\n\n # GH 9144\n s = Series([0, 1])\n\n result = s % 0\n expected = Series([nan, nan])\n assert_series_equal(result, expected)\n\n result = 0 % s\n expected = Series([nan, 0.0])\n assert_series_equal(result, expected)\n\n def test_div(self):\n\n # no longer do integer div for any ops, but deal with the 0's\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})\n result = p['first'] / p['second']\n expected = Series(p['first'].values.astype(float) / p['second'].values,\n dtype='float64')\n expected.iloc[0:3] = np.inf\n assert_series_equal(result, expected)\n\n result = p['first'] / 0\n expected = Series(np.inf, index=p.index, name='first')\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] / p['second']\n expected = Series(p['first'].values / p['second'].values)\n assert_series_equal(result, expected)\n\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})\n result = p['first'] / p['second']\n assert_series_equal(result, p['first'].astype('float64'), check_names=False)\n self.assertTrue(result.name is None)\n self.assertFalse(np.array_equal(result, p['second'] / p['first']))\n\n # inf signing\n s = Series([np.nan,1.,-1.])\n result = s / 0\n expected = Series([np.nan,np.inf,-np.inf])\n assert_series_equal(result, expected)\n\n # float/integer issue\n # GH 7785\n p = DataFrame({'first': (1,0), 'second': (-0.01,-0.02)})\n expected = Series([-0.01,-np.inf])\n\n result = p['second'].div(p['first'])\n assert_series_equal(result, expected, check_names=False)\n\n result = p['second'] / p['first']\n assert_series_equal(result, expected)\n\n # GH 9144\n s = Series([-1, 0, 1])\n\n result = 0 / s\n expected = Series([0.0, nan, 0.0])\n assert_series_equal(result, expected)\n\n result = s / 0\n expected = Series([-inf, nan, inf])\n assert_series_equal(result, expected)\n\n result = s // 0\n expected = Series([-inf, nan, inf])\n assert_series_equal(result, expected)\n\n def test_operators(self):\n\n def _check_op(series, other, op, 
pos_only=False):\n left = np.abs(series) if pos_only else series\n right = np.abs(other) if pos_only else other\n\n cython_or_numpy = op(left, right)\n python = left.combine(right, op)\n tm.assert_almost_equal(cython_or_numpy, python)\n\n def check(series, other):\n simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']\n\n for opname in simple_ops:\n _check_op(series, other, getattr(operator, opname))\n\n _check_op(series, other, operator.pow, pos_only=True)\n\n _check_op(series, other, lambda x, y: operator.add(y, x))\n _check_op(series, other, lambda x, y: operator.sub(y, x))\n _check_op(series, other, lambda x, y: operator.truediv(y, x))\n _check_op(series, other, lambda x, y: operator.floordiv(y, x))\n _check_op(series, other, lambda x, y: operator.mul(y, x))\n _check_op(series, other, lambda x, y: operator.pow(y, x),\n pos_only=True)\n _check_op(series, other, lambda x, y: operator.mod(y, x))\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts * 0)\n check(self.ts, self.ts[::2])\n check(self.ts, 5)\n\n def check_comparators(series, other):\n _check_op(series, other, operator.gt)\n _check_op(series, other, operator.ge)\n _check_op(series, other, operator.eq)\n _check_op(series, other, operator.lt)\n _check_op(series, other, operator.le)\n\n check_comparators(self.ts, 5)\n check_comparators(self.ts, self.ts + 1)\n\n def test_operators_empty_int_corner(self):\n s1 = Series([], [], dtype=np.int32)\n s2 = Series({'x': 0.})\n tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))\n\n def test_constructor_dtype_timedelta64(self):\n\n # basic\n td = Series([timedelta(days=i) for i in range(3)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # mixed with NaT\n from pandas import tslib\n td = Series([timedelta(days=1),tslib.NaT ], dtype='m8[ns]' )\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1),np.nan ], dtype='m8[ns]' )\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), pd.NaT],dtype='m8[ns]')\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # improved inference\n # GH5689\n td = Series([np.timedelta64(300000000), pd.NaT])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), tslib.iNaT])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), np.nan])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([pd.NaT, np.timedelta64(300000000)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(1,'s')])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # these are frequency conversion astypes\n #for t in ['s', 'D', 'us', 'ms']:\n # self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)\n\n # valid astype\n td.astype('int64')\n\n # invalid casting\n self.assertRaises(TypeError, td.astype, 'int32')\n\n # this is an invalid casting\n def f():\n Series([timedelta(days=1), 'foo'],dtype='m8[ns]')\n self.assertRaises(Exception, f)\n\n # leave as object here\n td = Series([timedelta(days=i) for i in range(3)] + ['foo'])\n self.assertEqual(td.dtype, 'object')\n\n # these will correctly infer a timedelta\n s = Series([None, pd.NaT, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = Series([np.nan, pd.NaT, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = 
Series([pd.NaT, None, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = Series([pd.NaT, np.nan, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n\n def test_operators_timedelta64(self):\n\n # invalid ops\n self.assertRaises(Exception, self.objSeries.__add__, 1)\n self.assertRaises(\n Exception, self.objSeries.__add__, np.array(1, dtype=np.int64))\n self.assertRaises(Exception, self.objSeries.__sub__, 1)\n self.assertRaises(\n Exception, self.objSeries.__sub__, np.array(1, dtype=np.int64))\n\n # seriese ops\n v1 = date_range('2012-1-1', periods=3, freq='D')\n v2 = date_range('2012-1-2', periods=3, freq='D')\n rs = Series(v2) - Series(v1)\n xp = Series(1e9 * 3600 * 24, rs.index).astype(\n 'int64').astype('timedelta64[ns]')\n assert_series_equal(rs, xp)\n self.assertEqual(rs.dtype, 'timedelta64[ns]')\n\n df = DataFrame(dict(A=v1))\n td = Series([timedelta(days=i) for i in range(3)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # series on the rhs\n result = df['A'] - df['A'].shift()\n self.assertEqual(result.dtype, 'timedelta64[ns]')\n\n result = df['A'] + td\n self.assertEqual(result.dtype, 'M8[ns]')\n\n # scalar Timestamp on rhs\n maxa = df['A'].max()\n tm.assertIsInstance(maxa, Timestamp)\n\n resultb = df['A'] - df['A'].max()\n self.assertEqual(resultb.dtype, 'timedelta64[ns]')\n\n # timestamp on lhs\n result = resultb + df['A']\n values = [Timestamp('20111230'), Timestamp('20120101'), Timestamp('20120103')]\n expected = Series(values, name='A')\n assert_series_equal(result, expected)\n\n # datetimes on rhs\n result = df['A'] - datetime(2001, 1, 1)\n expected = Series([timedelta(days=4017 + i) for i in range(3)], name='A')\n assert_series_equal(result, expected)\n self.assertEqual(result.dtype, 'm8[ns]')\n\n d = datetime(2001, 1, 1, 3, 4)\n resulta = df['A'] - d\n self.assertEqual(resulta.dtype, 'm8[ns]')\n\n # roundtrip\n resultb = resulta + d\n assert_series_equal(df['A'], resultb)\n\n # timedeltas on rhs\n td = timedelta(days=1)\n resulta = df['A'] + td\n resultb = resulta - td\n assert_series_equal(resultb, df['A'])\n self.assertEqual(resultb.dtype, 'M8[ns]')\n\n # roundtrip\n td = timedelta(minutes=5, seconds=3)\n resulta = df['A'] + td\n resultb = resulta - td\n assert_series_equal(df['A'], resultb)\n self.assertEqual(resultb.dtype, 'M8[ns]')\n\n # inplace\n value = rs[2] + np.timedelta64(timedelta(minutes=5,seconds=1))\n rs[2] += np.timedelta64(timedelta(minutes=5,seconds=1))\n self.assertEqual(rs[2], value)\n\n def test_timedeltas_with_DateOffset(self):\n\n # GH 4532\n # operate with pd.offsets\n s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])\n\n result = s + pd.offsets.Second(5)\n result2 = pd.offsets.Second(5) + s\n expected = Series(\n [Timestamp('20130101 9:01:05'), Timestamp('20130101 9:02:05')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Milli(5)\n result2 = pd.offsets.Milli(5) + s\n expected = Series(\n [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)\n expected = Series(\n [Timestamp('20130101 9:06:00.005'), Timestamp('20130101 9:07:00.005')])\n assert_series_equal(result, expected)\n\n # operate with np.timedelta64 correctly\n result = s + np.timedelta64(1, 's')\n result2 = np.timedelta64(1, 's') + s\n expected = Series(\n [Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])\n 
assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + np.timedelta64(5, 'ms')\n result2 = np.timedelta64(5, 'ms') + s\n expected = Series(\n [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n # valid DateOffsets\n for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',\n 'Milli', 'Nano' ]:\n op = getattr(pd.offsets,do)\n s + op(5)\n op(5) + s\n\n\n def test_timedelta64_operations_with_DateOffset(self):\n # GH 10699\n td = Series([timedelta(minutes=5, seconds=3)] * 3)\n result = td + pd.offsets.Minute(1)\n expected = Series([timedelta(minutes=6, seconds=3)] * 3)\n assert_series_equal(result, expected)\n\n result = td - pd.offsets.Minute(1)\n expected = Series([timedelta(minutes=4, seconds=3)] * 3)\n assert_series_equal(result, expected)\n\n result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),\n pd.offsets.Hour(2)])\n expected = Series([timedelta(minutes=6, seconds=3),\n timedelta(minutes=5, seconds=6),\n timedelta(hours=2, minutes=5, seconds=3)])\n assert_series_equal(result, expected)\n\n result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)\n expected = Series([timedelta(minutes=6, seconds=15)] * 3)\n assert_series_equal(result, expected)\n\n # valid DateOffsets\n for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',\n 'Milli', 'Nano' ]:\n op = getattr(pd.offsets,do)\n td + op(5)\n op(5) + td\n td - op(5)\n op(5) - td\n\n def test_timedelta64_operations_with_timedeltas(self):\n\n # td operate with td\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td2 = timedelta(minutes=5, seconds=4)\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) -Series(\n [timedelta(seconds=1)] * 3)\n self.assertEqual(result.dtype, 'm8[ns]')\n assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = (Series([timedelta(seconds=1)] * 3) -\n Series([timedelta(seconds=0)] * 3))\n assert_series_equal(result2, expected)\n\n # roundtrip\n assert_series_equal(result + td2,td1)\n\n # Now again, using pd.to_timedelta, which should build\n # a Series or a scalar, depending on input.\n td1 = Series(pd.to_timedelta(['00:05:03'] * 3))\n td2 = pd.to_timedelta('00:05:04')\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) -Series(\n [timedelta(seconds=1)] * 3)\n self.assertEqual(result.dtype, 'm8[ns]')\n assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = (Series([timedelta(seconds=1)] * 3) -\n Series([timedelta(seconds=0)] * 3))\n assert_series_equal(result2, expected)\n\n # roundtrip\n assert_series_equal(result + td2,td1)\n\n def test_timedelta64_operations_with_integers(self):\n\n # GH 4521\n # divide/multiply by integers\n startdate = Series(date_range('2013-01-01', '2013-01-03'))\n enddate = Series(date_range('2013-03-01', '2013-03-03'))\n\n s1 = enddate - startdate\n s1[2] = np.nan\n s2 = Series([2, 3, 4])\n expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 / s2\n assert_series_equal(result,expected)\n\n s2 = Series([20, 30, 40])\n expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 / s2\n assert_series_equal(result,expected)\n\n result = s1 / 2\n expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n s2 = Series([20, 30, 40])\n expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')\n 
expected[2] = np.nan\n result = s1 * s2\n assert_series_equal(result,expected)\n\n for dtype in ['int32','int16','uint32','uint64','uint32','uint16','uint8']:\n s2 = Series([20, 30, 40],dtype=dtype)\n expected = Series(s1.values.astype(np.int64) * s2.astype(np.int64), dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 * s2\n assert_series_equal(result,expected)\n\n result = s1 * 2\n expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n result = s1 * -1\n expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n # invalid ops\n for op in ['__true_div__','__div__','__mul__']:\n sop = getattr(s1,op,None)\n if sop is not None:\n self.assertRaises(TypeError, sop, s2.astype(float))\n self.assertRaises(TypeError, sop, 2.)\n\n for op in ['__add__','__sub__']:\n sop = getattr(s1,op,None)\n if sop is not None:\n self.assertRaises(TypeError, sop, 1)\n self.assertRaises(TypeError, sop, s2.values)\n\n def test_timedelta64_conversions(self):\n startdate = Series(date_range('2013-01-01', '2013-01-03'))\n enddate = Series(date_range('2013-03-01', '2013-03-03'))\n\n s1 = enddate - startdate\n s1[2] = np.nan\n\n for m in [1, 3, 10]:\n for unit in ['D','h','m','s','ms','us','ns']:\n\n # op\n expected = s1.apply(lambda x: x / np.timedelta64(m,unit))\n result = s1 / np.timedelta64(m,unit)\n assert_series_equal(result, expected)\n\n if m == 1 and unit != 'ns':\n\n # astype\n result = s1.astype(\"timedelta64[{0}]\".format(unit))\n assert_series_equal(result, expected)\n\n # reverse op\n expected = s1.apply(lambda x: np.timedelta64(m,unit) / x)\n result = np.timedelta64(m,unit) / s1\n\n # astype\n s = Series(date_range('20130101',periods=3))\n result = s.astype(object)\n self.assertIsInstance(result.iloc[0],datetime)\n self.assertTrue(result.dtype == np.object_)\n\n result = s1.astype(object)\n self.assertIsInstance(result.iloc[0],timedelta)\n self.assertTrue(result.dtype == np.object_)\n\n def test_timedelta64_equal_timedelta_supported_ops(self):\n ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),\n Timestamp('20130228 22:00:00'),\n Timestamp('20130228 21:00:00')])\n\n intervals = 'D', 'h', 'm', 's', 'us'\n npy16_mappings = {'D': 24 * 60 * 60 * 1000000, 'h': 60 * 60 * 1000000,\n 'm': 60 * 1000000, 's': 1000000, 'us': 1}\n\n def timedelta64(*args):\n return sum(starmap(np.timedelta64, zip(args, intervals)))\n\n for op, d, h, m, s, us in product([operator.add, operator.sub],\n *([range(2)] * 5)):\n nptd = timedelta64(d, h, m, s, us)\n pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,\n microseconds=us)\n lhs = op(ser, nptd)\n rhs = op(ser, pytd)\n\n try:\n assert_series_equal(lhs, rhs)\n except:\n raise AssertionError(\n \"invalid comparsion [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\\n{6}\\n{7}\\n\".format(op, d, h, m, s, us, lhs, rhs))\n\n def test_timedelta_assignment(self):\n # GH 8209\n s = Series([])\n s.loc['B'] = timedelta(1)\n tm.assert_series_equal(s,Series(Timedelta('1 days'),index=['B']))\n\n s = s.reindex(s.index.insert(0, 'A'))\n tm.assert_series_equal(s,Series([np.nan,Timedelta('1 days')],index=['A','B']))\n\n result = s.fillna(timedelta(1))\n expected = Series(Timedelta('1 days'),index=['A','B'])\n tm.assert_series_equal(result, expected)\n\n s.loc['A'] = timedelta(1)\n tm.assert_series_equal(s, expected)\n\n def test_operators_datetimelike(self):\n\n def run_ops(ops, get_ser, test_ser):\n\n # check that we are getting 
a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not defined\n for op_str in ops:\n op = getattr(get_ser, op_str, None)\n with tm.assertRaisesRegexp(TypeError, 'operate'):\n op(test_ser)\n\n ### timedelta64 ###\n td1 = Series([timedelta(minutes=5,seconds=3)]*3)\n td1.iloc[2] = np.nan\n td2 = timedelta(minutes=5,seconds=4)\n ops = ['__mul__','__floordiv__','__pow__',\n '__rmul__','__rfloordiv__','__rpow__']\n run_ops(ops, td1, td2)\n td1 + td2\n td2 + td1\n td1 - td2\n td2 - td1\n td1 / td2\n td2 / td1\n\n ### datetime64 ###\n dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),\n Timestamp('20120103')])\n dt1.iloc[2] = np.nan\n dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),\n Timestamp('20120104')])\n ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',\n '__pow__', '__radd__', '__rmul__', '__rfloordiv__',\n '__rtruediv__', '__rdiv__', '__rpow__']\n run_ops(ops, dt1, dt2)\n dt1 - dt2\n dt2 - dt1\n\n ### datetime64 with timetimedelta ###\n ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',\n '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',\n '__rpow__']\n run_ops(ops, dt1, td1)\n dt1 + td1\n td1 + dt1\n dt1 - td1\n # TODO: Decide if this ought to work.\n # td1 - dt1\n\n ### timetimedelta with datetime64 ###\n ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',\n '__pow__', '__rsub__', '__rmul__', '__rfloordiv__',\n '__rtruediv__', '__rdiv__', '__rpow__']\n run_ops(ops, td1, dt1)\n td1 + dt1\n dt1 + td1\n\n # 8260, 10763\n # datetime64 with tz\n ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',\n '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',\n '__rpow__']\n dt1 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n td1 = Series(timedelta_range('1 days 1 min',periods=5, freq='H'))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n run_ops(ops, dt1, td1)\n\n result = dt1 + td1[0]\n expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt2 + td2[0]\n expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n # odd numpy behavior with scalar timedeltas\n if not _np_version_under1p8:\n result = td1[0] + dt1\n expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = td2[0] + dt2\n expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt1 - td1[0]\n expected = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n self.assertRaises(TypeError, lambda: td1[0] - dt1)\n\n result = dt2 - td2[0]\n expected = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n self.assertRaises(TypeError, lambda: td2[0] - dt2)\n\n result = dt1 + td1\n expected = (dt1.dt.tz_localize(None) + td1).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt2 + td2\n expected = (dt2.dt.tz_localize(None) + td2).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt1 - td1\n expected = (dt1.dt.tz_localize(None) - td1).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt2 - td2\n expected = (dt2.dt.tz_localize(None) - td2).dt.tz_localize('US/Eastern')\n 
assert_series_equal(result, expected)\n\n self.assertRaises(TypeError, lambda: td1 - dt1)\n self.assertRaises(TypeError, lambda: td2 - dt2)\n\n def test_ops_datetimelike_align(self):\n # GH 7500\n # datetimelike ops need to align\n dt = Series(date_range('2012-1-1', periods=3, freq='D'))\n dt.iloc[2] = np.nan\n dt2 = dt[::-1]\n\n expected = Series([timedelta(0), timedelta(0), pd.NaT])\n # name is reset\n result = dt2 - dt\n assert_series_equal(result, expected)\n\n expected = Series(expected, name=0)\n result = (dt2.to_frame() - dt.to_frame())[0]\n assert_series_equal(result, expected)\n\n def test_timedelta64_functions(self):\n\n from datetime import timedelta\n from pandas import date_range\n\n # index min/max\n td = Series(date_range('2012-1-1', periods=3, freq='D')) - \\\n Timestamp('20120101')\n\n result = td.idxmin()\n self.assertEqual(result, 0)\n\n result = td.idxmax()\n self.assertEqual(result, 2)\n\n # GH 2982\n # with NaT\n td[0] = np.nan\n\n result = td.idxmin()\n self.assertEqual(result, 1)\n\n result = td.idxmax()\n self.assertEqual(result, 2)\n\n # abs\n s1 = Series(date_range('20120101', periods=3))\n s2 = Series(date_range('20120102', periods=3))\n expected = Series(s2 - s1)\n\n # this fails as numpy returns timedelta64[us]\n #result = np.abs(s1-s2)\n # assert_frame_equal(result,expected)\n\n result = (s1 - s2).abs()\n assert_series_equal(result, expected)\n\n # max/min\n result = td.max()\n expected = Timedelta('2 days')\n self.assertEqual(result, expected)\n\n result = td.min()\n expected = Timedelta('1 days')\n self.assertEqual(result, expected)\n\n def test_ops_consistency_on_empty(self):\n\n # GH 7869\n # consistency on empty\n\n # float\n result = Series(dtype=float).sum()\n self.assertEqual(result,0)\n\n result = Series(dtype=float).mean()\n self.assertTrue(isnull(result))\n\n result = Series(dtype=float).median()\n self.assertTrue(isnull(result))\n\n # timedelta64[ns]\n result = Series(dtype='m8[ns]').sum()\n self.assertEqual(result, Timedelta(0))\n\n result = Series(dtype='m8[ns]').mean()\n self.assertTrue(result is pd.NaT)\n\n result = Series(dtype='m8[ns]').median()\n self.assertTrue(result is pd.NaT)\n\n def test_timedelta_fillna(self):\n #GH 3371\n s = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130102'), Timestamp('20130103 9:01:01')])\n td = s.diff()\n\n # reg fillna\n result = td.fillna(0)\n expected = Series([timedelta(0), timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n # interprested as seconds\n result = td.fillna(1)\n expected = Series([timedelta(seconds=1), timedelta(0),\n timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n result = td.fillna(timedelta(days=1, seconds=1))\n expected = Series([timedelta(days=1, seconds=1), timedelta(0),\n timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n result = td.fillna(np.timedelta64(int(1e9)))\n expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n from pandas import tslib\n result = td.fillna(tslib.NaT)\n expected = Series([tslib.NaT, timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)], dtype='m8[ns]')\n assert_series_equal(result, expected)\n\n # ffill\n td[2] = np.nan\n result = td.ffill()\n expected = td.fillna(0)\n expected[0] = np.nan\n assert_series_equal(result, expected)\n\n # bfill\n td[2] = np.nan\n 
result = td.bfill()\n expected = td.fillna(0)\n expected[2] = timedelta(days=1, seconds=9*3600+60+1)\n assert_series_equal(result, expected)\n\n def test_datetime64_fillna(self):\n\n s = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130102'), Timestamp('20130103 9:01:01')])\n s[2] = np.nan\n\n # reg fillna\n result = s.fillna(Timestamp('20130104'))\n expected = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130104'), Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n from pandas import tslib\n result = s.fillna(tslib.NaT)\n expected = s\n assert_series_equal(result, expected)\n\n # ffill\n result = s.ffill()\n expected = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130101'), Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n # bfill\n result = s.bfill()\n expected = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130103 9:01:01'),\n Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n # GH 6587\n # make sure that we are treating as integer when filling\n # this also tests inference of a datetime-like with NaT's\n s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])\n expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]')\n result = s.fillna(method='backfill')\n assert_series_equal(result, expected)\n\n def test_datetime64_tz_fillna(self):\n for tz in ['US/Eastern', 'Asia/Tokyo']:\n # DatetimeBlock\n s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,\n Timestamp('2011-01-03 10:00'), pd.NaT])\n result = s.fillna(pd.Timestamp('2011-01-02 10:00'))\n expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'),\n Timestamp('2011-01-03 10:00'), Timestamp('2011-01-02 10:00')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))\n expected = Series([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00', tz=tz),\n Timestamp('2011-01-03 10:00'),\n Timestamp('2011-01-02 10:00', tz=tz)])\n self.assert_series_equal(expected, result)\n\n result = s.fillna('AAA')\n expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',\n Timestamp('2011-01-03 10:00'), 'AAA'], dtype=object)\n self.assert_series_equal(expected, result)\n\n result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),\n 3: pd.Timestamp('2011-01-04 10:00')})\n expected = Series([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00', tz=tz),\n Timestamp('2011-01-03 10:00'),\n Timestamp('2011-01-04 10:00')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),\n 3: pd.Timestamp('2011-01-04 10:00')})\n expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'),\n Timestamp('2011-01-03 10:00'), Timestamp('2011-01-04 10:00')])\n self.assert_series_equal(expected, result)\n\n # DatetimeBlockTZ\n idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,\n '2011-01-03 10:00', pd.NaT], tz=tz)\n s = pd.Series(idx)\n result = s.fillna(pd.Timestamp('2011-01-02 10:00'))\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz),\n Timestamp('2011-01-02 10:00'),\n Timestamp('2011-01-03 10:00', tz=tz),\n Timestamp('2011-01-02 10:00')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))\n idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',\n '2011-01-03 10:00', '2011-01-02 10:00'],\n tz=tz)\n 
expected = Series(idx)\n self.assert_series_equal(expected, result)\n\n result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz).to_pydatetime())\n idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',\n '2011-01-03 10:00', '2011-01-02 10:00'],\n tz=tz)\n expected = Series(idx)\n self.assert_series_equal(expected, result)\n\n result = s.fillna('AAA')\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',\n Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],\n dtype=object)\n self.assert_series_equal(expected, result)\n\n result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),\n 3: pd.Timestamp('2011-01-04 10:00')})\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz),\n Timestamp('2011-01-02 10:00', tz=tz),\n Timestamp('2011-01-03 10:00', tz=tz),\n Timestamp('2011-01-04 10:00')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),\n 3: pd.Timestamp('2011-01-04 10:00', tz=tz)})\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz),\n Timestamp('2011-01-02 10:00', tz=tz),\n Timestamp('2011-01-03 10:00', tz=tz),\n Timestamp('2011-01-04 10:00', tz=tz)])\n self.assert_series_equal(expected, result)\n\n # filling with a naive/other zone, coerce to object\n result = s.fillna(Timestamp('20130101'))\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz),\n Timestamp('2013-01-01'),\n Timestamp('2011-01-03 10:00', tz=tz),\n Timestamp('2013-01-01')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna(Timestamp('20130101',tz='US/Pacific'))\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz),\n Timestamp('2013-01-01',tz='US/Pacific'),\n Timestamp('2011-01-03 10:00', tz=tz),\n Timestamp('2013-01-01',tz='US/Pacific')])\n self.assert_series_equal(expected, result)\n\n def test_fillna_int(self):\n s = Series(np.random.randint(-100, 100, 50))\n s.fillna(method='ffill', inplace=True)\n assert_series_equal(s.fillna(method='ffill', inplace=False), s)\n\n def test_fillna_raise(self):\n s = Series(np.random.randint(-100, 100, 50))\n self.assertRaises(TypeError, s.fillna, [1, 2])\n self.assertRaises(TypeError, s.fillna, (1, 2))\n\n def test_raise_on_info(self):\n s = Series(np.random.randn(10))\n with tm.assertRaises(AttributeError):\n s.info()\n\n def test_isnull_for_inf(self):\n s = Series(['a', np.inf, np.nan, 1.0])\n with pd.option_context('mode.use_inf_as_null', True):\n r = s.isnull()\n dr = s.dropna()\n e = Series([False, True, True, False])\n de = Series(['a', 1.0], index=[0, 3])\n tm.assert_series_equal(r, e)\n tm.assert_series_equal(dr, de)\n\n\n# TimeSeries-specific\n\n def test_fillna(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n\n self.assert_numpy_array_equal(ts, ts.fillna(method='ffill'))\n\n ts[2] = np.NaN\n\n self.assert_numpy_array_equal(ts.fillna(method='ffill'),\n [0., 1., 1., 3., 4.])\n self.assert_numpy_array_equal(ts.fillna(method='backfill'),\n [0., 1., 3., 3., 4.])\n\n self.assert_numpy_array_equal(ts.fillna(value=5), [0., 1., 5., 3., 4.])\n\n self.assertRaises(ValueError, ts.fillna)\n self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')\n\n # GH 5703\n s1 = Series([np.nan])\n s2 = Series([1])\n result = s1.fillna(s2)\n expected = Series([1.])\n assert_series_equal(result,expected)\n result = s1.fillna({})\n assert_series_equal(result,s1)\n result = s1.fillna(Series(()))\n assert_series_equal(result,s1)\n result = s2.fillna(s1)\n assert_series_equal(result,s2)\n result = s1.fillna({ 0 : 1})\n assert_series_equal(result,expected)\n 
result = s1.fillna({ 1 : 1})\n assert_series_equal(result,Series([np.nan]))\n result = s1.fillna({ 0 : 1, 1 : 1})\n assert_series_equal(result,expected)\n result = s1.fillna(Series({ 0 : 1, 1 : 1}))\n assert_series_equal(result,expected)\n result = s1.fillna(Series({ 0 : 1, 1 : 1},index=[4,5]))\n assert_series_equal(result,s1)\n\n s1 = Series([0, 1, 2], list('abc'))\n s2 = Series([0, np.nan, 2], list('bac'))\n result = s2.fillna(s1)\n expected = Series([0,0,2.], list('bac'))\n assert_series_equal(result,expected)\n\n # limit\n s = Series(np.nan,index=[0,1,2])\n result = s.fillna(999,limit=1)\n expected = Series([999,np.nan,np.nan],index=[0,1,2])\n assert_series_equal(result,expected)\n\n result = s.fillna(999,limit=2)\n expected = Series([999,999,np.nan],index=[0,1,2])\n assert_series_equal(result,expected)\n\n # GH 9043\n # make sure a string representation of int/float values can be filled\n # correctly without raising errors or being converted\n vals = ['0', '1.5', '-0.3']\n for val in vals:\n s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')\n result = s.fillna(val)\n expected = Series([0, 1, val, val, 4], dtype='object')\n assert_series_equal(result, expected)\n\n def test_fillna_bug(self):\n x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])\n filled = x.fillna(method='ffill')\n expected = Series([nan, 1., 1., 3., 3.], x.index)\n assert_series_equal(filled, expected)\n\n filled = x.fillna(method='bfill')\n expected = Series([1., 1., 3., 3., nan], x.index)\n assert_series_equal(filled, expected)\n\n def test_fillna_inplace(self):\n x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])\n y = x.copy()\n\n y.fillna(value=0, inplace=True)\n\n expected = x.fillna(value=0)\n assert_series_equal(y, expected)\n\n def test_fillna_invalid_method(self):\n try:\n self.ts.fillna(method='ffil')\n except ValueError as inst:\n self.assertIn('ffil', str(inst))\n\n def test_ffill(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n ts[2] = np.NaN\n assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))\n\n def test_bfill(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n ts[2] = np.NaN\n assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))\n\n def test_sub_of_datetime_from_TimeSeries(self):\n from pandas.tseries.timedeltas import to_timedelta\n from datetime import datetime\n a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00))\n b = datetime(1993, 6, 22, 13, 30)\n a = Series([a])\n result = to_timedelta(np.abs(a - b))\n self.assertEqual(result.dtype, 'timedelta64[ns]')\n\n def test_datetime64_with_index(self):\n\n # arithmetic integer ops with an index\n s = Series(np.random.randn(5))\n expected = s - s.index.to_series()\n result = s - s.index\n assert_series_equal(result, expected)\n\n # GH 4629\n # arithmetic datetime64 ops with an index\n s = Series(date_range('20130101', periods=5),\n index=date_range('20130101', periods=5))\n expected = s - s.index.to_series()\n result = s - s.index\n assert_series_equal(result, expected)\n\n result = s - s.index.to_period()\n assert_series_equal(result, expected)\n\n df = DataFrame(np.random.randn(5,2),\n index=date_range('20130101', periods=5))\n df['date'] = Timestamp('20130102')\n df['expected'] = df['date'] - df.index.to_series()\n df['result'] = df['date'] - df.index\n assert_series_equal(df['result'], df['expected'], check_names=False)\n\n def test_timedelta64_nan(self):\n\n from pandas import tslib\n td = Series([timedelta(days=i) for i in range(10)])\n\n # nan ops on timedeltas\n 
td1 = td.copy()\n td1[0] = np.nan\n self.assertTrue(isnull(td1[0]))\n self.assertEqual(td1[0].value, tslib.iNaT)\n td1[0] = td[0]\n self.assertFalse(isnull(td1[0]))\n\n td1[1] = tslib.iNaT\n self.assertTrue(isnull(td1[1]))\n self.assertEqual(td1[1].value, tslib.iNaT)\n td1[1] = td[1]\n self.assertFalse(isnull(td1[1]))\n\n td1[2] = tslib.NaT\n self.assertTrue(isnull(td1[2]))\n self.assertEqual(td1[2].value, tslib.iNaT)\n td1[2] = td[2]\n self.assertFalse(isnull(td1[2]))\n\n # boolean setting\n # this doesn't work, not sure numpy even supports it\n #result = td[(td>np.timedelta64(timedelta(days=3))) & (td<np.timedelta64(timedelta(days=7)))] = np.nan\n #self.assertEqual(isnull(result).sum(), 7)\n\n # NumPy limitiation =(\n\n # def test_logical_range_select(self):\n # np.random.seed(12345)\n # selector = -0.5 <= self.ts <= 0.5\n # expected = (self.ts >= -0.5) & (self.ts <= 0.5)\n # assert_series_equal(selector, expected)\n\n def test_operators_na_handling(self):\n from decimal import Decimal\n from datetime import date\n s = Series([Decimal('1.3'), Decimal('2.3')],\n index=[date(2012, 1, 1), date(2012, 1, 2)])\n\n result = s + s.shift(1)\n result2 = s.shift(1) + s\n self.assertTrue(isnull(result[0]))\n self.assertTrue(isnull(result2[0]))\n\n s = Series(['foo', 'bar', 'baz', np.nan])\n result = 'prefix_' + s\n expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])\n assert_series_equal(result, expected)\n\n result = s + '_suffix'\n expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])\n assert_series_equal(result, expected)\n\n def test_object_comparisons(self):\n s = Series(['a', 'b', np.nan, 'c', 'a'])\n\n result = s == 'a'\n expected = Series([True, False, False, False, True])\n assert_series_equal(result, expected)\n\n result = s < 'a'\n expected = Series([False, False, False, False, False])\n assert_series_equal(result, expected)\n\n result = s != 'a'\n expected = -(s == 'a')\n assert_series_equal(result, expected)\n\n def test_comparison_tuples(self):\n # GH11339\n # comparisons vs tuple\n s = Series([(1,1),(1,2)])\n\n result = s == (1,2)\n expected = Series([False,True])\n assert_series_equal(result, expected)\n\n result = s != (1,2)\n expected = Series([True, False])\n assert_series_equal(result, expected)\n\n result = s == (0,0)\n expected = Series([False, False])\n assert_series_equal(result, expected)\n\n result = s != (0,0)\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n s = Series([(1,1),(1,1)])\n\n result = s == (1,1)\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n result = s != (1,1)\n expected = Series([False, False])\n assert_series_equal(result, expected)\n\n s = Series([frozenset([1]),frozenset([1,2])])\n\n result = s == frozenset([1])\n expected = Series([True, False])\n assert_series_equal(result, expected)\n\n def test_comparison_operators_with_nas(self):\n s = Series(bdate_range('1/1/2000', periods=10), dtype=object)\n s[::2] = np.nan\n\n # test that comparisons work\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n val = s[5]\n\n f = getattr(operator, op)\n result = f(s, val)\n\n expected = f(s.dropna(), val).reindex(s.index)\n\n if op == 'ne':\n expected = expected.fillna(True).astype(bool)\n else:\n expected = expected.fillna(False).astype(bool)\n\n assert_series_equal(result, expected)\n\n # fffffffuuuuuuuuuuuu\n # result = f(val, s)\n # expected = f(val, s.dropna()).reindex(s.index)\n # assert_series_equal(result, expected)\n\n # boolean &, |, ^ should work with 
object arrays and propagate NAs\n\n ops = ['and_', 'or_', 'xor']\n mask = s.isnull()\n for bool_op in ops:\n f = getattr(operator, bool_op)\n\n filled = s.fillna(s[0])\n\n result = f(s < s[9], s > s[3])\n\n expected = f(filled < filled[9], filled > filled[3])\n expected[mask] = False\n assert_series_equal(result, expected)\n\n def test_comparison_object_numeric_nas(self):\n s = Series(np.random.randn(10), dtype=object)\n shifted = s.shift(2)\n\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n f = getattr(operator, op)\n\n result = f(s, shifted)\n expected = f(s.astype(float), shifted.astype(float))\n assert_series_equal(result, expected)\n\n def test_comparison_invalid(self):\n\n # GH4968\n # invalid date/int comparisons\n s = Series(range(5))\n s2 = Series(date_range('20010101', periods=5))\n\n for (x, y) in [(s,s2),(s2,s)]:\n self.assertRaises(TypeError, lambda : x == y)\n self.assertRaises(TypeError, lambda : x != y)\n self.assertRaises(TypeError, lambda : x >= y)\n self.assertRaises(TypeError, lambda : x > y)\n self.assertRaises(TypeError, lambda : x < y)\n self.assertRaises(TypeError, lambda : x <= y)\n\n def test_more_na_comparisons(self):\n left = Series(['a', np.nan, 'c'])\n right = Series(['a', np.nan, 'd'])\n\n result = left == right\n expected = Series([True, False, False])\n assert_series_equal(result, expected)\n\n result = left != right\n expected = Series([False, True, True])\n assert_series_equal(result, expected)\n\n result = left == np.nan\n expected = Series([False, False, False])\n assert_series_equal(result, expected)\n\n result = left != np.nan\n expected = Series([True, True, True])\n assert_series_equal(result, expected)\n\n def test_comparison_different_length(self):\n a = Series(['a', 'b', 'c'])\n b = Series(['b', 'a'])\n self.assertRaises(ValueError, a.__lt__, b)\n\n a = Series([1, 2])\n b = Series([2, 3, 4])\n self.assertRaises(ValueError, a.__eq__, b)\n\n def test_comparison_label_based(self):\n\n # GH 4947\n # comparisons should be label based\n\n a = Series([True, False, True], list('bca'))\n b = Series([False, True, False], list('abc'))\n\n expected = Series([True, False, False], list('bca'))\n result = a & b\n assert_series_equal(result,expected)\n\n expected = Series([True, False, True], list('bca'))\n result = a | b\n assert_series_equal(result,expected)\n\n expected = Series([False, False, True], list('bca'))\n result = a ^ b\n assert_series_equal(result,expected)\n\n # rhs is bigger\n a = Series([True, False, True], list('bca'))\n b = Series([False, True, False, True], list('abcd'))\n\n expected = Series([True, False, False], list('bca'))\n result = a & b\n assert_series_equal(result,expected)\n\n expected = Series([True, False, True], list('bca'))\n result = a | b\n assert_series_equal(result,expected)\n\n # filling\n\n # vs empty\n result = a & Series([])\n expected = Series([False, False, False], list('bca'))\n assert_series_equal(result,expected)\n\n result = a | Series([])\n expected = Series([True, False, True], list('bca'))\n assert_series_equal(result,expected)\n\n # vs non-matching\n result = a & Series([1],['z'])\n expected = Series([False, False, False], list('bca'))\n assert_series_equal(result,expected)\n\n result = a | Series([1],['z'])\n expected = Series([True, False, True], list('bca'))\n assert_series_equal(result,expected)\n\n # identity\n # we would like s[s|e] == s to hold for any e, whether empty or not\n for e in [Series([]),Series([1],['z']),Series(['z']),Series(np.nan,b.index),Series(np.nan,a.index)]:\n result = 
a[a | e]\n assert_series_equal(result,a[a])\n\n # vs scalars\n index = list('bca')\n t = Series([True,False,True])\n\n for v in [True,1,2]:\n result = Series([True,False,True],index=index) | v\n expected = Series([True,True,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [np.nan,'foo']:\n self.assertRaises(TypeError, lambda : t | v)\n\n for v in [False,0]:\n result = Series([True,False,True],index=index) | v\n expected = Series([True,False,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [True,1]:\n result = Series([True,False,True],index=index) & v\n expected = Series([True,False,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [False,0]:\n result = Series([True,False,True],index=index) & v\n expected = Series([False,False,False],index=index)\n assert_series_equal(result,expected)\n for v in [np.nan]:\n self.assertRaises(TypeError, lambda : t & v)\n\n def test_operators_bitwise(self):\n # GH 9016: support bitwise op for integer types\n index = list('bca')\n\n s_tft = Series([True, False, True], index=index)\n s_fff = Series([False, False, False], index=index)\n s_tff = Series([True, False, False], index=index)\n s_empty = Series([])\n s_0101 = Series([0,1,0,1])\n s_0123 = Series(range(4),dtype='int64')\n s_3333 = Series([3] * 4)\n s_4444 = Series([4] * 4)\n\n res = s_tft & s_empty\n expected = s_fff\n assert_series_equal(res, expected)\n\n res = s_tft | s_empty\n expected = s_tft\n assert_series_equal(res, expected)\n\n res = s_0123 & s_3333\n expected = Series(range(4),dtype='int64')\n assert_series_equal(res, expected)\n\n res = s_0123 | s_4444\n expected = Series(range(4, 8),dtype='int64')\n assert_series_equal(res, expected)\n\n s_a0b1c0 = Series([1], list('b'))\n\n res = s_tft & s_a0b1c0\n expected = s_tff\n assert_series_equal(res, expected)\n\n res = s_tft | s_a0b1c0\n expected = s_tft\n assert_series_equal(res, expected)\n\n n0 = 0\n res = s_tft & n0\n expected = s_fff\n assert_series_equal(res, expected)\n\n res = s_0123 & n0\n expected = Series([0] * 4)\n assert_series_equal(res, expected)\n\n n1 = 1\n res = s_tft & n1\n expected = s_tft\n assert_series_equal(res, expected)\n\n res = s_0123 & n1\n expected = Series([0, 1, 0, 1])\n assert_series_equal(res, expected)\n\n s_1111 = Series([1]*4, dtype='int8')\n res = s_0123 & s_1111\n expected = Series([0, 1, 0, 1], dtype='int64')\n assert_series_equal(res, expected)\n\n res = s_0123.astype(np.int16) | s_1111.astype(np.int32)\n expected = Series([1, 1, 3, 3], dtype='int32')\n assert_series_equal(res, expected)\n\n self.assertRaises(TypeError, lambda: s_1111 & 'a')\n self.assertRaises(TypeError, lambda: s_1111 & ['a','b','c','d'])\n self.assertRaises(TypeError, lambda: s_0123 & np.NaN)\n self.assertRaises(TypeError, lambda: s_0123 & 3.14)\n self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])\n\n # s_0123 will be all false now because of reindexing like s_tft\n assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca')))\n # s_tft will be all false now because of reindexing like s_0123\n assert_series_equal(s_0123 & s_tft, Series([False] * 4))\n assert_series_equal(s_0123 & False, Series([False] * 4))\n assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))\n assert_series_equal(s_0123 & [False], Series([False] * 4))\n assert_series_equal(s_0123 & (False), Series([False] * 4))\n assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4))\n\n s_ftft = Series([False, True, False, True])\n 
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)\n\n s_abNd = Series(['a','b',np.NaN,'d'])\n res = s_0123 & s_abNd\n expected = s_ftft\n assert_series_equal(res, expected)\n\n def test_between(self):\n s = Series(bdate_range('1/1/2000', periods=20).asobject)\n s[::2] = np.nan\n\n result = s[s.between(s[3], s[17])]\n expected = s[3:18].dropna()\n assert_series_equal(result, expected)\n\n result = s[s.between(s[3], s[17], inclusive=False)]\n expected = s[5:16].dropna()\n assert_series_equal(result, expected)\n\n def test_setitem_na(self):\n # these induce dtype changes\n expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\n s[::2] = np.nan\n assert_series_equal(s, expected)\n\n # get's coerced to float, right?\n expected = Series([np.nan, 1, np.nan, 0])\n s = Series([True, True, False, False])\n s[::2] = np.nan\n assert_series_equal(s, expected)\n\n expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])\n s = Series(np.arange(10))\n s[:5] = np.nan\n assert_series_equal(s, expected)\n\n def test_scalar_na_cmp_corners(self):\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n def tester(a, b):\n return a & b\n\n self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))\n\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])\n s[::2] = np.nan\n\n expected = Series(True,index=s.index)\n expected[::2] = False\n assert_series_equal(tester(s, list(s)), expected)\n\n d = DataFrame({'A': s})\n # TODO: Fix this exception - needs to be fixed! (see GH5035)\n # (previously this was a TypeError because series returned\n # NotImplemented\n self.assertRaises(ValueError, tester, s, d)\n\n def test_idxmin(self):\n # test idxmin\n # _check_stat_op approach can not be used here because of isnull check.\n\n # add some NaNs\n self.series[5:15] = np.NaN\n\n # skipna or no\n self.assertEqual(self.series[self.series.idxmin()], self.series.min())\n self.assertTrue(isnull(self.series.idxmin(skipna=False)))\n\n # no NaNs\n nona = self.series.dropna()\n self.assertEqual(nona[nona.idxmin()], nona.min())\n self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),\n nona.values.argmin())\n\n # all NaNs\n allna = self.series * nan\n self.assertTrue(isnull(allna.idxmin()))\n\n # datetime64[ns]\n from pandas import date_range\n s = Series(date_range('20130102', periods=6))\n result = s.idxmin()\n self.assertEqual(result, 0)\n\n s[0] = np.nan\n result = s.idxmin()\n self.assertEqual(result, 1)\n\n def test_idxmax(self):\n # test idxmax\n # _check_stat_op approach can not be used here because of isnull check.\n\n # add some NaNs\n self.series[5:15] = np.NaN\n\n # skipna or no\n self.assertEqual(self.series[self.series.idxmax()], self.series.max())\n self.assertTrue(isnull(self.series.idxmax(skipna=False)))\n\n # no NaNs\n nona = self.series.dropna()\n self.assertEqual(nona[nona.idxmax()], nona.max())\n self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),\n nona.values.argmax())\n\n # all NaNs\n allna = self.series * nan\n self.assertTrue(isnull(allna.idxmax()))\n\n from pandas import date_range\n s = Series(date_range('20130102', periods=6))\n result = s.idxmax()\n self.assertEqual(result, 5)\n\n s[5] = np.nan\n result = s.idxmax()\n self.assertEqual(result, 4)\n\n # Float64Index\n # GH 5914\n s = pd.Series([1,2,3],[1.1,2.1,3.1])\n result = s.idxmax()\n self.assertEqual(result, 3.1)\n result = s.idxmin()\n self.assertEqual(result, 1.1)\n\n s = pd.Series(s.index, s.index)\n result = s.idxmax()\n 
self.assertEqual(result, 3.1)\n result = s.idxmin()\n self.assertEqual(result, 1.1)\n\n def test_ndarray_compat(self):\n\n # test numpy compat with Series as sub-class of NDFrame\n tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],\n index=date_range('1/1/2000', periods=1000))\n\n def f(x):\n return x[x.argmax()]\n result = tsdf.apply(f)\n expected = tsdf.max()\n assert_series_equal(result,expected)\n\n # .item()\n s = Series([1])\n result = s.item()\n self.assertEqual(result, 1)\n self.assertEqual(s.item(), s.iloc[0])\n\n # using an ndarray like function\n s = Series(np.random.randn(10))\n result = np.ones_like(s)\n expected = Series(1,index=range(10),dtype='float64')\n #assert_series_equal(result,expected)\n\n # ravel\n s = Series(np.random.randn(10))\n tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))\n\n # compress\n # GH 6658\n s = Series([0,1.,-1],index=list('abc'))\n result = np.compress(s>0,s)\n assert_series_equal(result, Series([1.],index=['b']))\n\n result = np.compress(s<-1,s)\n assert_series_equal(result, Series([],dtype='float64'))\n\n def test_complexx(self):\n\n # GH4819\n # complex access for ndarray compat\n a = np.arange(5)\n b = Series(a + 4j*a)\n tm.assert_almost_equal(a,b.real)\n tm.assert_almost_equal(4*a,b.imag)\n\n b.real = np.arange(5)+5\n tm.assert_almost_equal(a+5,b.real)\n tm.assert_almost_equal(4*a,b.imag)\n\n def test_underlying_data_conversion(self):\n\n # GH 4080\n df = DataFrame(dict((c, [1,2,3]) for c in ['a', 'b', 'c']))\n df.set_index(['a', 'b', 'c'], inplace=True)\n s = Series([1], index=[(2,2,2)])\n df['val'] = 0\n df\n df['val'].update(s)\n\n expected = DataFrame(dict(a = [1,2,3], b = [1,2,3], c = [1,2,3], val = [0,1,0]))\n expected.set_index(['a', 'b', 'c'], inplace=True)\n tm.assert_frame_equal(df,expected)\n\n # GH 3970\n # these are chained assignments as well\n pd.set_option('chained_assignment',None)\n df = DataFrame({ \"aa\":range(5), \"bb\":[2.2]*5})\n df[\"cc\"] = 0.0\n ck = [True]*len(df)\n df[\"bb\"].iloc[0] = .13\n df_tmp = df.iloc[ck]\n df[\"bb\"].iloc[0] = .15\n self.assertEqual(df['bb'].iloc[0], 0.15)\n pd.set_option('chained_assignment','raise')\n\n # GH 3217\n df = DataFrame(dict(a = [1,3], b = [np.nan, 2]))\n df['c'] = np.nan\n df['c'].update(pd.Series(['foo'],index=[0]))\n\n expected = DataFrame(dict(a = [1,3], b = [np.nan, 2], c = ['foo',np.nan]))\n tm.assert_frame_equal(df,expected)\n\n def test_operators_corner(self):\n series = self.ts\n\n empty = Series([], index=Index([]))\n\n result = series + empty\n self.assertTrue(np.isnan(result).all())\n\n result = empty + Series([], index=Index([]))\n self.assertEqual(len(result), 0)\n\n # TODO: this returned NotImplemented earlier, what to do?\n # deltas = Series([timedelta(1)] * 5, index=np.arange(5))\n # sub_deltas = deltas[::2]\n # deltas5 = deltas * 5\n # deltas = deltas + sub_deltas\n\n # float + int\n int_ts = self.ts.astype(int)[:-5]\n added = self.ts + int_ts\n expected = self.ts.values[:-5] + int_ts.values\n self.assert_numpy_array_equal(added[:-5], expected)\n\n def test_operators_reverse_object(self):\n # GH 56\n arr = Series(np.random.randn(10), index=np.arange(10),\n dtype=object)\n\n def _check_op(arr, op):\n result = op(1., arr)\n expected = op(1., arr.astype(float))\n assert_series_equal(result.astype(float), expected)\n\n _check_op(arr, operator.add)\n _check_op(arr, operator.sub)\n _check_op(arr, operator.mul)\n _check_op(arr, operator.truediv)\n _check_op(arr, operator.floordiv)\n\n def test_series_frame_radd_bug(self):\n import 
operator\n\n # GH 353\n vals = Series(tm.rands_array(5, 10))\n result = 'foo_' + vals\n expected = vals.map(lambda x: 'foo_' + x)\n assert_series_equal(result, expected)\n\n frame = DataFrame({'vals': vals})\n result = 'foo_' + frame\n expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})\n tm.assert_frame_equal(result, expected)\n\n # really raise this time\n self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)\n\n def test_operators_frame(self):\n # rpow does not work with DataFrame\n df = DataFrame({'A': self.ts})\n\n tm.assert_almost_equal(self.ts + self.ts, self.ts + df['A'])\n tm.assert_almost_equal(self.ts ** self.ts, self.ts ** df['A'])\n tm.assert_almost_equal(self.ts < self.ts, self.ts < df['A'])\n tm.assert_almost_equal(self.ts / self.ts, self.ts / df['A'])\n\n def test_operators_combine(self):\n def _check_fill(meth, op, a, b, fill_value=0):\n exp_index = a.index.union(b.index)\n a = a.reindex(exp_index)\n b = b.reindex(exp_index)\n\n amask = isnull(a)\n bmask = isnull(b)\n\n exp_values = []\n for i in range(len(exp_index)):\n if amask[i]:\n if bmask[i]:\n exp_values.append(nan)\n continue\n exp_values.append(op(fill_value, b[i]))\n elif bmask[i]:\n if amask[i]:\n exp_values.append(nan)\n continue\n exp_values.append(op(a[i], fill_value))\n else:\n exp_values.append(op(a[i], b[i]))\n\n result = meth(a, b, fill_value=fill_value)\n expected = Series(exp_values, exp_index)\n assert_series_equal(result, expected)\n\n a = Series([nan, 1., 2., 3., nan], index=np.arange(5))\n b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))\n\n pairings = []\n for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:\n fv = 0\n lop = getattr(Series, op)\n lequiv = getattr(operator, op)\n rop = getattr(Series, 'r' + op)\n # bind op at definition time...\n requiv = lambda x, y, op=op: getattr(operator, op)(y, x)\n pairings.append((lop, lequiv, fv))\n pairings.append((rop, requiv, fv))\n\n if compat.PY3:\n pairings.append((Series.div, operator.truediv, 1))\n pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))\n else:\n pairings.append((Series.div, operator.div, 1))\n pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))\n\n for op, equiv_op, fv in pairings:\n result = op(a, b)\n exp = equiv_op(a, b)\n assert_series_equal(result, exp)\n _check_fill(op, equiv_op, a, b, fill_value=fv)\n # should accept axis=0 or axis='rows'\n op(a, b, axis=0)\n\n def test_combine_first(self):\n values = tm.makeIntIndex(20).values.astype(float)\n series = Series(values, index=tm.makeIntIndex(20))\n\n series_copy = series * 2\n series_copy[::2] = np.NaN\n\n # nothing used from the input\n combined = series.combine_first(series_copy)\n\n self.assert_numpy_array_equal(combined, series)\n\n # Holes filled from input\n combined = series_copy.combine_first(series)\n self.assertTrue(np.isfinite(combined).all())\n\n self.assert_numpy_array_equal(combined[::2], series[::2])\n self.assert_numpy_array_equal(combined[1::2], series_copy[1::2])\n\n # mixed types\n index = tm.makeStringIndex(20)\n floats = Series(tm.randn(20), index=index)\n strings = Series(tm.makeStringIndex(10), index=index[::2])\n\n combined = strings.combine_first(floats)\n\n tm.assert_dict_equal(strings, combined, compare_keys=False)\n tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)\n\n # corner case\n s = Series([1., 2, 3], index=[0, 1, 2])\n result = s.combine_first(Series([], index=[]))\n assert_series_equal(s, result)\n\n def test_update(self):\n s = Series([1.5, nan, 3., 4., 
nan])\n s2 = Series([nan, 3.5, nan, 5.])\n s.update(s2)\n\n expected = Series([1.5, 3.5, 3., 5., np.nan])\n assert_series_equal(s, expected)\n\n # GH 3217\n df = DataFrame([{\"a\": 1}, {\"a\": 3, \"b\": 2}])\n df['c'] = np.nan\n\n # this will fail as long as series is a sub-class of ndarray\n # df['c'].update(Series(['foo'],index=[0])) #####\n\n def test_corr(self):\n tm._skip_if_no_scipy()\n\n import scipy.stats as stats\n\n # full overlap\n self.assertAlmostEqual(self.ts.corr(self.ts), 1)\n\n # partial overlap\n self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)\n\n self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))\n\n ts1 = self.ts[:15].reindex(self.ts.index)\n ts2 = self.ts[5:].reindex(self.ts.index)\n self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))\n\n # No overlap\n self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))\n\n # all NA\n cp = self.ts[:10].copy()\n cp[:] = np.nan\n self.assertTrue(isnull(cp.corr(cp)))\n\n A = tm.makeTimeSeries()\n B = tm.makeTimeSeries()\n result = A.corr(B)\n expected, _ = stats.pearsonr(A, B)\n self.assertAlmostEqual(result, expected)\n\n def test_corr_rank(self):\n tm._skip_if_no_scipy()\n\n import scipy\n import scipy.stats as stats\n\n # kendall and spearman\n A = tm.makeTimeSeries()\n B = tm.makeTimeSeries()\n A[-5:] = A[:5]\n result = A.corr(B, method='kendall')\n expected = stats.kendalltau(A, B)[0]\n self.assertAlmostEqual(result, expected)\n\n result = A.corr(B, method='spearman')\n expected = stats.spearmanr(A, B)[0]\n self.assertAlmostEqual(result, expected)\n\n # these methods got rewritten in 0.8\n if scipy.__version__ < LooseVersion('0.9'):\n raise nose.SkipTest(\"skipping corr rank because of scipy version \"\n \"{0}\".format(scipy.__version__))\n\n # results from R\n A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,\n 0.76910310, -0.06430576, -2.09704447, 0.40660407,\n -0.89926396, 0.94209606])\n B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,\n -0.01680292, 1.17258718, -1.06009347, -0.10222060,\n -0.89076239, 0.89372375])\n kexp = 0.4319297\n sexp = 0.5853767\n self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)\n self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)\n\n def test_cov(self):\n # full overlap\n self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)\n\n # partial overlap\n self.assertAlmostEqual(\n self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std() ** 2)\n\n # No overlap\n self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))\n\n # all NA\n cp = self.ts[:10].copy()\n cp[:] = np.nan\n self.assertTrue(isnull(cp.cov(cp)))\n\n # min_periods\n self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))\n\n ts1 = self.ts[:15].reindex(self.ts.index)\n ts2 = self.ts[5:].reindex(self.ts.index)\n self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))\n\n def test_copy(self):\n ts = self.ts.copy()\n\n ts[::2] = np.NaN\n\n # Did not modify original Series\n self.assertFalse(np.isnan(self.ts[0]))\n\n def test_count(self):\n self.assertEqual(self.ts.count(), len(self.ts))\n\n self.ts[::2] = np.NaN\n\n self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())\n\n mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])\n ts = Series(np.arange(len(mi)), index=mi)\n\n left = ts.count(level=1)\n right = Series([2, 3, 1], index=[1, 2, nan])\n assert_series_equal(left, right)\n\n ts.iloc[[0, 3, 5]] = nan\n assert_series_equal(ts.count(level=1), right - 1)\n\n def test_dtype(self):\n\n self.assertEqual(self.ts.dtype, 
np.dtype('float64'))\n self.assertEqual(self.ts.dtypes, np.dtype('float64'))\n self.assertEqual(self.ts.ftype, 'float64:dense')\n self.assertEqual(self.ts.ftypes, 'float64:dense')\n assert_series_equal(self.ts.get_dtype_counts(),Series(1,['float64']))\n assert_series_equal(self.ts.get_ftype_counts(),Series(1,['float64:dense']))\n\n def test_dot(self):\n a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])\n b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],\n columns=['p', 'q', 'r', 's']).T\n\n result = a.dot(b)\n expected = Series(np.dot(a.values, b.values),\n index=['1', '2', '3'])\n assert_series_equal(result, expected)\n\n # Check index alignment\n b2 = b.reindex(index=reversed(b.index))\n result = a.dot(b)\n assert_series_equal(result, expected)\n\n # Check ndarray argument\n result = a.dot(b.values)\n self.assertTrue(np.all(result == expected.values))\n assert_almost_equal(a.dot(b['2'].values), expected['2'])\n\n # Check series argument\n assert_almost_equal(a.dot(b['1']), expected['1'])\n assert_almost_equal(a.dot(b2['1']), expected['1'])\n\n self.assertRaises(Exception, a.dot, a.values[:3])\n self.assertRaises(ValueError, a.dot, b.T)\n\n def test_value_counts_nunique(self):\n\n # basics.rst doc example\n series = Series(np.random.randn(500))\n series[20:500] = np.nan\n series[10:20] = 5000\n result = series.nunique()\n self.assertEqual(result, 11)\n\n def test_unique(self):\n\n # 714 also, dtype=float\n s = Series([1.2345] * 100)\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n s = Series([1.2345] * 100, dtype='f4')\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n # NAs in object arrays #714\n s = Series(['foo'] * 100, dtype='O')\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n # decision about None\n s = Series([1, 2, 3, None, None, None], dtype=object)\n result = s.unique()\n expected = np.array([1, 2, 3, None], dtype=object)\n self.assert_numpy_array_equal(result, expected)\n\n def test_dropna_empty(self):\n s = Series([])\n self.assertEqual(len(s.dropna()), 0)\n s.dropna(inplace=True)\n self.assertEqual(len(s), 0)\n\n # invalid axis\n self.assertRaises(ValueError, s.dropna, axis=1)\n\n def test_datetime64_tz_dropna(self):\n # DatetimeBlock\n s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,\n Timestamp('2011-01-03 10:00'), pd.NaT])\n result = s.dropna()\n expected = Series([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-03 10:00')], index=[0, 2])\n self.assert_series_equal(result, expected)\n\n # DatetimeBlockTZ\n idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,\n '2011-01-03 10:00', pd.NaT],\n tz='Asia/Tokyo')\n s = pd.Series(idx)\n self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')\n result = s.dropna()\n expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],\n index=[0, 2])\n self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')\n self.assert_series_equal(result, expected)\n\n def test_dropna_no_nan(self):\n for s in [Series([1, 2, 3], name='x'),\n Series([False, True, False], name='x')]:\n\n result = s.dropna()\n self.assert_series_equal(result, s)\n self.assertFalse(result is s)\n\n s2 = s.copy()\n s2.dropna(inplace=True)\n self.assert_series_equal(s2, s)\n\n def test_axis_alias(self):\n s = Series([1, 2, np.nan])\n assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))\n self.assertEqual(s.dropna().sum('rows'), 3)\n self.assertEqual(s._get_axis_number('rows'), 0)\n 
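        # Hedged editor's sketch (not part of the original test; `example` is an
        # illustrative name): a Series has a single axis, so 'rows', 'index' and
        # 0 are interchangeable aliases, as the surrounding assertions check.
        example = Series([1.0, np.nan, 3.0])
        assert example.dropna(axis='rows').equals(example.dropna(axis='index'))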
self.assertEqual(s._get_axis_name('rows'), 'index')\n\n def test_drop_duplicates(self):\n # check both int and object\n for s in [Series([1, 2, 3, 3]), Series(['1', '2', '3', '3'])]:\n expected = Series([False, False, False, True])\n assert_series_equal(s.duplicated(), expected)\n assert_series_equal(s.drop_duplicates(), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(inplace=True)\n assert_series_equal(sc, s[~expected])\n\n expected = Series([False, False, True, False])\n assert_series_equal(s.duplicated(keep='last'), expected)\n assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(keep='last', inplace=True)\n assert_series_equal(sc, s[~expected])\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.duplicated(take_last=True), expected)\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])\n sc = s.copy()\n with tm.assert_produces_warning(FutureWarning):\n sc.drop_duplicates(take_last=True, inplace=True)\n assert_series_equal(sc, s[~expected])\n\n expected = Series([False, False, True, True])\n assert_series_equal(s.duplicated(keep=False), expected)\n assert_series_equal(s.drop_duplicates(keep=False), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(keep=False, inplace=True)\n assert_series_equal(sc, s[~expected])\n\n for s in [Series([1, 2, 3, 5, 3, 2, 4]),\n Series(['1', '2', '3', '5', '3', '2', '4'])]:\n expected = Series([False, False, False, False, True, True, False])\n assert_series_equal(s.duplicated(), expected)\n assert_series_equal(s.drop_duplicates(), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(inplace=True)\n assert_series_equal(sc, s[~expected])\n\n expected = Series([False, True, True, False, False, False, False])\n assert_series_equal(s.duplicated(keep='last'), expected)\n assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(keep='last', inplace=True)\n assert_series_equal(sc, s[~expected])\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.duplicated(take_last=True), expected)\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])\n sc = s.copy()\n with tm.assert_produces_warning(FutureWarning):\n sc.drop_duplicates(take_last=True, inplace=True)\n assert_series_equal(sc, s[~expected])\n\n expected = Series([False, True, True, False, True, True, False])\n assert_series_equal(s.duplicated(keep=False), expected)\n assert_series_equal(s.drop_duplicates(keep=False), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(keep=False, inplace=True)\n assert_series_equal(sc, s[~expected])\n\n def test_sort_values(self):\n\n ts = self.ts.copy()\n\n # 9816 deprecated\n with tm.assert_produces_warning(FutureWarning):\n ts.sort()\n\n self.assert_numpy_array_equal(ts, self.ts.sort_values())\n self.assert_numpy_array_equal(ts.index, self.ts.sort_values().index)\n\n ts.sort_values(ascending=False, inplace=True)\n self.assert_numpy_array_equal(ts, self.ts.sort_values(ascending=False))\n self.assert_numpy_array_equal(ts.index,\n self.ts.sort_values(ascending=False).index)\n\n # GH 5856/5853\n # Series.sort_values operating on a view\n df = DataFrame(np.random.randn(10,4))\n s = df.iloc[:,0]\n def f():\n s.sort_values(inplace=True)\n self.assertRaises(ValueError, f)\n\n # test order/sort inplace\n # GH6859\n ts1 = self.ts.copy()\n 
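        # Hedged editor's sketch (not part of the original test; `example` is an
        # illustrative name): sort_values(inplace=True) mutates the Series and
        # returns None, while the default inplace=False returns a new sorted
        # Series, which is what the copies below are comparing.
        example = Series([3, 1, 2])
        example.sort_values(inplace=True)  # sorts ascending in place
        assert list(example.values) == [1, 2, 3]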
ts1.sort_values(ascending=False, inplace=True)\n ts2 = self.ts.copy()\n ts2.sort_values(ascending=False, inplace=True)\n assert_series_equal(ts1,ts2)\n\n ts1 = self.ts.copy()\n ts1 = ts1.sort_values(ascending=False, inplace=False)\n ts2 = self.ts.copy()\n ts2 = ts.sort_values(ascending=False)\n assert_series_equal(ts1,ts2)\n\n def test_sort_index(self):\n rindex = list(self.ts.index)\n random.shuffle(rindex)\n\n random_order = self.ts.reindex(rindex)\n sorted_series = random_order.sort_index()\n assert_series_equal(sorted_series, self.ts)\n\n # descending\n sorted_series = random_order.sort_index(ascending=False)\n assert_series_equal(sorted_series,\n self.ts.reindex(self.ts.index[::-1]))\n\n def test_sort_index_inplace(self):\n\n # For #11402\n rindex = list(self.ts.index)\n random.shuffle(rindex)\n\n # descending\n random_order = self.ts.reindex(rindex)\n result = random_order.sort_index(ascending=False, inplace=True)\n self.assertIs(result, None,\n msg='sort_index() inplace should return None')\n assert_series_equal(random_order,\n self.ts.reindex(self.ts.index[::-1]))\n\n # ascending\n random_order = self.ts.reindex(rindex)\n result = random_order.sort_index(ascending=True, inplace=True)\n self.assertIs(result, None,\n msg='sort_index() inplace should return None')\n assert_series_equal(random_order, self.ts)\n\n def test_sort_API(self):\n\n # API for 9816\n\n # sortlevel\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n s = Series([1, 2], mi)\n backwards = s.iloc[[1, 0]]\n\n res = s.sort_index(level='A')\n assert_series_equal(backwards, res)\n\n # sort_index\n rindex = list(self.ts.index)\n random.shuffle(rindex)\n\n random_order = self.ts.reindex(rindex)\n sorted_series = random_order.sort_index(level=0)\n assert_series_equal(sorted_series, self.ts)\n\n # compat on axis\n sorted_series = random_order.sort_index(axis=0)\n assert_series_equal(sorted_series, self.ts)\n\n self.assertRaises(ValueError, lambda : random_order.sort_values(axis=1))\n\n sorted_series = random_order.sort_index(level=0, axis=0)\n assert_series_equal(sorted_series, self.ts)\n\n self.assertRaises(ValueError, lambda : random_order.sort_index(level=0, axis=1))\n\n def test_order(self):\n\n # 9816 deprecated\n with tm.assert_produces_warning(FutureWarning):\n self.ts.order()\n\n ts = self.ts.copy()\n ts[:5] = np.NaN\n vals = ts.values\n\n result = ts.sort_values()\n self.assertTrue(np.isnan(result[-5:]).all())\n self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:]))\n\n result = ts.sort_values(na_position='first')\n self.assertTrue(np.isnan(result[:5]).all())\n self.assert_numpy_array_equal(result[5:], np.sort(vals[5:]))\n\n # something object-type\n ser = Series(['A', 'B'], [1, 2])\n # no failure\n ser.sort_values()\n\n # ascending=False\n ordered = ts.sort_values(ascending=False)\n expected = np.sort(ts.valid().values)[::-1]\n assert_almost_equal(expected, ordered.valid().values)\n ordered = ts.sort_values(ascending=False, na_position='first')\n assert_almost_equal(expected, ordered.valid().values)\n\n def test_nsmallest_nlargest(self):\n # float, int, datetime64 (use i8), timedelts64 (same),\n # object that are numbers, object that are strings\n\n base = [3, 2, 1, 2, 5]\n\n s_list = [\n Series(base, dtype='int8'),\n Series(base, dtype='int16'),\n Series(base, dtype='int32'),\n Series(base, dtype='int64'),\n Series(base, dtype='float32'),\n Series(base, dtype='float64'),\n Series(base, dtype='uint8'),\n Series(base, dtype='uint16'),\n Series(base, dtype='uint32'),\n Series(base, 
dtype='uint64'),\n Series(base).astype('timedelta64[ns]'),\n Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),\n ]\n\n raising = [\n Series([3., 2, 1, 2, '5'], dtype='object'),\n Series([3., 2, 1, 2, 5], dtype='object'),\n # not supported on some archs\n # Series([3., 2, 1, 2, 5], dtype='complex256'),\n Series([3., 2, 1, 2, 5], dtype='complex128'),\n ]\n\n for r in raising:\n dt = r.dtype\n msg = \"Cannot use method 'n(larg|small)est' with dtype %s\" % dt\n args = 2, len(r), 0, -1\n methods = r.nlargest, r.nsmallest\n for method, arg in product(methods, args):\n with tm.assertRaisesRegexp(TypeError, msg):\n method(arg)\n\n for s in s_list:\n\n assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])\n\n assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])\n\n assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])\n\n assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]])\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.nlargest(3, take_last=True), s.iloc[[4, 0, 3]])\n\n empty = s.iloc[0:0]\n assert_series_equal(s.nsmallest(0), empty)\n assert_series_equal(s.nsmallest(-1), empty)\n assert_series_equal(s.nlargest(0), empty)\n assert_series_equal(s.nlargest(-1), empty)\n\n assert_series_equal(s.nsmallest(len(s)), s.sort_values())\n assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())\n assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])\n assert_series_equal(s.nlargest(len(s) + 1),\n s.iloc[[4, 0, 1, 3, 2]])\n\n s = Series([3., np.nan, 1, 2, 5])\n assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])\n assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])\n\n msg = 'keep must be either \"first\", \"last\"'\n with tm.assertRaisesRegexp(ValueError, msg):\n s.nsmallest(keep='invalid')\n with tm.assertRaisesRegexp(ValueError, msg):\n s.nlargest(keep='invalid')\n\n def test_rank(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.ts[::2] = np.nan\n self.ts[:10][::3] = 4.\n\n ranks = self.ts.rank()\n oranks = self.ts.astype('O').rank()\n\n assert_series_equal(ranks, oranks)\n\n mask = np.isnan(self.ts)\n filled = self.ts.fillna(np.inf)\n\n # rankdata returns a ndarray\n exp = Series(rankdata(filled),index=filled.index)\n exp[mask] = np.nan\n\n assert_almost_equal(ranks, exp)\n\n iseries = Series(np.arange(5).repeat(2))\n\n iranks = iseries.rank()\n exp = iseries.astype(float).rank()\n assert_series_equal(iranks, exp)\n iseries = Series(np.arange(5)) + 1.0\n exp = iseries / 5.0\n iranks = iseries.rank(pct=True)\n\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(1, 100))\n exp = Series(np.repeat(0.505, 100))\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries[1] = np.nan\n exp = Series(np.repeat(50.0 / 99.0, 100))\n exp[1] = np.nan\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1.0\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(np.nan, 100))\n exp = iseries.copy()\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n rng = date_range('1/1/1990', periods=5)\n iseries = Series(np.arange(5), rng) + 1\n 
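        # Hedged editor's sketch (not part of the original test; `example` is an
        # illustrative name): with no ties, rank(pct=True) maps the i-th smallest
        # of n values to i/n, matching the arange-based expectations above.
        example = Series([10., 40., 20., 30.])
        assert list(example.rank(pct=True).values) == [0.25, 1.0, 0.5, 0.75]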
iseries.ix[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20+1e-30, 1e-1])\n exp = Series([2, 1, 3, 5, 4, 6.0])\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64')\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype='float64')\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n def test_rank_inf(self):\n raise nose.SkipTest('DataFrame.rank does not currently rank np.inf and -np.inf properly')\n\n values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64')\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype='float64')\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n\n def test_from_csv(self):\n\n with ensure_clean() as path:\n self.ts.to_csv(path)\n ts = Series.from_csv(path)\n assert_series_equal(self.ts, ts, check_names=False)\n self.assertTrue(ts.name is None)\n self.assertTrue(ts.index.name is None)\n\n # GH10483\n self.ts.to_csv(path, header=True)\n ts_h = Series.from_csv(path, header=0)\n self.assertTrue(ts_h.name == 'ts')\n\n self.series.to_csv(path)\n series = Series.from_csv(path)\n self.assertIsNone(series.name)\n self.assertIsNone(series.index.name)\n assert_series_equal(self.series, series, check_names=False)\n self.assertTrue(series.name is None)\n self.assertTrue(series.index.name is None)\n\n self.series.to_csv(path, header=True)\n series_h = Series.from_csv(path, header=0)\n self.assertTrue(series_h.name == 'series')\n\n outfile = open(path, 'w')\n outfile.write('1998-01-01|1.0\\n1999-01-01|2.0')\n outfile.close()\n series = Series.from_csv(path, sep='|')\n checkseries = Series(\n {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})\n assert_series_equal(checkseries, series)\n\n series = Series.from_csv(path, sep='|', parse_dates=False)\n checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})\n assert_series_equal(checkseries, series)\n\n def test_to_csv(self):\n import io\n\n with ensure_clean() as path:\n self.ts.to_csv(path)\n\n lines = io.open(path, newline=None).readlines()\n assert(lines[1] != '\\n')\n\n self.ts.to_csv(path, index=False)\n arr = np.loadtxt(path)\n assert_almost_equal(arr, self.ts.values)\n\n def test_to_csv_unicode_index(self):\n buf = StringIO()\n s = Series([u(\"\\u05d0\"), \"d2\"], index=[u(\"\\u05d0\"), u(\"\\u05d1\")])\n\n s.to_csv(buf, encoding='UTF-8')\n buf.seek(0)\n\n s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8')\n\n assert_series_equal(s, s2)\n\n def test_tolist(self):\n rs = self.ts.tolist()\n xp = self.ts.values.tolist()\n assert_almost_equal(rs, xp)\n\n # datetime64\n s = Series(self.ts.index)\n rs = s.tolist()\n self.assertEqual(self.ts.index[0], rs[0])\n\n def test_to_frame(self):\n self.ts.name = None\n rs = self.ts.to_frame()\n xp = pd.DataFrame(self.ts.values, index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n self.ts.name = 'testname'\n rs = self.ts.to_frame()\n xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n rs = self.ts.to_frame(name='testdifferent')\n xp = pd.DataFrame(dict(testdifferent=self.ts.values), index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n def test_to_dict(self):\n 
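        # Hedged editor's sketch (not part of the original test; `example` is an
        # illustrative name): to_dict() maps index labels to values, so a Series
        # round-trips through a plain dict, as the original assertion below shows.
        example = Series([1, 2], index=['a', 'b'])
        assert example.to_dict() == {'a': 1, 'b': 2}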
self.assert_numpy_array_equal(Series(self.ts.to_dict()), self.ts)\n\n def test_to_csv_float_format(self):\n\n with ensure_clean() as filename:\n ser = Series([0.123456, 0.234567, 0.567567])\n ser.to_csv(filename, float_format='%.2f')\n\n rs = Series.from_csv(filename)\n xp = Series([0.12, 0.23, 0.57])\n assert_series_equal(rs, xp)\n\n def test_to_csv_list_entries(self):\n s = Series(['jack and jill', 'jesse and frank'])\n\n split = s.str.split(r'\\s+and\\s+')\n\n buf = StringIO()\n split.to_csv(buf)\n\n def test_to_csv_path_is_none(self):\n # GH 8215\n # Series.to_csv() was returning None, inconsistent with\n # DataFrame.to_csv() which returned string\n s = Series([1, 2, 3])\n csv_str = s.to_csv(path=None)\n self.assertIsInstance(csv_str, str)\n\n def test_str_attribute(self):\n # GH9068\n methods = ['strip', 'rstrip', 'lstrip']\n s = Series([' jack', 'jill ', ' jesse ', 'frank'])\n for method in methods:\n expected = Series([getattr(str, method)(x) for x in s.values])\n assert_series_equal(getattr(Series.str, method)(s.str), expected)\n\n # str accessor only valid with string values\n s = Series(range(5))\n with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'):\n s.str.repeat(2)\n\n def test_clip(self):\n val = self.ts.median()\n\n self.assertEqual(self.ts.clip_lower(val).min(), val)\n self.assertEqual(self.ts.clip_upper(val).max(), val)\n\n self.assertEqual(self.ts.clip(lower=val).min(), val)\n self.assertEqual(self.ts.clip(upper=val).max(), val)\n\n result = self.ts.clip(-0.5, 0.5)\n expected = np.clip(self.ts, -0.5, 0.5)\n assert_series_equal(result, expected)\n tm.assertIsInstance(expected, Series)\n\n def test_clip_types_and_nulls(self):\n\n sers = [Series([np.nan, 1.0, 2.0, 3.0]),\n Series([None, 'a', 'b', 'c']),\n Series(pd.to_datetime([np.nan, 1, 2, 3], unit='D'))]\n\n for s in sers:\n thresh = s[2]\n l = s.clip_lower(thresh)\n u = s.clip_upper(thresh)\n self.assertEqual(l[notnull(l)].min(), thresh)\n self.assertEqual(u[notnull(u)].max(), thresh)\n self.assertEqual(list(isnull(s)), list(isnull(l)))\n self.assertEqual(list(isnull(s)), list(isnull(u)))\n\n def test_clip_against_series(self):\n # GH #6966\n\n s = Series([1.0, 1.0, 4.0])\n threshold = Series([1.0, 2.0, 3.0])\n\n assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))\n assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))\n\n lower = Series([1.0, 2.0, 3.0])\n upper = Series([1.5, 2.5, 3.5])\n assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))\n assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))\n\n def test_valid(self):\n ts = self.ts.copy()\n ts[::2] = np.NaN\n\n result = ts.valid()\n self.assertEqual(len(result), ts.count())\n\n tm.assert_dict_equal(result, ts, compare_keys=False)\n\n def test_isnull(self):\n ser = Series([0, 5.4, 3, nan, -0.001])\n np.array_equal(\n ser.isnull(), Series([False, False, False, True, False]).values)\n ser = Series([\"hi\", \"\", nan])\n np.array_equal(ser.isnull(), Series([False, False, True]).values)\n\n def test_notnull(self):\n ser = Series([0, 5.4, 3, nan, -0.001])\n np.array_equal(\n ser.notnull(), Series([True, True, True, False, True]).values)\n ser = Series([\"hi\", \"\", nan])\n np.array_equal(ser.notnull(), Series([True, True, False]).values)\n\n def test_shift(self):\n shifted = self.ts.shift(1)\n unshifted = shifted.shift(-1)\n\n tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)\n\n offset = datetools.bday\n shifted = self.ts.shift(1, freq=offset)\n unshifted = shifted.shift(-1, 
freq=offset)\n\n assert_series_equal(unshifted, self.ts)\n\n unshifted = self.ts.shift(0, freq=offset)\n assert_series_equal(unshifted, self.ts)\n\n shifted = self.ts.shift(1, freq='B')\n unshifted = shifted.shift(-1, freq='B')\n\n assert_series_equal(unshifted, self.ts)\n\n # corner case\n unshifted = self.ts.shift(0)\n assert_series_equal(unshifted, self.ts)\n\n # Shifting with PeriodIndex\n ps = tm.makePeriodSeries()\n shifted = ps.shift(1)\n unshifted = shifted.shift(-1)\n tm.assert_dict_equal(unshifted.valid(), ps, compare_keys=False)\n\n shifted2 = ps.shift(1, 'B')\n shifted3 = ps.shift(1, datetools.bday)\n assert_series_equal(shifted2, shifted3)\n assert_series_equal(ps, shifted2.shift(-1, 'B'))\n\n self.assertRaises(ValueError, ps.shift, freq='D')\n\n # legacy support\n shifted4 = ps.shift(1, freq='B')\n assert_series_equal(shifted2, shifted4)\n\n shifted5 = ps.shift(1, freq=datetools.bday)\n assert_series_equal(shifted5, shifted4)\n\n # 32-bit taking\n # GH 8129\n index=date_range('2000-01-01',periods=5)\n for dtype in ['int32','int64']:\n s1 = Series(np.arange(5,dtype=dtype),index=index)\n p = s1.iloc[1]\n result = s1.shift(periods=p)\n expected = Series([np.nan,0,1,2,3],index=index)\n assert_series_equal(result,expected)\n\n # xref 8260\n # with tz\n s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')\n result = s-s.shift()\n assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))\n\n # incompat tz\n s2 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='CET'),name='foo')\n self.assertRaises(ValueError, lambda : s-s2)\n\n def test_tshift(self):\n # PeriodIndex\n ps = tm.makePeriodSeries()\n shifted = ps.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_series_equal(unshifted, ps)\n\n shifted2 = ps.tshift(freq='B')\n assert_series_equal(shifted, shifted2)\n\n shifted3 = ps.tshift(freq=datetools.bday)\n assert_series_equal(shifted, shifted3)\n\n self.assertRaises(ValueError, ps.tshift, freq='M')\n\n # DatetimeIndex\n shifted = self.ts.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_series_equal(self.ts, unshifted)\n\n shifted2 = self.ts.tshift(freq=self.ts.index.freq)\n assert_series_equal(shifted, shifted2)\n\n inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)),\n name='ts')\n shifted = inferred_ts.tshift(1)\n unshifted = shifted.tshift(-1)\n assert_series_equal(shifted, self.ts.tshift(1))\n assert_series_equal(unshifted, inferred_ts)\n\n no_freq = self.ts[[0, 5, 7]]\n self.assertRaises(ValueError, no_freq.tshift)\n\n def test_shift_int(self):\n ts = self.ts.astype(int)\n shifted = ts.shift(1)\n expected = ts.astype(float).shift(1)\n assert_series_equal(shifted, expected)\n\n def test_shift_categorical(self):\n # GH 9416\n s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')\n\n assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid())\n\n sp1 = s.shift(1)\n assert_index_equal(s.index, sp1.index)\n self.assertTrue(np.all(sp1.values.codes[:1] == -1))\n self.assertTrue(np.all(s.values.codes[:-1] == sp1.values.codes[1:]))\n\n sn2 = s.shift(-2)\n assert_index_equal(s.index, sn2.index)\n self.assertTrue(np.all(sn2.values.codes[-2:] == -1))\n self.assertTrue(np.all(s.values.codes[2:] == sn2.values.codes[:-2]))\n\n assert_index_equal(s.values.categories, sp1.values.categories)\n assert_index_equal(s.values.categories, sn2.values.categories)\n\n def test_truncate(self):\n offset = datetools.bday\n\n ts = self.ts[::3]\n\n start, end = self.ts.index[3], self.ts.index[6]\n start_missing, 
end_missing = self.ts.index[2], self.ts.index[7]\n\n # neither specified\n truncated = ts.truncate()\n assert_series_equal(truncated, ts)\n\n # both specified\n expected = ts[1:3]\n\n truncated = ts.truncate(start, end)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(start_missing, end_missing)\n assert_series_equal(truncated, expected)\n\n # start specified\n expected = ts[1:]\n\n truncated = ts.truncate(before=start)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(before=start_missing)\n assert_series_equal(truncated, expected)\n\n # end specified\n expected = ts[:3]\n\n truncated = ts.truncate(after=end)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(after=end_missing)\n assert_series_equal(truncated, expected)\n\n # corner case, empty series returned\n truncated = ts.truncate(after=self.ts.index[0] - offset)\n assert(len(truncated) == 0)\n\n truncated = ts.truncate(before=self.ts.index[-1] + offset)\n assert(len(truncated) == 0)\n\n self.assertRaises(ValueError, ts.truncate,\n before=self.ts.index[-1] + offset,\n after=self.ts.index[0] - offset)\n\n def test_ptp(self):\n N = 1000\n arr = np.random.randn(N)\n ser = Series(arr)\n self.assertEqual(np.ptp(ser), np.ptp(arr))\n\n def test_asof(self):\n # array or list or dates\n N = 50\n rng = date_range('1/1/1990', periods=N, freq='53s')\n ts = Series(np.random.randn(N), index=rng)\n ts[15:30] = np.nan\n dates = date_range('1/1/1990', periods=N * 3, freq='25s')\n\n result = ts.asof(dates)\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n result = ts.asof(list(dates))\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n mask = (result.index >= lb) & (result.index < ub)\n rs = result[mask]\n self.assertTrue((rs == ts[lb]).all())\n\n val = result[result.index[result.index >= ub][0]]\n self.assertEqual(ts[ub], val)\n\n self.ts[5:10] = np.NaN\n self.ts[15:20] = np.NaN\n\n val1 = self.ts.asof(self.ts.index[7])\n val2 = self.ts.asof(self.ts.index[19])\n\n self.assertEqual(val1, self.ts[4])\n self.assertEqual(val2, self.ts[14])\n\n # accepts strings\n val1 = self.ts.asof(str(self.ts.index[7]))\n self.assertEqual(val1, self.ts[4])\n\n # in there\n self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3])\n\n # no as of value\n d = self.ts.index[0] - datetools.bday\n self.assertTrue(np.isnan(self.ts.asof(d)))\n\n def test_getitem_setitem_datetimeindex(self):\n from pandas import date_range\n N = 50\n # testing with timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04:00:00\"]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\"] = 0\n result[\"1990-01-01 04:00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = 0\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04:00:00\"\n rb = \"1990-01-01 07:00:00\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n # repeat all the above with naive datetimes\n result = ts[datetime(1990, 1, 1, 4)]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = 
ts.copy()\n result[datetime(1990, 1, 1, 4)] = 0\n result[datetime(1990, 1, 1, 4)] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0\n result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = datetime(1990, 1, 1, 4)\n rb = datetime(1990, 1, 1, 7)\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts[ts.index[4]]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result[4:8] = ts[4:8]\n assert_series_equal(result, ts)\n\n # also test partial date slicing\n result = ts[\"1990-01-02\"]\n expected = ts[24:48]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-02\"] = 0\n result[\"1990-01-02\"] = ts[24:48]\n assert_series_equal(result, ts)\n\n def test_getitem_setitem_datetime_tz_pytz(self):\n tm._skip_if_no_pytz()\n from pytz import timezone as tz\n\n from pandas import date_range\n N = 50\n # testing with timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n\n # comparison dates with datetime MUST be localized!\n date = tz('US/Central').localize(datetime(1990, 1, 1, 3))\n result[date] = 0\n result[date] = ts[4]\n assert_series_equal(result, ts)\n\n\n def test_getitem_setitem_datetime_tz_dateutil(self):\n tm._skip_if_no_dateutil()\n from dateutil.tz import tzutc\n from pandas.tslib import _dateutil_gettz as gettz\n\n tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil\n\n from pandas import date_range\n N = 50\n # testing with timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0\n result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4]\n assert_series_equal(result, ts)\n\n def test_getitem_setitem_periodindex(self):\n from pandas import period_range\n N = 50\n rng = period_range('1/1/1990', periods=N, freq='H')\n ts 
= Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04\"]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04\"] = 0\n result[\"1990-01-01 04\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04\":\"1990-01-01 07\"]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = 0\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04\"\n rb = \"1990-01-01 07\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n # GH 2782\n result = ts[ts.index[4]]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result[4:8] = ts[4:8]\n assert_series_equal(result, ts)\n\n def test_asof_periodindex(self):\n from pandas import period_range, PeriodIndex\n # array or list or dates\n N = 50\n rng = period_range('1/1/1990', periods=N, freq='H')\n ts = Series(np.random.randn(N), index=rng)\n ts[15:30] = np.nan\n dates = date_range('1/1/1990', periods=N * 3, freq='37min')\n\n result = ts.asof(dates)\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n result = ts.asof(list(dates))\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n pix = PeriodIndex(result.index.values, freq='H')\n mask = (pix >= lb) & (pix < ub)\n rs = result[mask]\n self.assertTrue((rs == ts[lb]).all())\n\n ts[5:10] = np.NaN\n ts[15:20] = np.NaN\n\n val1 = ts.asof(ts.index[7])\n val2 = ts.asof(ts.index[19])\n\n self.assertEqual(val1, ts[4])\n self.assertEqual(val2, ts[14])\n\n # accepts strings\n val1 = ts.asof(str(ts.index[7]))\n self.assertEqual(val1, ts[4])\n\n # in there\n self.assertEqual(ts.asof(ts.index[3]), ts[3])\n\n # no as of value\n d = ts.index[0].to_timestamp() - datetools.bday\n self.assertTrue(np.isnan(ts.asof(d)))\n\n def test_asof_more(self):\n from pandas import date_range\n s = Series([nan, nan, 1, 2, nan, nan, 3, 4, 5],\n index=date_range('1/1/2000', periods=9))\n\n dates = s.index[[4, 5, 6, 2, 1]]\n\n result = s.asof(dates)\n expected = Series([2, 2, 3, 1, np.nan], index=dates)\n\n assert_series_equal(result, expected)\n\n s = Series([1.5, 2.5, 1, 2, nan, nan, 3, 4, 5],\n index=date_range('1/1/2000', periods=9))\n result = s.asof(s.index[0])\n self.assertEqual(result, s[0])\n\n def test_cast_on_putmask(self):\n\n # GH 2746\n\n # need to upcast\n s = Series([1, 2], index=[1, 2], dtype='int64')\n s[[True, False]] = Series([0], index=[1], dtype='int64')\n expected = Series([0, 2], index=[1, 2], dtype='int64')\n\n assert_series_equal(s, expected)\n\n def test_type_promote_putmask(self):\n\n # GH8387: test that changing types does not break alignment\n ts = Series(np.random.randn(100), index=np.arange(100,0,-1)).round(5)\n left, mask = ts.copy(), ts > 0\n right = ts[mask].copy().map(str)\n left[mask] = right\n assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))\n\n s = Series([0, 1, 2, 0 ])\n mask = s > 0\n s2 = s[ mask ].map( str )\n s[mask] = s2\n assert_series_equal(s, Series([0, '1', '2', 0]))\n\n s = Series([0, 'foo', 'bar', 0 ])\n mask = Series([False, True, True, False])\n s2 = s[ mask ]\n s[mask] = s2\n assert_series_equal(s, Series([0, 'foo','bar', 0]))\n\n def test_astype_cast_nan_int(self):\n df = Series([1.0, 
2.0, 3.0, np.nan])\n self.assertRaises(ValueError, df.astype, np.int64)\n\n def test_astype_cast_object_int(self):\n arr = Series([\"car\", \"house\", \"tree\", \"1\"])\n\n self.assertRaises(ValueError, arr.astype, int)\n self.assertRaises(ValueError, arr.astype, np.int64)\n self.assertRaises(ValueError, arr.astype, np.int8)\n\n arr = Series(['1', '2', '3', '4'], dtype=object)\n result = arr.astype(int)\n self.assert_numpy_array_equal(result, np.arange(1, 5))\n\n def test_astype_datetimes(self):\n import pandas.tslib as tslib\n\n s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n s = Series([datetime(2001, 1, 2, 0, 0)])\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])\n s[1] = np.nan\n self.assertEqual(s.dtype, 'M8[ns]')\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n def test_astype_str(self):\n # GH4405\n digits = string.digits\n s1 = Series([digits * 10, tm.rands(63), tm.rands(64),\n tm.rands(1000)])\n s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])\n types = (compat.text_type, np.str_)\n for typ in types:\n for s in (s1, s2):\n res = s.astype(typ)\n expec = s.map(compat.text_type)\n assert_series_equal(res, expec)\n\n # GH9757\n # Test str and unicode on python 2.x and just str on python 3.x\n for tt in set([str, compat.text_type]):\n ts = Series([Timestamp('2010-01-04 00:00:00')])\n s = ts.astype(tt)\n expected = Series([tt('2010-01-04')])\n assert_series_equal(s, expected)\n\n ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')])\n s = ts.astype(tt)\n expected = Series([tt('2010-01-04 00:00:00-05:00')])\n assert_series_equal(s, expected)\n\n td = Series([Timedelta(1, unit='d')])\n s = td.astype(tt)\n expected = Series([tt('1 days 00:00:00.000000000')])\n assert_series_equal(s, expected)\n\n def test_astype_unicode(self):\n\n # GH7758\n # a bit of magic is required to set default encoding encoding to utf-8\n digits = string.digits\n test_series = [\n Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),\n Series([u('データーサイエンス、お前はもう死んでいる')]),\n\n ]\n\n former_encoding = None\n if not compat.PY3:\n # in python we can force the default encoding\n # for this test\n former_encoding = sys.getdefaultencoding()\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n if sys.getdefaultencoding() == \"utf-8\":\n test_series.append(Series([u('野菜食べないとやばい').encode(\"utf-8\")]))\n for s in test_series:\n res = s.astype(\"unicode\")\n expec = s.map(compat.text_type)\n assert_series_equal(res, expec)\n # restore the former encoding\n if former_encoding is not None and former_encoding != \"utf-8\":\n reload(sys)\n sys.setdefaultencoding(former_encoding)\n\n\n def test_map(self):\n index, data = tm.getMixedTypeDict()\n\n source = Series(data['B'], index=data['C'])\n target = Series(data['C'][:4], index=data['D'][:4])\n\n merged = target.map(source)\n\n for k, v in compat.iteritems(merged):\n self.assertEqual(v, source[target[k]])\n\n # input could be a dict\n merged = target.map(source.to_dict())\n\n for k, v in compat.iteritems(merged):\n self.assertEqual(v, source[target[k]])\n\n # function\n result = self.ts.map(lambda x: x * 2)\n self.assert_numpy_array_equal(result, self.ts * 2)\n\n # GH 10324\n a = Series([1, 2, 3, 4])\n b = Series([\"even\", \"odd\", \"even\", \"odd\"], dtype=\"category\")\n c = Series([\"even\", \"odd\", \"even\", \"odd\"])\n\n exp = Series([\"odd\", \"even\", \"odd\", np.nan], 
dtype=\"category\")\n self.assert_series_equal(a.map(b), exp)\n exp = Series([\"odd\", \"even\", \"odd\", np.nan])\n self.assert_series_equal(a.map(c), exp)\n\n a = Series(['a', 'b', 'c', 'd'])\n b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))\n c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))\n\n exp = Series([np.nan, 1, 2, 3])\n self.assert_series_equal(a.map(b), exp)\n exp = Series([np.nan, 1, 2, 3])\n self.assert_series_equal(a.map(c), exp)\n\n a = Series(['a', 'b', 'c', 'd'])\n b = Series(['B', 'C', 'D', 'E'], dtype='category',\n index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))\n c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))\n\n exp = Series([np.nan, 'B', 'C', 'D'], dtype='category')\n self.assert_series_equal(a.map(b), exp)\n exp = Series([np.nan, 'B', 'C', 'D'])\n self.assert_series_equal(a.map(c), exp)\n\n def test_map_compat(self):\n # related GH 8024\n s = Series([True,True,False],index=[1,2,3])\n result = s.map({ True : 'foo', False : 'bar' })\n expected = Series(['foo','foo','bar'],index=[1,2,3])\n assert_series_equal(result,expected)\n\n def test_map_int(self):\n left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})\n right = Series({1: 11, 2: 22, 3: 33})\n\n self.assertEqual(left.dtype, np.float_)\n self.assertTrue(issubclass(right.dtype.type, np.integer))\n\n merged = left.map(right)\n self.assertEqual(merged.dtype, np.float_)\n self.assertTrue(isnull(merged['d']))\n self.assertTrue(not isnull(merged['c']))\n\n def test_map_type_inference(self):\n s = Series(lrange(3))\n s2 = s.map(lambda x: np.where(x == 0, 0, 1))\n self.assertTrue(issubclass(s2.dtype.type, np.integer))\n\n def test_divide_decimal(self):\n ''' resolves issue #9787 '''\n from decimal import Decimal\n\n expected = Series([Decimal(5)])\n\n s = Series([Decimal(10)])\n s = s/Decimal(2)\n\n tm.assert_series_equal(expected, s)\n\n s = Series([Decimal(10)])\n s = s//Decimal(2)\n\n tm.assert_series_equal(expected, s)\n\n def test_map_decimal(self):\n from decimal import Decimal\n\n result = self.series.map(lambda x: Decimal(str(x)))\n self.assertEqual(result.dtype, np.object_)\n tm.assertIsInstance(result[0], Decimal)\n\n def test_map_na_exclusion(self):\n s = Series([1.5, np.nan, 3, np.nan, 5])\n\n result = s.map(lambda x: x * 2, na_action='ignore')\n exp = s * 2\n assert_series_equal(result, exp)\n\n def test_map_dict_with_tuple_keys(self):\n '''\n Due to new MultiIndex-ing behaviour in v0.14.0,\n dicts with tuple keys passed to map were being\n converted to a multi-index, preventing tuple values\n from being mapped properly.\n '''\n df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]})\n label_mappings = {\n (1,): 'A',\n (2,): 'B',\n (3, 4): 'A',\n (5, 6): 'B'\n }\n df['labels'] = df['a'].map(label_mappings)\n df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)\n # All labels should be filled now\n tm.assert_series_equal(df['labels'], df['expected_labels'], check_names=False)\n\n def test_apply(self):\n assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))\n\n # elementwise-apply\n import math\n assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))\n\n # how to handle Series result, #2316\n result = self.ts.apply(lambda x: Series([x, x ** 2],\n index=['x', 'x^2']))\n expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})\n tm.assert_frame_equal(result, expected)\n\n # empty series\n s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))\n rs = s.apply(lambda x: x)\n tm.assert_series_equal(s, rs)\n # check 
all metadata (GH 9322)\n self.assertIsNot(s, rs)\n self.assertIs(s.index, rs.index)\n self.assertEqual(s.dtype, rs.dtype)\n self.assertEqual(s.name, rs.name)\n\n # index but no data\n s = Series(index=[1, 2, 3])\n rs = s.apply(lambda x: x)\n tm.assert_series_equal(s, rs)\n\n def test_apply_same_length_inference_bug(self):\n s = Series([1, 2])\n f = lambda x: (x, x + 1)\n\n result = s.apply(f)\n expected = s.map(f)\n assert_series_equal(result, expected)\n\n s = Series([1, 2, 3])\n result = s.apply(f)\n expected = s.map(f)\n assert_series_equal(result, expected)\n\n def test_apply_dont_convert_dtype(self):\n s = Series(np.random.randn(10))\n\n f = lambda x: x if x > 0 else np.nan\n result = s.apply(f, convert_dtype=False)\n self.assertEqual(result.dtype, object)\n\n def test_convert_objects(self):\n\n s = Series([1., 2, 3], index=['a', 'b', 'c'])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n # force numeric conversion\n r = s.copy().astype('O')\n r['a'] = '1'\n with tm.assert_produces_warning(FutureWarning):\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = '1.'\n with tm.assert_produces_warning(FutureWarning):\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = 'garbled'\n expected = s.copy()\n expected['a'] = np.nan\n with tm.assert_produces_warning(FutureWarning):\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, expected)\n\n # GH 4119, not converting a mixed type (e.g.floats and object)\n s = Series([1, 'na', 3, 4])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_numeric=True)\n expected = Series([1, np.nan, 3, 4])\n assert_series_equal(result, expected)\n\n s = Series([1, '', 3, 4])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_numeric=True)\n expected = Series([1, np.nan, 3, 4])\n assert_series_equal(result, expected)\n\n # dates\n s = Series(\n [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])\n s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(\n 2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates=True, convert_numeric=False)\n expected = Series(\n [Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce',\n convert_numeric=False)\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce',\n convert_numeric=True)\n assert_series_equal(result, expected)\n\n expected = Series(\n [Timestamp(\n '20010101'), Timestamp('20010102'), Timestamp('20010103'),\n lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')\n with tm.assert_produces_warning(FutureWarning):\n result = s2.convert_objects(convert_dates='coerce',\n convert_numeric=False)\n assert_series_equal(result, expected)\n with tm.assert_produces_warning(FutureWarning):\n result = s2.convert_objects(convert_dates='coerce',\n convert_numeric=True)\n assert_series_equal(result, 
expected)\n\n # preserver all-nans (if convert_dates='coerce')\n s = Series(['foo', 'bar', 1, 1.0], dtype='O')\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce',\n convert_numeric=False)\n assert_series_equal(result, s)\n\n # preserver if non-object\n s = Series([1], dtype='float32')\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce',\n convert_numeric=False)\n assert_series_equal(result, s)\n\n #r = s.copy()\n #r[0] = np.nan\n #result = r.convert_objects(convert_dates=True,convert_numeric=False)\n #self.assertEqual(result.dtype, 'M8[ns]')\n\n # dateutil parses some single letters into today's value as a date\n for x in 'abcdefghijklmnopqrstuvwxyz':\n s = Series([x])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce')\n assert_series_equal(result, s)\n s = Series([x.upper()])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce')\n assert_series_equal(result, s)\n\n def test_convert_objects_preserve_bool(self):\n s = Series([1, True, 3, 5], dtype=object)\n with tm.assert_produces_warning(FutureWarning):\n r = s.convert_objects(convert_numeric=True)\n e = Series([1, 1, 3, 5], dtype='i8')\n tm.assert_series_equal(r, e)\n\n def test_convert_objects_preserve_all_bool(self):\n s = Series([False, True, False, False], dtype=object)\n with tm.assert_produces_warning(FutureWarning):\n r = s.convert_objects(convert_numeric=True)\n e = Series([False, True, False, False], dtype=bool)\n tm.assert_series_equal(r, e)\n\n # GH 10265\n def test_convert(self):\n # Tests: All to nans, coerce, true\n # Test coercion returns correct type\n s = Series(['a', 'b', 'c'])\n results = s._convert(datetime=True, coerce=True)\n expected = Series([lib.NaT] * 3)\n assert_series_equal(results, expected)\n\n results = s._convert(numeric=True, coerce=True)\n expected = Series([np.nan] * 3)\n assert_series_equal(results, expected)\n\n expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))\n results = s._convert(timedelta=True, coerce=True)\n assert_series_equal(results, expected)\n\n dt = datetime(2001, 1, 1, 0, 0)\n td = dt - datetime(2000, 1, 1, 0, 0)\n\n # Test coercion with mixed types\n s = Series(['a', '3.1415', dt, td])\n results = s._convert(datetime=True, coerce=True)\n expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])\n assert_series_equal(results, expected)\n\n results = s._convert(numeric=True, coerce=True)\n expected = Series([nan, 3.1415, nan, nan])\n assert_series_equal(results, expected)\n\n results = s._convert(timedelta=True, coerce=True)\n expected = Series([lib.NaT, lib.NaT, lib.NaT, td],\n dtype=np.dtype('m8[ns]'))\n assert_series_equal(results, expected)\n\n # Test standard conversion returns original\n results = s._convert(datetime=True)\n assert_series_equal(results, s)\n results = s._convert(numeric=True)\n expected = Series([nan, 3.1415, nan, nan])\n assert_series_equal(results, expected)\n results = s._convert(timedelta=True)\n assert_series_equal(results, s)\n\n # test pass-through and non-conversion when other types selected\n s = Series(['1.0','2.0','3.0'])\n results = s._convert(datetime=True, numeric=True, timedelta=True)\n expected = Series([1.0,2.0,3.0])\n assert_series_equal(results, expected)\n results = s._convert(True,False,True)\n assert_series_equal(results, s)\n\n s = Series([datetime(2001, 1, 1, 0, 0),datetime(2001, 1, 1, 0, 0)],\n dtype='O')\n results = s._convert(datetime=True, 
numeric=True, timedelta=True)\n expected = Series([datetime(2001, 1, 1, 0, 0),datetime(2001, 1, 1, 0, 0)])\n assert_series_equal(results, expected)\n results = s._convert(datetime=False,numeric=True,timedelta=True)\n assert_series_equal(results, s)\n\n td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)\n s = Series([td, td], dtype='O')\n results = s._convert(datetime=True, numeric=True, timedelta=True)\n expected = Series([td, td])\n assert_series_equal(results, expected)\n results = s._convert(True,True,False)\n assert_series_equal(results, s)\n\n\n s = Series([1., 2, 3], index=['a', 'b', 'c'])\n result = s._convert(numeric=True)\n assert_series_equal(result, s)\n\n # force numeric conversion\n r = s.copy().astype('O')\n r['a'] = '1'\n result = r._convert(numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = '1.'\n result = r._convert(numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = 'garbled'\n result = r._convert(numeric=True)\n expected = s.copy()\n expected['a'] = nan\n assert_series_equal(result, expected)\n\n # GH 4119, not converting a mixed type (e.g.floats and object)\n s = Series([1, 'na', 3, 4])\n result = s._convert(datetime=True, numeric=True)\n expected = Series([1, nan, 3, 4])\n assert_series_equal(result, expected)\n\n s = Series([1, '', 3, 4])\n result = s._convert(datetime=True, numeric=True)\n assert_series_equal(result, expected)\n\n # dates\n s = Series(\n [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])\n s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(\n 2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')\n\n result = s._convert(datetime=True)\n expected = Series(\n [Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n result = s._convert(datetime=True, coerce=True)\n assert_series_equal(result, expected)\n\n expected = Series(\n [Timestamp(\n '20010101'), Timestamp('20010102'), Timestamp('20010103'),\n lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')\n result = s2._convert(datetime=True,\n numeric=False,\n timedelta=False,\n coerce=True)\n assert_series_equal(result, expected)\n result = s2._convert(datetime=True, coerce=True)\n assert_series_equal(result, expected)\n\n s = Series(['foo', 'bar', 1, 1.0], dtype='O')\n result = s._convert(datetime=True, coerce=True)\n expected = Series([lib.NaT]*4)\n assert_series_equal(result, expected)\n\n # preserver if non-object\n s = Series([1], dtype='float32')\n result = s._convert(datetime=True, coerce=True)\n assert_series_equal(result, s)\n\n #r = s.copy()\n #r[0] = np.nan\n #result = r._convert(convert_dates=True,convert_numeric=False)\n #self.assertEqual(result.dtype, 'M8[ns]')\n\n # dateutil parses some single letters into today's value as a date\n expected = Series([lib.NaT])\n for x in 'abcdefghijklmnopqrstuvwxyz':\n s = Series([x])\n result = s._convert(datetime=True, coerce=True)\n assert_series_equal(result, expected)\n s = Series([x.upper()])\n result = s._convert(datetime=True, coerce=True)\n assert_series_equal(result, expected)\n\n def test_convert_no_arg_error(self):\n s = Series(['1.0','2'])\n self.assertRaises(ValueError, s._convert)\n\n def test_convert_preserve_bool(self):\n s = Series([1, True, 3, 5], dtype=object)\n r = s._convert(datetime=True, numeric=True)\n e = Series([1, 1, 3, 5], dtype='i8')\n 
tm.assert_series_equal(r, e)\n\n def test_convert_preserve_all_bool(self):\n s = Series([False, True, False, False], dtype=object)\n r = s._convert(datetime=True, numeric=True)\n e = Series([False, True, False, False], dtype=bool)\n tm.assert_series_equal(r, e)\n\n def test_apply_args(self):\n s = Series(['foo,bar'])\n\n result = s.apply(str.split, args=(',',))\n self.assertEqual(result[0], ['foo', 'bar'])\n tm.assertIsInstance(result[0], list)\n\n def test_align(self):\n def _check_align(a, b, how='left', fill=None):\n aa, ab = a.align(b, join=how, fill_value=fill)\n\n join_index = a.index.join(b.index, how=how)\n if fill is not None:\n diff_a = aa.index.difference(join_index)\n diff_b = ab.index.difference(join_index)\n if len(diff_a) > 0:\n self.assertTrue((aa.reindex(diff_a) == fill).all())\n if len(diff_b) > 0:\n self.assertTrue((ab.reindex(diff_b) == fill).all())\n\n ea = a.reindex(join_index)\n eb = b.reindex(join_index)\n\n if fill is not None:\n ea = ea.fillna(fill)\n eb = eb.fillna(fill)\n\n assert_series_equal(aa, ea)\n assert_series_equal(ab, eb)\n self.assertEqual(aa.name, 'ts')\n self.assertEqual(ea.name, 'ts')\n self.assertEqual(ab.name, 'ts')\n self.assertEqual(eb.name, 'ts')\n\n for kind in JOIN_TYPES:\n _check_align(self.ts[2:], self.ts[:-5], how=kind)\n _check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)\n\n # empty left\n _check_align(self.ts[:0], self.ts[:-5], how=kind)\n _check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)\n\n # empty right\n _check_align(self.ts[:-5], self.ts[:0], how=kind)\n _check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)\n\n # both empty\n _check_align(self.ts[:0], self.ts[:0], how=kind)\n _check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)\n\n def test_align_fill_method(self):\n def _check_align(a, b, how='left', method='pad', limit=None):\n aa, ab = a.align(b, join=how, method=method, limit=limit)\n\n join_index = a.index.join(b.index, how=how)\n ea = a.reindex(join_index)\n eb = b.reindex(join_index)\n\n ea = ea.fillna(method=method, limit=limit)\n eb = eb.fillna(method=method, limit=limit)\n\n assert_series_equal(aa, ea)\n assert_series_equal(ab, eb)\n\n for kind in JOIN_TYPES:\n for meth in ['pad', 'bfill']:\n _check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)\n _check_align(self.ts[2:], self.ts[:-5], how=kind,\n method=meth, limit=1)\n\n # empty left\n _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)\n _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,\n limit=1)\n\n # empty right\n _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)\n _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,\n limit=1)\n\n # both empty\n _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)\n _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,\n limit=1)\n\n def test_align_nocopy(self):\n b = self.ts[:5].copy()\n\n # do copy\n a = self.ts.copy()\n ra, _ = a.align(b, join='left')\n ra[:5] = 5\n self.assertFalse((a[:5] == 5).any())\n\n # do not copy\n a = self.ts.copy()\n ra, _ = a.align(b, join='left', copy=False)\n ra[:5] = 5\n self.assertTrue((a[:5] == 5).all())\n\n # do copy\n a = self.ts.copy()\n b = self.ts[:5].copy()\n _, rb = a.align(b, join='right')\n rb[:3] = 5\n self.assertFalse((b[:3] == 5).any())\n\n # do not copy\n a = self.ts.copy()\n b = self.ts[:5].copy()\n _, rb = a.align(b, join='right', copy=False)\n rb[:2] = 5\n self.assertTrue((b[:2] == 5).all())\n\n def test_align_sameindex(self):\n a, b = self.ts.align(self.ts, copy=False)\n 
self.assertIs(a.index, self.ts.index)\n self.assertIs(b.index, self.ts.index)\n\n # a, b = self.ts.align(self.ts, copy=True)\n # self.assertIsNot(a.index, self.ts.index)\n # self.assertIsNot(b.index, self.ts.index)\n\n def test_align_multiindex(self):\n # GH 10665\n\n midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],\n names=('a', 'b', 'c'))\n idx = pd.Index(range(2), name='b')\n s1 = pd.Series(np.arange(12,dtype='int64'), index=midx)\n s2 = pd.Series(np.arange(2,dtype='int64'), index=idx)\n\n # these must be the same results (but flipped)\n res1l, res1r = s1.align(s2, join='left')\n res2l, res2r = s2.align(s1, join='right')\n\n expl = s1\n tm.assert_series_equal(expl, res1l)\n tm.assert_series_equal(expl, res2r)\n expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)\n tm.assert_series_equal(expr, res1r)\n tm.assert_series_equal(expr, res2l)\n\n res1l, res1r = s1.align(s2, join='right')\n res2l, res2r = s2.align(s1, join='left')\n\n exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],\n names=('a', 'b', 'c'))\n expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)\n tm.assert_series_equal(expl, res1l)\n tm.assert_series_equal(expl, res2r)\n expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)\n tm.assert_series_equal(expr, res1r)\n tm.assert_series_equal(expr, res2l)\n\n def test_reindex(self):\n\n identity = self.series.reindex(self.series.index)\n\n # __array_interface__ is not defined for older numpies\n # and on some pythons\n try:\n self.assertTrue(np.may_share_memory(self.series.index, identity.index))\n except (AttributeError):\n pass\n\n self.assertTrue(identity.index.is_(self.series.index))\n self.assertTrue(identity.index.identical(self.series.index))\n\n subIndex = self.series.index[10:20]\n subSeries = self.series.reindex(subIndex)\n\n for idx, val in compat.iteritems(subSeries):\n self.assertEqual(val, self.series[idx])\n\n subIndex2 = self.ts.index[10:20]\n subTS = self.ts.reindex(subIndex2)\n\n for idx, val in compat.iteritems(subTS):\n self.assertEqual(val, self.ts[idx])\n stuffSeries = self.ts.reindex(subIndex)\n\n self.assertTrue(np.isnan(stuffSeries).all())\n\n # This is extremely important for the Cython code to not screw up\n nonContigIndex = self.ts.index[::2]\n subNonContig = self.ts.reindex(nonContigIndex)\n for idx, val in compat.iteritems(subNonContig):\n self.assertEqual(val, self.ts[idx])\n\n # return a copy the same index here\n result = self.ts.reindex()\n self.assertFalse((result is self.ts))\n\n def test_reindex_nan(self):\n ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])\n\n i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]\n assert_series_equal(ts.reindex(i), ts.iloc[j])\n\n ts.index = ts.index.astype('object')\n assert_series_equal(ts.reindex(i), ts.iloc[j])\n\n def test_reindex_corner(self):\n # (don't forget to fix this) I think it's fixed\n reindexed_dep = self.empty.reindex(self.ts.index, method='pad')\n\n # corner case: pad empty series\n reindexed = self.empty.reindex(self.ts.index, method='pad')\n\n # pass non-Index\n reindexed = self.ts.reindex(list(self.ts.index))\n assert_series_equal(self.ts, reindexed)\n\n # bad fill method\n ts = self.ts[::2]\n self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')\n\n def test_reindex_pad(self):\n\n s = Series(np.arange(10),dtype='int64')\n s2 = s[::2]\n\n reindexed = s2.reindex(s.index, method='pad')\n reindexed2 = s2.reindex(s.index, method='ffill')\n assert_series_equal(reindexed, reindexed2)\n\n expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], 
index=np.arange(10))\n assert_series_equal(reindexed, expected)\n\n # GH4604\n s = Series([1,2,3,4,5], index=['a', 'b', 'c', 'd', 'e'])\n new_index = ['a','g','c','f']\n expected = Series([1,1,3,3],index=new_index)\n\n # this changes dtype because the ffill happens after\n result = s.reindex(new_index).ffill()\n assert_series_equal(result, expected.astype('float64'))\n\n result = s.reindex(new_index).ffill(downcast='infer')\n assert_series_equal(result, expected)\n\n expected = Series([1, 5, 3, 5], index=new_index)\n result = s.reindex(new_index, method='ffill')\n assert_series_equal(result, expected)\n\n # inferrence of new dtype\n s = Series([True,False,False,True],index=list('abcd'))\n new_index='agc'\n result = s.reindex(list(new_index)).ffill()\n expected = Series([True,True,False],index=list(new_index))\n assert_series_equal(result, expected)\n\n # GH4618 shifted series downcasting\n s = Series(False,index=lrange(0,5))\n result = s.shift(1).fillna(method='bfill')\n expected = Series(False,index=lrange(0,5))\n assert_series_equal(result, expected)\n\n def test_reindex_nearest(self):\n s = Series(np.arange(10, dtype='int64'))\n target = [0.1, 0.9, 1.5, 2.0]\n actual = s.reindex(target, method='nearest')\n expected = Series(np.around(target).astype('int64'), target)\n assert_series_equal(expected, actual)\n\n actual = s.reindex_like(actual, method='nearest')\n assert_series_equal(expected, actual)\n\n actual = s.reindex_like(actual, method='nearest', tolerance=1)\n assert_series_equal(expected, actual)\n\n actual = s.reindex(target, method='nearest', tolerance=0.2)\n expected = Series([0, 1, np.nan, 2], target)\n assert_series_equal(expected, actual)\n\n def test_reindex_backfill(self):\n pass\n\n def test_reindex_int(self):\n ts = self.ts[::2]\n int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)\n\n # this should work fine\n reindexed_int = int_ts.reindex(self.ts.index)\n\n # if NaNs introduced\n self.assertEqual(reindexed_int.dtype, np.float_)\n\n # NO NaNs introduced\n reindexed_int = int_ts.reindex(int_ts.index[::2])\n self.assertEqual(reindexed_int.dtype, np.int_)\n\n def test_reindex_bool(self):\n\n # A series other than float, int, string, or object\n ts = self.ts[::2]\n bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)\n\n # this should work fine\n reindexed_bool = bool_ts.reindex(self.ts.index)\n\n # if NaNs introduced\n self.assertEqual(reindexed_bool.dtype, np.object_)\n\n # NO NaNs introduced\n reindexed_bool = bool_ts.reindex(bool_ts.index[::2])\n self.assertEqual(reindexed_bool.dtype, np.bool_)\n\n def test_reindex_bool_pad(self):\n # fail\n ts = self.ts[5:]\n bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)\n filled_bool = bool_ts.reindex(self.ts.index, method='pad')\n self.assertTrue(isnull(filled_bool[:5]).all())\n\n def test_reindex_like(self):\n other = self.ts[::2]\n assert_series_equal(self.ts.reindex(other.index),\n self.ts.reindex_like(other))\n\n # GH 7179\n day1 = datetime(2013,3,5)\n day2 = datetime(2013,5,5)\n day3 = datetime(2014,3,5)\n\n series1 = Series([5, None, None],[day1, day2, day3])\n series2 = Series([None, None], [day1, day3])\n\n result = series1.reindex_like(series2, method='pad')\n expected = Series([5, np.nan], index=[day1, day3])\n assert_series_equal(result, expected)\n\n def test_reindex_fill_value(self):\n #------------------------------------------------------------\n # floats\n floats = Series([1., 2., 3.])\n result = floats.reindex([1, 2, 3])\n expected = Series([2., 3., np.nan], index=[1, 2, 3])\n 
assert_series_equal(result, expected)\n\n result = floats.reindex([1, 2, 3], fill_value=0)\n expected = Series([2., 3., 0], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # ints\n ints = Series([1, 2, 3])\n\n result = ints.reindex([1, 2, 3])\n expected = Series([2., 3., np.nan], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n # don't upcast\n result = ints.reindex([1, 2, 3], fill_value=0)\n expected = Series([2, 3, 0], index=[1, 2, 3])\n self.assertTrue(issubclass(result.dtype.type, np.integer))\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # objects\n objects = Series([1, 2, 3], dtype=object)\n\n result = objects.reindex([1, 2, 3])\n expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n result = objects.reindex([1, 2, 3], fill_value='foo')\n expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # bools\n bools = Series([True, False, True])\n\n result = bools.reindex([1, 2, 3])\n expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n result = bools.reindex([1, 2, 3], fill_value=False)\n expected = Series([False, True, False], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n def test_rename(self):\n renamer = lambda x: x.strftime('%Y%m%d')\n renamed = self.ts.rename(renamer)\n self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))\n\n # dict\n rename_dict = dict(zip(self.ts.index, renamed.index))\n renamed2 = self.ts.rename(rename_dict)\n assert_series_equal(renamed, renamed2)\n\n # partial dict\n s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')\n renamed = s.rename({'b': 'foo', 'd': 'bar'})\n self.assert_numpy_array_equal(renamed.index, ['a', 'foo', 'c', 'bar'])\n\n # index with name\n renamer = Series(\n np.arange(4), index=Index(['a', 'b', 'c', 'd'], name='name'), dtype='int64')\n renamed = renamer.rename({})\n self.assertEqual(renamed.index.name, renamer.index.name)\n\n def test_rename_inplace(self):\n renamer = lambda x: x.strftime('%Y%m%d')\n expected = renamer(self.ts.index[0])\n\n self.ts.rename(renamer, inplace=True)\n self.assertEqual(self.ts.index[0], expected)\n\n def test_preserveRefs(self):\n seq = self.ts[[5, 10, 15]]\n seq[1] = np.NaN\n self.assertFalse(np.isnan(self.ts[10]))\n\n def test_ne(self):\n ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)\n expected = [True, True, False, True, True]\n self.assertTrue(tm.equalContents(ts.index != 5, expected))\n self.assertTrue(tm.equalContents(~(ts.index == 5), expected))\n\n def test_pad_nan(self):\n x = Series([np.nan, 1., np.nan, 3., np.nan],\n ['z', 'a', 'b', 'c', 'd'], dtype=float)\n\n x.fillna(method='pad', inplace=True)\n\n expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],\n ['z', 'a', 'b', 'c', 'd'], dtype=float)\n assert_series_equal(x[1:], expected[1:])\n self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))\n\n def test_unstack(self):\n from numpy import nan\n from pandas.util.testing import assert_frame_equal\n\n index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],\n labels=[[1, 1, 0, 0], [0, 1, 0, 2]])\n\n s = Series(np.arange(4.), index=index)\n unstacked = s.unstack()\n\n expected = DataFrame([[2., nan, 3.], [0., 1., nan]],\n index=['bar', 'foo'],\n columns=['one', 'three', 
'two'])\n\n assert_frame_equal(unstacked, expected)\n\n unstacked = s.unstack(level=0)\n assert_frame_equal(unstacked, expected.T)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)\n unstacked = s.unstack(0)\n assert_frame_equal(unstacked, expected)\n\n # GH5873\n idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])\n ts = pd.Series([1,2], index=idx)\n left = ts.unstack()\n right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],\n columns=[nan, 3.5])\n print(left)\n print(right)\n assert_frame_equal(left, right)\n\n idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'],\n ['a', 'a', 'b', 'a', 'b'], [1, 2, 1, 1, np.nan]])\n ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)\n right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],\n columns=['cat', 'dog'])\n tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]\n right.index = pd.MultiIndex.from_tuples(tpls)\n assert_frame_equal(ts.unstack(level=0), right)\n\n def test_sortlevel(self):\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n s = Series([1, 2], mi)\n backwards = s.iloc[[1, 0]]\n\n res = s.sortlevel('A')\n assert_series_equal(backwards, res)\n\n res = s.sortlevel(['A', 'B'])\n assert_series_equal(backwards, res)\n\n res = s.sortlevel('A', sort_remaining=False)\n assert_series_equal(s, res)\n\n res = s.sortlevel(['A', 'B'], sort_remaining=False)\n assert_series_equal(s, res)\n\n def test_head_tail(self):\n assert_series_equal(self.series.head(), self.series[:5])\n assert_series_equal(self.series.tail(), self.series[-5:])\n\n def test_isin(self):\n s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])\n\n result = s.isin(['A', 'C'])\n expected = Series([True, False, True, False, False, False, True, True])\n assert_series_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n # GH4763\n s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])\n with tm.assertRaises(TypeError):\n s.isin('a')\n\n with tm.assertRaises(TypeError):\n s = Series(['aaa', 'b', 'c'])\n s.isin('aaa')\n\n def test_isin_with_i8(self):\n # GH 5021\n\n expected = Series([True,True,False,False,False])\n expected2 = Series([False,True,False,False,False])\n\n # datetime64[ns]\n s = Series(date_range('jan-01-2013','jan-05-2013'))\n\n result = s.isin(s[0:2])\n assert_series_equal(result, expected)\n\n result = s.isin(s[0:2].values)\n assert_series_equal(result, expected)\n\n # fails on dtype conversion in the first place\n result = s.isin(s[0:2].values.astype('datetime64[D]'))\n assert_series_equal(result, expected)\n\n result = s.isin([s[1]])\n assert_series_equal(result, expected2)\n\n result = s.isin([np.datetime64(s[1])])\n assert_series_equal(result, expected2)\n\n # timedelta64[ns]\n s = Series(pd.to_timedelta(lrange(5),unit='d'))\n result = s.isin(s[0:2])\n assert_series_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# TimeSeries-specific\n def test_cummethods_bool(self):\n # GH 6270\n # looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2\n def cummin(x):\n return np.minimum.accumulate(x)\n\n def cummax(x):\n return np.maximum.accumulate(x)\n\n a = pd.Series([False, False, False, True, True, 
False, False])\n b = ~a\n c = pd.Series([False] * len(b))\n d = ~c\n methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,\n 'cummin': cummin, 'cummax': cummax}\n args = product((a, b, c, d), methods)\n for s, method in args:\n expected = Series(methods[method](s.values))\n result = getattr(s, method)()\n assert_series_equal(result, expected)\n\n e = pd.Series([False, True, nan, False])\n cse = pd.Series([0, 1, nan, 1], dtype=object)\n cpe = pd.Series([False, 0, nan, 0])\n cmin = pd.Series([False, False, nan, False])\n cmax = pd.Series([False, True, nan, True])\n expecteds = {'cumsum': cse, 'cumprod': cpe, 'cummin': cmin,\n 'cummax': cmax}\n\n for method in methods:\n res = getattr(e, method)()\n assert_series_equal(res, expecteds[method])\n\n def test_replace(self):\n N = 100\n ser = Series(np.random.randn(N))\n ser[0:4] = np.nan\n ser[6:10] = 0\n\n # replace list with a single value\n ser.replace([np.nan], -1, inplace=True)\n\n exp = ser.fillna(-1)\n assert_series_equal(ser, exp)\n\n rs = ser.replace(0., np.nan)\n ser[ser == 0.] = np.nan\n assert_series_equal(rs, ser)\n\n ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -1).all())\n self.assertTrue((rs[20:30] == -1).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -2).all())\n self.assertTrue((rs[20:30] == -3).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n\n self.assertTrue((ser[:5] == -1).all())\n self.assertTrue((ser[6:10] == -1).all())\n self.assertTrue((ser[20:30] == -1).all())\n\n ser = Series([np.nan, 0, np.inf])\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n filled = ser.copy()\n filled[4] = 0\n assert_series_equal(ser.replace(np.inf, 0), filled)\n\n ser = Series(self.ts.index)\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n # malformed\n self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])\n\n # make sure that we aren't just masking a TypeError because bools don't\n # implement indexing\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n ser.replace([1, 2], [np.nan, 0])\n\n ser = Series([0, 1, 2, 3, 4])\n result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])\n assert_series_equal(result, Series([4, 3, 2, 1, 0]))\n\n # API change from 0.12?\n # GH 5319\n ser = Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace([np.nan])\n assert_series_equal(result, expected)\n\n ser = Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace(np.nan)\n assert_series_equal(result, expected)\n #GH 5797\n ser = Series(date_range('20130101', periods=5))\n expected = ser.copy()\n expected.loc[2] = Timestamp('20120101')\n result = ser.replace({Timestamp('20130103'):\n Timestamp('20120101')})\n assert_series_equal(result, expected)\n result = ser.replace(Timestamp('20130103'), 
Timestamp('20120101'))\n assert_series_equal(result, expected)\n\n def test_replace_with_single_list(self):\n ser = Series([0, 1, 2, 3, 4])\n result = ser.replace([1,2,3])\n assert_series_equal(result, Series([0,0,0,0,4]))\n\n s = ser.copy()\n s.replace([1,2,3],inplace=True)\n assert_series_equal(s, Series([0,0,0,0,4]))\n\n # make sure things don't get corrupted when fillna call fails\n s = ser.copy()\n with tm.assertRaises(ValueError):\n s.replace([1,2,3],inplace=True,method='crash_cymbal')\n assert_series_equal(s, ser)\n\n def test_replace_mixed_types(self):\n s = Series(np.arange(5),dtype='int64')\n\n def check_replace(to_rep, val, expected):\n sc = s.copy()\n r = s.replace(to_rep, val)\n sc.replace(to_rep, val, inplace=True)\n assert_series_equal(expected, r)\n assert_series_equal(expected, sc)\n\n # should NOT upcast to float\n e = Series([0,1,2,3,4])\n tr, v = [3], [3.0]\n check_replace(tr, v, e)\n\n # MUST upcast to float\n e = Series([0,1,2,3.5,4])\n tr, v = [3], [3.5]\n check_replace(tr, v, e)\n\n # casts to object\n e = Series([0,1,2,3.5,'a'])\n tr, v = [3,4], [3.5,'a']\n check_replace(tr, v, e)\n\n # again casts to object\n e = Series([0,1,2,3.5,Timestamp('20130101')])\n tr, v = [3,4],[3.5,Timestamp('20130101')]\n check_replace(tr, v, e)\n\n # casts to float\n e = Series([0,1,2,3.5,1])\n tr, v = [3,4],[3.5,True]\n check_replace(tr, v, e)\n\n # test an object with dates + floats + integers + strings\n dr = date_range('1/1/2001', '1/10/2001',\n freq='D').to_series().reset_index(drop=True)\n result = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])\n expected = Series([1.0,2,'a'] + dr[3:].tolist(),dtype=object)\n assert_series_equal(result, expected)\n\n def test_replace_bool_with_string_no_op(self):\n s = Series([True, False, True])\n result = s.replace('fun', 'in-the-sun')\n tm.assert_series_equal(s, result)\n\n def test_replace_bool_with_string(self):\n # nonexistent elements\n s = Series([True, False, True])\n result = s.replace(True, '2u')\n expected = Series(['2u', False, '2u'])\n tm.assert_series_equal(expected, result)\n\n def test_replace_bool_with_bool(self):\n s = Series([True, False, True])\n result = s.replace(True, False)\n expected = Series([False] * len(s))\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_dict_with_bool_keys(self):\n s = Series([True, False, True])\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n s.replace({'asdf': 'asdb', True: 'yes'})\n\n def test_asfreq(self):\n ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30),\n datetime(2009, 11, 30),\n datetime(2009, 12, 31)])\n\n daily_ts = ts.asfreq('B')\n monthly_ts = daily_ts.asfreq('BM')\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n daily_ts = ts.asfreq('B', method='pad')\n monthly_ts = daily_ts.asfreq('BM')\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n daily_ts = ts.asfreq(datetools.bday)\n monthly_ts = daily_ts.asfreq(datetools.bmonthEnd)\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n result = ts[:0].asfreq('M')\n self.assertEqual(len(result), 0)\n self.assertIsNot(result, ts)\n\n def test_diff(self):\n # Just run the function\n self.ts.diff()\n\n # int dtype\n a = 10000000000000000\n b = a + 1\n s = Series([a, b])\n\n rs = s.diff()\n self.assertEqual(rs[1], 1)\n\n # neg n\n rs = self.ts.diff(-1)\n xp = self.ts - self.ts.shift(-1)\n assert_series_equal(rs, xp)\n\n # 0\n rs = self.ts.diff(0)\n xp = self.ts - self.ts\n assert_series_equal(rs, xp)\n\n # datetime diff (GH3100)\n s = Series(date_range('20130102', periods=5))\n rs = s 
- s.shift(1)\n xp = s.diff()\n assert_series_equal(rs, xp)\n\n # timedelta diff\n nrs = rs - rs.shift(1)\n nxp = xp.diff()\n assert_series_equal(nrs, nxp)\n\n # with tz\n s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'), name='foo')\n result = s.diff()\n assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))\n\n def test_pct_change(self):\n rs = self.ts.pct_change(fill_method=None)\n assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)\n\n rs = self.ts.pct_change(2)\n filled = self.ts.fillna(method='pad')\n assert_series_equal(rs, filled / filled.shift(2) - 1)\n\n rs = self.ts.pct_change(fill_method='bfill', limit=1)\n filled = self.ts.fillna(method='bfill', limit=1)\n assert_series_equal(rs, filled / filled.shift(1) - 1)\n\n rs = self.ts.pct_change(freq='5D')\n filled = self.ts.fillna(method='pad')\n assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)\n\n def test_pct_change_shift_over_nas(self):\n s = Series([1., 1.5, np.nan, 2.5, 3.])\n\n chg = s.pct_change()\n expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])\n assert_series_equal(chg, expected)\n\n def test_autocorr(self):\n # Just run the function\n corr1 = self.ts.autocorr()\n\n # Now run it with the lag parameter\n corr2 = self.ts.autocorr(lag=1)\n\n # corr() with lag needs Series of at least length 2\n if len(self.ts) <= 2:\n self.assertTrue(np.isnan(corr1))\n self.assertTrue(np.isnan(corr2))\n else:\n self.assertEqual(corr1, corr2)\n\n # Choose a random lag between 1 and length of Series - 2\n # and compare the result with the Series corr() function\n n = 1 + np.random.randint(max(1, len(self.ts) - 2))\n corr1 = self.ts.corr(self.ts.shift(n))\n corr2 = self.ts.autocorr(lag=n)\n\n # corr() with lag needs Series of at least length 2\n if len(self.ts) <= 2:\n self.assertTrue(np.isnan(corr1))\n self.assertTrue(np.isnan(corr2))\n else:\n self.assertEqual(corr1, corr2)\n\n def test_first_last_valid(self):\n ts = self.ts.copy()\n ts[:5] = np.NaN\n\n index = ts.first_valid_index()\n self.assertEqual(index, ts.index[5])\n\n ts[-5:] = np.NaN\n index = ts.last_valid_index()\n self.assertEqual(index, ts.index[-6])\n\n ts[:] = np.nan\n self.assertIsNone(ts.last_valid_index())\n self.assertIsNone(ts.first_valid_index())\n\n ser = Series([], index=[])\n self.assertIsNone(ser.last_valid_index())\n self.assertIsNone(ser.first_valid_index())\n\n def test_mpl_compat_hack(self):\n result = self.ts[:, np.newaxis]\n expected = self.ts.values[:, np.newaxis]\n assert_almost_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# GroupBy\n\n def test_select(self):\n n = len(self.ts)\n result = self.ts.select(lambda x: x >= self.ts.index[n // 2])\n expected = self.ts.reindex(self.ts.index[n // 2:])\n assert_series_equal(result, expected)\n\n result = self.ts.select(lambda x: x.weekday() == 2)\n expected = self.ts[self.ts.index.weekday == 2]\n assert_series_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# Misc not safe for sparse\n\n def test_dropna_preserve_name(self):\n self.ts[:5] = np.nan\n result = self.ts.dropna()\n self.assertEqual(result.name, self.ts.name)\n name = self.ts.name\n ts = self.ts.copy()\n ts.dropna(inplace=True)\n self.assertEqual(ts.name, name)\n\n def test_numpy_unique(self):\n # it works!\n result = np.unique(self.ts)\n\n def test_concat_empty_series_dtypes_roundtrips(self):\n\n # round-tripping with self & like self\n dtypes = 
map(np.dtype,['float64','int8','uint8','bool','m8[ns]','M8[ns]'])\n\n for dtype in dtypes:\n self.assertEqual(pd.concat([Series(dtype=dtype)]).dtype, dtype)\n self.assertEqual(pd.concat([Series(dtype=dtype),\n Series(dtype=dtype)]).dtype, dtype)\n\n def int_result_type(dtype, dtype2):\n typs = set([dtype.kind,dtype2.kind])\n if not len(typs-set(['i','u','b'])) and (dtype.kind == 'i' or dtype2.kind == 'i'):\n return 'i'\n elif not len(typs-set(['u','b'])) and (dtype.kind == 'u' or dtype2.kind == 'u'):\n return 'u'\n return None\n\n def float_result_type(dtype, dtype2):\n typs = set([dtype.kind,dtype2.kind])\n if not len(typs-set(['f','i','u'])) and (dtype.kind == 'f' or dtype2.kind == 'f'):\n return 'f'\n return None\n\n def get_result_type(dtype, dtype2):\n result = float_result_type(dtype, dtype2)\n if result is not None:\n return result\n result = int_result_type(dtype, dtype2)\n if result is not None:\n return result\n return 'O'\n\n for dtype in dtypes:\n for dtype2 in dtypes:\n if dtype == dtype2:\n continue\n\n expected = get_result_type(dtype, dtype2)\n result = pd.concat([Series(dtype=dtype),\n Series(dtype=dtype2)]).dtype\n self.assertEqual(result.kind, expected)\n\n def test_concat_empty_series_dtypes(self):\n\n # bools\n self.assertEqual(pd.concat([Series(dtype=np.bool_),\n Series(dtype=np.int32)]).dtype, np.int32)\n self.assertEqual(pd.concat([Series(dtype=np.bool_),\n Series(dtype=np.float32)]).dtype, np.object_)\n\n # datetimelike\n self.assertEqual(pd.concat([Series(dtype='m8[ns]'),\n Series(dtype=np.bool)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='m8[ns]'),\n Series(dtype=np.int64)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.bool)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.int64)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.bool_),\n Series(dtype=np.int64)]).dtype, np.object_)\n\n # categorical\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='category')]).dtype, 'category')\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='float64')]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='object')]).dtype, 'category')\n\n # sparse\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='float64').to_sparse()])\n self.assertEqual(result.dtype,np.float64)\n self.assertEqual(result.ftype,'float64:sparse')\n\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='float64')])\n self.assertEqual(result.dtype,np.float64)\n self.assertEqual(result.ftype,'float64:sparse')\n\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='object')])\n self.assertEqual(result.dtype,np.object_)\n self.assertEqual(result.ftype,'object:dense')\n\n def test_searchsorted_numeric_dtypes_scalar(self):\n s = Series([1, 2, 90, 1000, 3e9])\n r = s.searchsorted(30)\n e = 2\n tm.assert_equal(r, e)\n\n r = s.searchsorted([30])\n e = np.array([2])\n tm.assert_numpy_array_equal(r, e)\n\n def test_searchsorted_numeric_dtypes_vector(self):\n s = Series([1, 2, 90, 1000, 3e9])\n r = s.searchsorted([91, 2e6])\n e = np.array([3, 4])\n tm.assert_numpy_array_equal(r, e)\n\n def test_search_sorted_datetime64_scalar(self):\n s = Series(pd.date_range('20120101', periods=10, freq='2D'))\n v = pd.Timestamp('20120102')\n r = s.searchsorted(v)\n e = 1\n tm.assert_equal(r, e)\n\n def 
test_search_sorted_datetime64_list(self):\n s = Series(pd.date_range('20120101', periods=10, freq='2D'))\n v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]\n r = s.searchsorted(v)\n e = np.array([1, 2])\n tm.assert_numpy_array_equal(r, e)\n\n def test_searchsorted_sorter(self):\n # GH8490\n s = Series([3, 1, 2])\n r = s.searchsorted([0, 3], sorter=np.argsort(s))\n e = np.array([0, 2])\n tm.assert_numpy_array_equal(r, e)\n\n def test_to_frame_expanddim(self):\n # GH 9762\n\n class SubclassedSeries(Series):\n @property\n def _constructor_expanddim(self):\n return SubclassedFrame\n\n class SubclassedFrame(DataFrame):\n pass\n\n s = SubclassedSeries([1, 2, 3], name='X')\n result = s.to_frame()\n self.assertTrue(isinstance(result, SubclassedFrame))\n expected = SubclassedFrame({'X': [1, 2, 3]})\n assert_frame_equal(result, expected)\n\n\nclass TestSeriesNonUnique(tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n pass\n\n def test_basic_indexing(self):\n s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])\n\n self.assertRaises(IndexError, s.__getitem__, 5)\n self.assertRaises(IndexError, s.__setitem__, 5, 0)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n s = s.sort_index()\n\n self.assertRaises(IndexError, s.__getitem__, 5)\n self.assertRaises(IndexError, s.__setitem__, 5, 0)\n\n\n def test_int_indexing(self):\n s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])\n\n self.assertRaises(KeyError, s.__getitem__, 5)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n # not monotonic\n s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])\n\n self.assertRaises(KeyError, s.__getitem__, 5)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n def test_datetime_indexing(self):\n from pandas import date_range\n\n index = date_range('1/1/2000', '1/7/2000')\n index = index.repeat(3)\n\n s = Series(len(index), index=index)\n stamp = Timestamp('1/8/2000')\n\n self.assertRaises(KeyError, s.__getitem__, stamp)\n s[stamp] = 0\n self.assertEqual(s[stamp], 0)\n\n # not monotonic\n s = Series(len(index), index=index)\n s = s[::-1]\n\n self.assertRaises(KeyError, s.__getitem__, stamp)\n s[stamp] = 0\n self.assertEqual(s[stamp], 0)\n\n def test_reset_index(self):\n df = tm.makeDataFrame()[:5]\n ser = df.stack()\n ser.index.names = ['hash', 'category']\n\n ser.name = 'value'\n df = ser.reset_index()\n self.assertIn('value', df)\n\n df = ser.reset_index(name='value2')\n self.assertIn('value2', df)\n\n # check inplace\n s = ser.reset_index(drop=True)\n s2 = ser\n s2.reset_index(drop=True, inplace=True)\n assert_series_equal(s, s2)\n\n # level\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n rs = s.reset_index(level=1)\n self.assertEqual(len(rs.columns), 2)\n\n rs = s.reset_index(level=[0, 2], drop=True)\n self.assertTrue(rs.index.equals(Index(index.get_level_values(1))))\n tm.assertIsInstance(rs, Series)\n\n def test_set_index_makes_timeseries(self):\n idx = tm.makeDateIndex(10)\n\n s = Series(lrange(10))\n s.index = idx\n\n with tm.assert_produces_warning(FutureWarning):\n self.assertTrue(s.is_time_series == True)\n self.assertTrue(s.index.is_all_dates == True)\n\n def test_timeseries_coercion(self):\n idx = tm.makeDateIndex(10000)\n ser = Series(np.random.randn(len(idx)), idx.astype(object))\n with tm.assert_produces_warning(FutureWarning):\n self.assertTrue(ser.is_time_series)\n 
self.assertTrue(ser.index.is_all_dates)\n self.assertIsInstance(ser.index, DatetimeIndex)\n\n def test_replace(self):\n N = 100\n ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -1).all())\n self.assertTrue((rs[20:30] == -1).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -2).all())\n self.assertTrue((rs[20:30] == -3).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n self.assertTrue((ser[:5] == -1).all())\n self.assertTrue((ser[6:10] == -1).all())\n self.assertTrue((ser[20:30] == -1).all())\n\n def test_repeat(self):\n s = Series(np.random.randn(3), index=['a', 'b', 'c'])\n\n reps = s.repeat(5)\n exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))\n assert_series_equal(reps, exp)\n\n to_rep = [2, 3, 4]\n reps = s.repeat(to_rep)\n exp = Series(s.values.repeat(to_rep),\n index=s.index.values.repeat(to_rep))\n assert_series_equal(reps, exp)\n\n def test_unique_data_ownership(self):\n # it works! #1807\n Series(Series([\"a\", \"c\", \"b\"]).unique()).sort_values()\n\n def test_datetime_timedelta_quantiles(self):\n # covers #9694\n self.assertTrue(pd.isnull(Series([],dtype='M8[ns]').quantile(.5)))\n self.assertTrue(pd.isnull(Series([],dtype='m8[ns]').quantile(.5)))\n\n def test_empty_timeseries_redections_return_nat(self):\n # covers #11245\n for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):\n self.assertIs(Series([], dtype=dtype).min(), pd.NaT)\n self.assertIs(Series([], dtype=dtype).max(), pd.NaT)\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "pandas.PeriodIndex", "numpy.sqrt", "pandas.util.testing.makeObjectSeries", "pandas.util.testing.assert_contains_all", "pandas.util.testing._skip_if_no_pytz", "pandas.util.testing.assert_produces_warning", "pandas.MultiIndex.from_tuples", "numpy.all", "pandas.util.testing.assert_index_equal", "numpy.exp", "numpy.where", "numpy.unique", "pandas.compat.text_type", "numpy.nansum", "numpy.diff", "pandas.util.testing.makePeriodSeries", "pandas.concat", "pandas.core.index.MultiIndex.from_arrays", "scipy.stats.pearsonr", "numpy.median", "pandas.offsets.Milli", "pandas.util.testing.getMixedTypeDict", "pandas.core.common.is_integer", "pandas.util.testing.assert_equal", "pandas.date_range", "scipy.stats.kurtosis", "numpy.array", "pandas.timedelta_range", "pandas.CategoricalIndex", "pandas.util.testing.is_sorted", "pandas.period_range", "pandas.core.common.is_datetime64tz_dtype", "numpy.random.shuffle", "numpy.datetime64", "scipy.stats.kendalltau", "numpy.isinf", "pandas.compat.range", "numpy.resize", "pandas.Series", "pandas.core.common.is_integer_dtype", "numpy.asarray", "numpy.var", "pandas.util.testing.assert_numpy_array_equal", "numpy.may_share_memory", "numpy.reshape", "pandas.compat.StringIO", "pandas.core.index.MultiIndex.from_tuples", "numpy.std", "pandas.core.config.option_context", "pandas.set_option", "pandas.util.testing.equalContents", "scipy.stats.skew", "pandas.tseries.index.Timestamp", "pandas.util.testing.makeStringIndex", "numpy.timedelta64", "numpy.int64", "numpy.random.rand", "pandas.offsets.Second", "numpy.array_equal", "pandas.util.testing.assertRaisesRegexp", "pandas.util.testing.makeFloatSeries", "numpy.ptp", "numpy.ones", "pandas.offsets.Minute", "pandas.lib.infer_dtype", "pandas.util.testing.rands", "numpy.isscalar", "pandas.to_timedelta", "pandas.util.testing.makePeriodIndex", "pandas.util.testing.assert_dict_equal", "numpy.empty", "numpy.around", "pandas.DataFrame", "numpy.round", "pandas.compat.iteritems", "numpy.random.randint", "numpy.clip", "pandas.util.testing.assert_series_equal", "pandas.DatetimeIndex", "pandas.Index", "pandas.util.testing._skip_if_no_scipy", "numpy.repeat", "pandas.notnull", "pandas.bdate_range", "numpy.isnan", "pandas.tseries.index.DatetimeIndex", "pandas.util.testing.assert_almost_equal", "pandas.Float64Index", "numpy.argsort", "pandas.tseries.tdi.Timedelta", "pandas.util.testing._skip_if_no_dateutil", "pandas.tslib._dateutil_gettz", "pandas.isnull", "pandas.util.testing.assertRaises", "pandas.util.testing.makeStringSeries", "pandas.util.testing.makeIntIndex", "pandas.compat.zip", "pandas.tseries.tdi.TimedeltaIndex", "pandas.core.common.pprint_thing", "pandas.read_pickle", "pandas.offsets.Hour", "numpy.loadtxt", "numpy.dot", "pandas.core.common.is_list_like", "pandas.to_datetime", "pandas.util.testing.assertIsInstance", "pandas.util.testing.ensure_clean", "numpy.minimum.accumulate", "numpy.dtype", "pandas.util.testing.assert_frame_equal", "numpy.random.randn", "scipy.stats.spearmanr", "numpy.fix", "pandas.util.testing.makeDataFrame", "pandas.util.testing.makeTimeDataFrame", "numpy.ones_like", "pandas.core.nanops.nansum", "numpy.arange", "numpy.finfo", "pandas.Series.from_csv", "pandas.util.testing.makeDateIndex", "pandas.util.testing.rands_array", "pandas.core.index.MultiIndex", "pandas.Categorical", "pandas.option_context", "pandas.core.nanops.nangt", "pandas.util.testing.randn", "numpy.ma.masked_all", "numpy.random.random", "pandas.util.testing.makeTimeSeries", "numpy.abs", "scipy.stats.rankdata", "numpy.isfinite", "numpy.compress", 
"pandas.MultiIndex.from_arrays", "numpy.sort", "numpy.maximum.accumulate", "pandas.Timestamp", "pandas.compat.lrange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20" ], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.7", "1.0", "0.17", "1.2", "1.8" ], "tensorflow": [] } ]
amlozano1/kalman_car_counter
[ "0804a476e1b767365415d41cddf9d5946dd871ce" ]
[ "hungarian.py" ]
[ "__author__ = 'Anthony'\n\"\"\"\nSolve the unique lowest-cost assignment problem using the\nHungarian algorithm (also known as Munkres algorithm).\n\n\"\"\"\n# Based on original code by Brain Clapper, adapted to NumPy by Gael Varoquaux.\n# Heavily refactored by Lars Buitinck.\n\n# Copyright (c) 2008 Brian M. Clapper <[email protected]>, Gael Varoquaux\n# Author: Brian M. Clapper, Gael Varoquaux\n# LICENSE: BSD\n\nimport numpy as np\n\n\ndef linear_assignment(X):\n \"\"\"Solve the linear assignment problem using the Hungarian algorithm.\n\n The problem is also known as maximum weight matching in bipartite graphs.\n The method is also known as the Munkres or Kuhn-Munkres algorithm.\n\n Parameters\n ----------\n X : array\n The cost matrix of the bipartite graph\n\n Returns\n -------\n indices : array,\n The pairs of (row, col) indices in the original array giving\n the original ordering.\n\n References\n ----------\n\n 1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html\n\n 2. Harold W. Kuhn. The Hungarian Method for the assignment problem.\n *Naval Research Logistics Quarterly*, 2:83-97, 1955.\n\n 3. Harold W. Kuhn. Variants of the Hungarian method for assignment\n problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.\n\n 4. Munkres, J. Algorithms for the Assignment and Transportation Problems.\n *Journal of the Society of Industrial and Applied Mathematics*,\n 5(1):32-38, March, 1957.\n\n 5. http://en.wikipedia.org/wiki/Hungarian_algorithm\n \"\"\"\n indices = _hungarian(X).tolist()\n indices.sort()\n # Re-force dtype to ints in case of empty list\n indices = np.array(indices, dtype=int)\n # Make sure the array is 2D with 2 columns.\n # This is needed when dealing with an empty list\n indices.shape = (-1, 2)\n return indices\n\n\nclass _HungarianState(object):\n \"\"\"State of one execution of the Hungarian algorithm.\n\n Parameters\n ----------\n cost_matrix : 2D matrix\n The cost matrix. Does not have to be square.\n \"\"\"\n\n def __init__(self, cost_matrix):\n cost_matrix = np.atleast_2d(cost_matrix)\n\n # If there are more rows (n) than columns (m), then the algorithm\n # will not be able to work correctly. Therefore, we\n # transpose the cost function when needed. Just have to\n # remember to swap the result columns back later.\n transposed = (cost_matrix.shape[1] < cost_matrix.shape[0])\n if transposed:\n self.C = (cost_matrix.T).copy()\n else:\n self.C = cost_matrix.copy()\n self.transposed = transposed\n\n # At this point, m >= n.\n n, m = self.C.shape\n self.row_uncovered = np.ones(n, dtype=np.bool)\n self.col_uncovered = np.ones(m, dtype=np.bool)\n self.Z0_r = 0\n self.Z0_c = 0\n self.path = np.zeros((n + m, 2), dtype=int)\n self.marked = np.zeros((n, m), dtype=int)\n\n def _find_prime_in_row(self, row):\n \"\"\"\n Find the first prime element in the specified row. Returns\n the column index, or -1 if no starred element was found.\n \"\"\"\n col = np.argmax(self.marked[row] == 2)\n if self.marked[row, col] != 2:\n col = -1\n return col\n\n def _clear_covers(self):\n \"\"\"Clear all covered matrix cells\"\"\"\n self.row_uncovered[:] = True\n self.col_uncovered[:] = True\n\n\ndef _hungarian(cost_matrix):\n \"\"\"The Hungarian algorithm.\n\n Calculate the Munkres solution to the classical assignment problem and\n return the indices for the lowest-cost pairings.\n\n Parameters\n ----------\n cost_matrix : 2D matrix\n The cost matrix. 
Does not have to be square.\n\n Returns\n -------\n indices : 2D array of indices\n The pairs of (row, col) indices in the original array giving\n the original ordering.\n \"\"\"\n state = _HungarianState(cost_matrix)\n\n # No need to bother with assignments if one of the dimensions\n # of the cost matrix is zero-length.\n step = None if 0 in cost_matrix.shape else _step1\n\n while step is not None:\n step = step(state)\n\n # Look for the starred columns\n results = np.array(np.where(state.marked == 1)).T\n\n # We need to swap the columns because we originally\n # did a transpose on the input cost matrix.\n if state.transposed:\n results = results[:, ::-1]\n\n return results\n\n\n# Individual steps of the algorithm follow, as a state machine: they return\n# the next step to be taken (function to be called), if any.\n\ndef _step1(state):\n \"\"\"Steps 1 and 2 in the Wikipedia page.\"\"\"\n\n # Step1: For each row of the matrix, find the smallest element and\n # subtract it from every element in its row.\n state.C -= state.C.min(axis=1)[:, np.newaxis]\n # Step2: Find a zero (Z) in the resulting matrix. If there is no\n # starred zero in its row or column, star Z. Repeat for each element\n # in the matrix.\n for i, j in zip(*np.where(state.C == 0)):\n if state.col_uncovered[j] and state.row_uncovered[i]:\n state.marked[i, j] = 1\n state.col_uncovered[j] = False\n state.row_uncovered[i] = False\n\n state._clear_covers()\n return _step3\n\n\ndef _step3(state):\n \"\"\"\n Cover each column containing a starred zero. If n columns are covered,\n the starred zeros describe a complete set of unique assignments.\n In this case, Go to DONE, otherwise, Go to Step 4.\n \"\"\"\n marked = (state.marked == 1)\n state.col_uncovered[np.any(marked, axis=0)] = False\n\n if marked.sum() < state.C.shape[0]:\n return _step4\n\n\ndef _step4(state):\n \"\"\"\n Find a noncovered zero and prime it. If there is no starred zero\n in the row containing this primed zero, Go to Step 5. Otherwise,\n cover this row and uncover the column containing the starred\n zero. Continue in this manner until there are no uncovered zeros\n left. Save the smallest uncovered value and Go to Step 6.\n \"\"\"\n # We convert to int as numpy operations are faster on int\n C = (state.C == 0).astype(np.int)\n covered_C = C * state.row_uncovered[:, np.newaxis]\n covered_C *= state.col_uncovered.astype(np.int)\n n = state.C.shape[0]\n m = state.C.shape[1]\n while True:\n # Find an uncovered zero\n row, col = np.unravel_index(np.argmax(covered_C), (n, m))\n if covered_C[row, col] == 0:\n return _step6\n else:\n state.marked[row, col] = 2\n # Find the first starred element in the row\n star_col = np.argmax(state.marked[row] == 1)\n if not state.marked[row, star_col] == 1:\n # Could not find one\n state.Z0_r = row\n state.Z0_c = col\n return _step5\n else:\n col = star_col\n state.row_uncovered[row] = False\n state.col_uncovered[col] = True\n covered_C[:, col] = C[:, col] * (\n state.row_uncovered.astype(np.int))\n covered_C[row] = 0\n\n\ndef _step5(state):\n \"\"\"\n Construct a series of alternating primed and starred zeros as follows.\n Let Z0 represent the uncovered primed zero found in Step 4.\n Let Z1 denote the starred zero in the column of Z0 (if any).\n Let Z2 denote the primed zero in the row of Z1 (there will always be one).\n Continue until the series terminates at a primed zero that has no starred\n zero in its column. 
Unstar each starred zero of the series, star each\n primed zero of the series, erase all primes and uncover every line in the\n matrix. Return to Step 3\n \"\"\"\n count = 0\n path = state.path\n path[count, 0] = state.Z0_r\n path[count, 1] = state.Z0_c\n\n while True:\n # Find the first starred element in the col defined by\n # the path.\n row = np.argmax(state.marked[:, path[count, 1]] == 1)\n if not state.marked[row, path[count, 1]] == 1:\n # Could not find one\n break\n else:\n count += 1\n path[count, 0] = row\n path[count, 1] = path[count - 1, 1]\n\n # Find the first prime element in the row defined by the\n # first path step\n col = np.argmax(state.marked[path[count, 0]] == 2)\n if state.marked[row, col] != 2:\n col = -1\n count += 1\n path[count, 0] = path[count - 1, 0]\n path[count, 1] = col\n\n # Convert paths\n for i in range(count + 1):\n if state.marked[path[i, 0], path[i, 1]] == 1:\n state.marked[path[i, 0], path[i, 1]] = 0\n else:\n state.marked[path[i, 0], path[i, 1]] = 1\n\n state._clear_covers()\n # Erase all prime markings\n state.marked[state.marked == 2] = 0\n return _step3\n\n\ndef _step6(state):\n \"\"\"\n Add the value found in Step 4 to every element of each covered row,\n and subtract it from every element of each uncovered column.\n Return to Step 4 without altering any stars, primes, or covered lines.\n \"\"\"\n # the smallest uncovered value in the matrix\n if np.any(state.row_uncovered) and np.any(state.col_uncovered):\n minval = np.min(state.C[state.row_uncovered], axis=0)\n minval = np.min(minval[state.col_uncovered])\n state.C[np.logical_not(state.row_uncovered)] += minval\n state.C[:, state.col_uncovered] -= minval\n return _step4" ]
[ [ "numpy.logical_not", "numpy.min", "numpy.ones", "numpy.atleast_2d", "numpy.argmax", "numpy.any", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TomasFisica/Redes_Prac_4
[ "fa594f088b2089ef789e014f564548388b4954c4" ]
[ "Garcia_Prac6.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 9 19:13:27 2020\r\n\r\n@author: tomas\r\n\"\"\"\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n# =============================================================================\r\n# Funciones a utilizar\r\n# =============================================================================\r\n\r\nclass Hopfield():\r\n \"\"\"Modelo de RNA tipo Hopfield\"\"\"\r\n \r\n def __init__(self,num_N,Alfa,ruido):\r\n \"\"\"Inicializacion de los valores\"\"\"\r\n self.num_N=num_N #Numero unidades de la red\r\n self.num_P=int(Alfa*num_N) #Numero de patrones\r\n self.Alfa=Alfa #Relacion entre P y N \"P/N\"\r\n self.Patrones=self.Generar_Patrones(ruido)\r\n self.Matriz_Conexion=self.Generar_Matriz_Conexion() \r\n \r\n def Generar_Patrones(self,ruido=\"defecto\"):\r\n \"\"\"Método para generar los patrones. Con ruido o sin ruido\"\"\"\r\n patrones=[]\r\n generacion1=lambda arg: 1 if arg>0 else -1\r\n generacion2=lambda arg: list(map(generacion1, arg))\r\n if ruido==\"Sin_Ruido\":\r\n patrones=np.random.normal(size=(self.num_P,self.num_N))\r\n patrones=np.array(list(map (generacion2,patrones)))\r\n return patrones\r\n else:\r\n pass\r\n \r\n \r\n def Generar_Matriz_Conexion(self):\r\n \"\"\"Método para generar la matriz de conexiones\"\"\"\r\n producto_externo=lambda arg: arg.reshape([self.num_N,1])@arg.reshape([1,self.num_N])\r\n matriz_sumada=list(map(producto_externo, self.Patrones))\r\n matriz_conexion=np.zeros([self.num_N,self.num_N])\r\n for i in range(self.num_P):\r\n matriz_conexion+=matriz_sumada[i]\r\n #la matriz de abajo al multiplicarse por una matriz, devuelve una matriz con la diagonal = 0\r\n diagonalizar=np.ones_like(matriz_conexion)-np.diag([1 for i in range(self.num_N)]) \r\n matriz_conexion=matriz_conexion*diagonalizar\r\n return matriz_conexion/self.num_N\r\n \r\n def Calcular_overlap(self,):\r\n \"\"\"Funcion para calcular cada overlap\"\"\"\r\n return\r\n \r\n def Scalon(self, matriz):\r\n \"\"\"Método para aplicar funcion escalon a matriz\"\"\"\r\n matriz=list(map(lambda arg: 1 if arg>0 else (-1 if arg<0 else 0),matriz) )\r\n return np.array(matriz)\r\n\r\n# =============================================================================\r\n# Método funcional\r\n# def Dinamica_red(self, entrada):\r\n# \"\"\"Método para evolucionar la red\"\"\"\r\n# [email protected]([self.num_N,1])\r\n# output=self.Scalon(output)\r\n# if (output!=entrada).all():\r\n# return self.Dinamica_red(output)\r\n# else: \r\n# return output \r\n# =============================================================================\r\n def Dinamica_red(self, entrada):\r\n \"\"\"Método para evolucionar la red\"\"\"\r\n while True:\r\n [email protected]([self.num_N,1])\r\n output=self.Scalon(output)\r\n if (output==entrada).all():\r\n break\r\n entrada=output\r\n return output \r\n def Generar_distribucion_overlap(self):\r\n \"\"\"Funcion para generar la distribucion de los overlaps\"\"\"\r\n Lista_S_Fijos=[]\r\n Lista_Overlap=[]\r\n for i in range(len(self.Patrones)):\r\n Lista_S_Fijos=self.Dinamica_red(self.Patrones[i])\r\n Lista_Overlap+=[float(Lista_S_Fijos.reshape([1,self.num_N])@self.Patrones[i].reshape([self.num_N,1]))/self.num_N]\r\n return Lista_Overlap\r\n\r\n def Distribuir(self,Bins=2):\r\n \"\"\"Método que genera la distribucion de overlap y mostrar\"\"\"\r\n plt.hist(self.Generar_distribucion_overlap())\r\n\r\nMi=Hopfield(4000, 0.12, \"Sin_Ruido\")\r\nMi.Distribuir()" ]
[ [ "numpy.ones_like", "numpy.random.normal", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pradeepkumarcm-egov/DIGIT-Dev
[ "d8fb601fae6d919d2386f36b36dfc7fde77ebd4f" ]
[ "utilities/datamart/fsm/fsm.py" ]
[ "import psycopg2\nimport csv\nimport pandas as pd\nimport numpy as np\nimport requests\nimport json\nfrom dateutil import parser\n\ndef mapApplicationChannel(s):\n return s.capitalize()\n \ndef map_vehicle_status(s):\n if s == 'SCHEDULED':\n return 'Scheduled'\n elif s == 'DISPOSED':\n return 'Disposed' \n elif s == 'WAITING_FOR_DISPOSAL':\n return 'Waiting for disposal' \n \n\ndef map_status(s):\n if s == 'CREATED':\n return 'Application Created'\n elif s == 'PENDING_APPL_FEE_PAYMENT':\n return 'Pending for payment'\n elif s == 'ASSING_DSO':\n return 'Pending for DSO Assignment'\n elif s == 'DSO_REJECTED':\n return 'DSO Rejected'\n elif s == 'DSO_INPROGRESS':\n return 'DSO Inprogress'\n elif s == 'PENDING_DSO_APPROVAL':\n return 'Pending for DSO Approval'\n elif s == 'COMPLETED':\n return 'Completed Request'\n elif s == 'REJECTED':\n return 'Rejected'\n elif s == 'CANCELED':\n return 'Cancelled'\n elif s == 'CITIZEN_FEEDBACK_PENDING':\n return 'Citizen feedback pending'\n\ndef map_propertytype(s):\n return s.capitalize()\n\ndef map_propertySubType(s):\n return (s.replace('_',' ').capitalize())\n\ndef map_santationtype(s):\n\n if s =='SEPTIC_TANK_WITH_SOAK_PIT':\n return 'Septic tank with soak pit'\n\n elif s == 'CONVENTIONAL_SPECTIC_TANK':\n return 'Conventional Spectic tank'\n\n elif s=='CONVENTIONAL_SINGLE_PIT':\n return 'Conventional septic tanks with single pit'\n\n elif s=='Conventional septic tank with dual pit':\n return 'Improved septic tank - Upflow Anaerabic filter'\n\n elif s=='IMPROVED_PACKED':\n return 'Improved septic tank - Packaged contact aeration type'\n\n elif s=='JOHAKASU_SYSTEMS':\n return 'Johakasu systems'\n\n elif s=='BIO_DIGESTER':\n return 'Bio digester'\n\ndef map_vehicletype(s):\n\n if s=='MAHINDRA':\n return 'Mahindra'\n\n elif s=='MAHINDRA.BOLERO_PICKUP':\n return 'Bolero Pickup'\n\n elif s== 'TATA':\n\n return 'TATA'\n\n elif s== 'TATA.LPT709/34':\n\n return 'TATA LPT709/34'\n\n elif s== 'TATA.407':\n\n return 'TATA 407'\n\n elif s== 'TAFE':\n\n return 'TAFE'\n\n elif s== 'TAFE.TRACTOR_45DI':\n\n return 'TAFE Tractor 45DI'\n\n elif s== 'SONALIKA':\n return 'Sonalika'\n\n elif s== 'SONALIKA.TRACTOR_35DI':\n\n return 'Sonalika Tractor 35DI'\ndef map_paymentsource (s):\n return s.capitalize()\ndef map_paymentmode (s):\n return s.capitalize()\n\ndef mapstate(s):\n return 'Punjab'\n\ndef mapDistrict(s):\n if s =='Phagwara':\n return 'Jalandhar'\n\n else:\n return s;\n\ndef map_paymentsourceFromMode(s):\n if s=='Online' :\n return 'Online'\n else :\n return 'Counter'\n\ndef map_pincode(s):\n if s == '':\n return ''\n elif s.isdigit():\n return int(s)\n else:\n return s\ndef map_rating(s):\n if s == '':\n return ''\n else:\n return int (s)\ndef mapslumName(s):\n if s=='SL0001' :\n return 'Kathagada juanga sahi'\n elif s=='SL0002':\n return 'Kathagada Parbatia Sahi'\n elif s=='SL0003':\n return 'Gangadhar Sahi'\n elif s=='SL0004':\n return 'Pandab Nagar'\n elif s=='SL0005':\n return 'Haridakhandi Harijana sahi'\n elif s=='SL0006':\n return 'Haridakhandi Kadalibada sahi'\n elif s== 'SL0007':\n return 'Haridakhandi Bada Sahi'\n elif s== 'SL0008':\n return 'Haridakhandi Redika Sahi'\n elif s== 'SL0009':\n return 'Golapali Sahi'\n elif s== 'SL00010':\n return 'Surya Nagar'\n elif s== 'SL00011':\n return 'Damba Sahi'\n elif s== 'SL00012':\n return 'Raju Dhoba Sahi'\ndef mapplantname(s):\n if s =='AMR001':\n return 'Amritsar FSTP'\n elif s == 'MOH002':\n return 'Mohali SeTPP'\n\n\ndef connect():\n try:\n conn = psycopg2.connect(database=\"{{REPLACE-WITH-DATABASE}}\", 
user=\"{{REPLACE-WITH-USERNAME}}\",\n password=\"{{REPLACE-WITH-PASSWORD}}\", host=\"{{REPLACE-WITH-HOST}}\")\n print(\"Connection established!\")\n except Exception as exception:\n print(\"Exception occurred while connecting to the database\")\n print(exception)\n\n fsmquery=\"SELECT fsm.tenantid,fsmvehicleTrip.applicationstatus AS Vehicle_Application_Status, fsm.applicationno as ApplicationId,COALESCE(fsm.applicationStatus,'N/A') as ApplicationStatus,split_part(propertyusage::TEXT,'.', 1) as PropertyType, CASE WHEN split_part(propertyusage::TEXT,'.', 2)!='' THEN split_part(propertyusage::TEXT,'.', 2) ELSE 'N/A' END as PropertySubType,COALESCE(fsm.sanitationType,'N/A') as OnSiteSanitationType, COALESCE(REPLACE(fsmaddress.doorno,',','#'),'N/A') as DoorNumber, COALESCE(REPLACE(fsmaddress.street,',','#'),'N/A') as StreetName, COALESCE(fsmaddress.city,'N/A') as City, COALESCE(fsmaddress.pincode,'N/A') as Pincode, COALESCE(fsmaddress.locality,'N/A') as Locality, COALESCE(fsmaddress.district,'N/A') as District, COALESCE(fsmaddress.state,'N/A') as State, COALESCE(fsmaddress.slumname,'N/A') as SlumName, COALESCE(fsm.source,'N/A') as ApplicationSource,COALESCE(fsmdso.name,'N/A') as DesludgingEntity, COALESCE(fsmgeolocation.longitude,0) as Longitude, COALESCE(fsmgeolocation.latitude,0) as Latitude, CASE WHEN fsmgeolocation.longitude>0 THEN 'Yes' ELSE 'No' end as GeoLocationProvided, COALESCE(fsmvehicle.registrationNumber,'N/A') as DesludgingVehicleNumber, COALESCE(fsm.vehicleType,'N/A') as VehicleType, COALESCE(fsmvehicle.tankcapicity,0) as VehicleCapacity , COALESCE(fsmvehicleTripdetail.volume,0) as WasteCollected, COALESCE(fsmvehicleTrip.volumeCarried,0) as WasteDumped, to_char((to_timestamp(fsmvehicleTrip.tripstarttime)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as VehicleInDateTime, to_char((to_timestamp(fsmvehicleTrip.tripendtime)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as VehicleOutDateTime, fsmvehicleTrip.additionaldetails->>'plantCode' as fstpplant, COALESCE(fsmpayment.totalamountpaid,0) as PaymentAmount, COALESCE(fsmpayment.paymentstatus,'N/A') as PaymentStatus,COALESCE(fsmpayment.paymentmode,'N/A') as PaymentSource, COALESCE(fsmpayment.paymentmode,'N/A') as PaymentInstrumentType,to_char((to_timestamp(fsm.createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as ApplicationSumbitDate FROM eg_fsm_application as fsm JOIN eg_fsm_address as fsmaddress ON ( fsmaddress.fsm_id = fsm.id ) JOIN eg_fsm_geolocation as fsmgeolocation ON ( fsmaddress.id = fsmgeolocation.address_id ) LEFT JOIN eg_vendor as fsmdso ON ( fsmdso.id = fsm.dso_id) LEFT JOIN eg_vehicle as fsmvehicle ON ( fsm.vehicle_id = fsmvehicle.id) LEFT JOIN eg_vehicle_trip_detail as fsmvehicleTripdetail ON ( fsmvehicleTripdetail.referenceNo = fsm.applicationNo) LEFT JOIN eg_vehicle_trip as fsmvehicleTrip ON ( fsmvehicleTripdetail.trip_id = fsmvehicleTrip.id) LEFT JOIN egcl_bill as egbill ON ( egbill.consumercode =fsm.applicationno) LEFT JOIN egcl_paymentdetail as paymentdl ON ( paymentdl.billid = egbill.id ) LEFT JOIN egcl_payment as fsmpayment ON ( fsmpayment.id=paymentdl.paymentid) AND fsm.createdtime > {START_TIME} AND fsm.createdtime < {END_TIME}\"\n\n starttime = input('Enter start date (dd-mm-yyyy): ')\n endtime = input('Enter end date (dd-mm-yyyy): ')\n fsmquery = fsmquery.replace('{START_TIME}',dateToEpoch(starttime))\n fsmquery = fsmquery.replace('{END_TIME}',dateToEpoch(endtime))\n \n query = 
pd.read_sql_query(fsmquery, conn)\n data = pd.DataFrame(query)\n pendingpaymentstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='PENDING_APPL_FEE_PAYMENT')\"\n pendingpaymentstatusQuery=pd.read_sql_query(pendingpaymentstatus, conn)\n pendingpaymentstatusData=pd.DataFrame(pendingpaymentstatusQuery)\n assigndsostatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='ASSING_DSO')\"\n assigndsostatusQuery=pd.read_sql_query(assigndsostatus, conn)\n assigndsostatusData=pd.DataFrame(assigndsostatusQuery)\n dsorejectstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='DSO_REJECTED')\"\n dsorejectstatusQuery=pd.read_sql_query(dsorejectstatus, conn)\n dsorejectstatusData=pd.DataFrame(dsorejectstatusQuery)\n dsoinprogressstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='DSO_INPROGRESS')\"\n dsoinprogressstatusQuery=pd.read_sql_query(dsoinprogressstatus, conn)\n dsoinprogressstatusData=pd.DataFrame(dsoinprogressstatusQuery)\n pendingdsoapprovalstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='PENDING_DSO_APPROVAL')\"\n pendingdsoapprovalstatusQuery=pd.read_sql_query(pendingdsoapprovalstatus, conn)\n pendingdsoapprovalstatusData=pd.DataFrame(pendingdsoapprovalstatusQuery)\n completedstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate,rating as rating from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='COMPLETED')\"\n completedstatusQuery=pd.read_sql_query(completedstatus, conn)\n completedstatusData=pd.DataFrame(completedstatusQuery)\n rejectedstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='REJECTED')\"\n rejectedstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 
where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='REJECTED')\"\n rejectedstatusQuery=pd.read_sql_query(rejectedstatus,conn)\n rejectedstatusData=pd.DataFrame(rejectedstatusQuery)\n cancelledstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='CANCELED')\"\n cancelledstatusQuery=pd.read_sql_query(cancelledstatus,conn)\n cancelledstatusData=pd.DataFrame(cancelledstatusQuery)\n citizenfeedbackstatus=\"select businessid as applicationno, to_char((to_timestamp(createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as PendingPaymentSumbitDate from eg_wf_processinstance_v2 where businessservice='FSM' and status in (select uuid from eg_wf_state_v2 where state='CITIZEN_FEEDBACK_PENDING')\"\n citizenfeedbackstatusQuery=pd.read_sql_query(citizenfeedbackstatus,conn)\n citizenfeedbackstatusData=pd.DataFrame(citizenfeedbackstatusQuery)\n scheduledstatus=\"select referenceno as applicationno, to_char((to_timestamp(process.createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as scheduleddatetime from eg_wf_processinstance_v2 as process inner join eg_vehicle_trip_detail as detail on process.businessid=detail.referenceno where businessservice='FSM_VEHICLE_TRIP' and process.status in (select uuid from eg_wf_state_v2 where state='SCHEDULED')\"\n scheduledstatusQuery=pd.read_sql_query(scheduledstatus,conn)\n scheduledstatusData=pd.DataFrame(scheduledstatusQuery)\n waitingfordisposalstatus=\"select referenceno as applicationno, to_char((to_timestamp(process.createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as waitingfordisposalassignedtime from eg_wf_processinstance_v2 as process inner join eg_vehicle_trip_detail as detail on process.businessid=detail.referenceno where businessservice='FSM_VEHICLE_TRIP' and process.status in (select uuid from eg_wf_state_v2 where state='WAITING_FOR_DISPOSAL')\"\n waitingfordisposalstatusQuery=pd.read_sql_query(waitingfordisposalstatus,conn)\n waitingfordisposalstatusData=pd.DataFrame(waitingfordisposalstatusQuery)\n disposedstatus=\"select referenceno as applicationno, to_char((to_timestamp(process.createdtime/1000)::timestamp at time zone 'utc' at time Zone 'Asia/Kolkata'), 'dd/mm/yyyy HH24:MI:SS') as disposedtime from eg_wf_processinstance_v2 as process inner join eg_vehicle_trip_detail as detail on process.businessid=detail.referenceno where businessservice='FSM_VEHICLE_TRIP' and process.status in (select uuid from eg_wf_state_v2 where state='DISPOSED')\"\n disposedstatusQuery=pd.read_sql_query(disposedstatus,conn)\n disposedstatusData=pd.DataFrame(disposedstatusQuery)\n data.columns=['tenantid','Vehicle_Application_Status','Application ID','Application Status', 'Property Type','Property Sub Type','OnSite Sanitation Type','Door Number','Street Name','City','Pincode','Locality','District','State','Slum Name','Application Source','Desludging Entity','Longitude','Latitude','Geo Location Provided','Desludging Vehicle Number','Vehicle Type','Vehicle Capacity','Waste Collected','Waste Dumped','Vehicle In DateTime','Vehicle Out DateTime','Fstp Plant Name','Payment Amount','Payment Status','Payment Source','Payment Instrument 
Type','Application Submitted Time']\n pendingpaymentstatusData.columns=['Application ID','Pending payment Submitted Time']\n assigndsostatusData.columns=['Application ID','Assigned DSO Submitted Time']\n dsorejectstatusData.columns=['Application ID','DSO Rejected Submitted Time']\n dsoinprogressstatusData.columns=['Application ID','DSO Inprogress Submitted Time']\n pendingdsoapprovalstatusData.columns=['Application ID','Pending DSO Approval Submitted Time']\n rejectedstatusData.columns=['Application ID','Rejected Date Time']\n cancelledstatusData.columns=['Application ID','Cancelled Date Time']\n citizenfeedbackstatusData.columns=['Application ID','Citizen feedback Submitted Date Time']\n completedstatusData.columns=['Application ID','Application Completed Time','Rating']\n scheduledstatusData.columns=['Application ID','Scheduled Time']\n waitingfordisposalstatusData.columns=['Application ID','Waiting for disposalTime']\n disposedstatusData.columns=['Application ID','Disposed Time'] \n fsmdata = pd.DataFrame()\n fsmdata=pd.merge(data,pendingpaymentstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,assigndsostatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,dsorejectstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,dsoinprogressstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,pendingdsoapprovalstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,rejectedstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,cancelledstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,citizenfeedbackstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,completedstatusData,left_on='Application ID',right_on='Application ID',how='left') \n fsmdata=pd.merge(fsmdata,scheduledstatusData,left_on='Application ID',right_on='Application ID',how='left') \n fsmdata=pd.merge(fsmdata,waitingfordisposalstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata=pd.merge(fsmdata,disposedstatusData,left_on='Application ID',right_on='Application ID',how='left')\n fsmdata['Application Status'] = fsmdata['Application Status'].map(map_status) \n fsmdata['Property Type']=fsmdata['Property Type'].map(map_propertytype)\n fsmdata['Property Sub Type']=fsmdata['Property Sub Type'].map(map_propertySubType)\n fsmdata['SLA Planned (In Days)']=2 \n fsmdata=fsmdata.fillna('N/A') \n fsmdata['Application Source'] =fsmdata['Application Source'].map(mapApplicationChannel) \n fsmdata['OnSite Sanitation Type']= fsmdata['OnSite Sanitation Type'].map(map_santationtype)\n fsmdata['Application Completed Time']=fsmdata['Application Completed Time'].replace('N/A', '')\n fsmdata['Application Submitted Time']=fsmdata['Application Submitted Time'].replace('N/A', '') \n fsmdata['Scheduled Time']=fsmdata['Scheduled Time'].replace('N/A', '')\n fsmdata['Waiting for disposalTime']=fsmdata['Waiting for disposalTime'].replace('N/A', '')\n fsmdata['Disposed Time']=fsmdata['Disposed Time'].replace('N/A', '')\n fsmdata = fsmdata.dropna(axis=0, subset=['Application Submitted Time'])\n fsmdata['SLA achieved'] = (pd.to_datetime(fsmdata['Application Completed Time'])- pd.to_datetime(fsmdata['Application Submitted Time'])).dt.days\n 
fsmdata=fsmdata.fillna('N/A') \n fsmdata['Vehicle Type']=fsmdata['Vehicle Type'].map(map_vehicletype) \n fsmdata['Payment Status'] =fsmdata['Payment Status'].map(map_paymentsource) \n fsmdata['State']=fsmdata['State'].map(mapstate)\n fsmdata['District']=fsmdata['City']\n fsmdata['District']=fsmdata['District'].map(mapDistrict)\n fsmdata['Payment Source']= fsmdata['Payment Source'].map(map_paymentsource)\n fsmdata['Payment Source']=fsmdata['Payment Source'].map(map_paymentsourceFromMode)\n fsmdata['Waste Collected']=fsmdata['Waste Collected'].apply(np.int64)\n fsmdata['Payment Amount']=fsmdata['Payment Amount'].apply(np.int64)\n fsmdata['Rating']=fsmdata['Rating'].replace('N/A','')\n fsmdata['Rating']=fsmdata['Rating'].map(map_rating)\n fsmdata['Pincode']=fsmdata['Pincode'].replace('N/A','') \n fsmdata['Pincode']=fsmdata['Pincode'].map(map_pincode)\n fsmdata['Longitude']=fsmdata['Longitude'].replace(0,'')\n fsmdata['Latitude']=fsmdata['Latitude'].replace(0,'')\n fsmdata['Longitude']=fsmdata['Longitude'].map(map_rating) \n fsmdata['Latitude']=fsmdata['Latitude'].map(map_rating) \n fsmdata['Slum Name'] = fsmdata['Slum Name'].map(mapslumName)\n fsmdata['Waste Dumped']=fsmdata['Waste Dumped'].apply(np.int64)\n fsmdata['Fstp Plant Name'] = fsmdata['Fstp Plant Name'].map(mapplantname)\n fsmdata['Vehicle_Application_Status'] = fsmdata['Vehicle_Application_Status'].map(map_vehicle_status)\n global uniquetenant\n uniquetenant = fsmdata['tenantid'].unique()\n global accesstoken\n accesstoken = accessToken()\n global localitydict\n localitydict={}\n storeTenantValues()\n\n fsmdata['Locality'] = fsmdata.apply(lambda x : enrichLocality(x.tenantid,x.Locality), axis=1)\n fsmdata = fsmdata.drop(columns=['tenantid'])\n fsmdata.fillna('', inplace=True)\n fsmdata=fsmdata.drop_duplicates(subset = [\"Application ID\"]).reset_index(drop=True)\n fsmdata.to_csv('/tmp/fsmDatamart.csv')\n\n print(\"Datamart exported. 
Please copy it using kubectl cp command to your required location.\")\n\ndef accessToken():\n query = {'username':'{{REPLACE-WITH-USERNAME}}','password':'{{REPLACE-WITH-PASSWORD}}','userType':'EMPLOYEE',\"scope\":\"read\",\"grant_type\":\"password\"}\n query['tenantId']='pb.amritsar'\n response = requests.post(\"{{REPLACE-WITH-URL}}\",data=query, headers={\n \"Connection\":\"keep-alive\",\"content-type\":\"application/x-www-form-urlencoded\", \"origin\":\"{{REPLACE-WITH-URL}}\",\"Authorization\": \"Basic ZWdvdi11c2VyLWNsaWVudDo=\"})\n jsondata = response.json()\n return jsondata.get('access_token')\n\n\ndef locationApiCall(tenantid):\n body = { \"RequestInfo\": {\"apiId\": \"Rainmaker\", \"ver\": \".01\",\"ts\": \"\",\"action\": \"\",\"did\": \"1\",\"key\": \"\",\"msgId\": \"20170310130900|en_IN\",}}\n body[\"RequestInfo\"][\"authToken\"]=accesstoken\n paramlist = {\"hierarchyTypeCode\":\"REVENUE\",\"boundaryType\":\"locality\"}\n paramlist[\"tenantId\"]=tenantid\n response = requests.post(\"{{REPLACE-WITH-URL}}\",params = paramlist,json=body, headers={\n \"Connection\":\"keep-alive\",\"content-type\":\"application/json;charset=UTF-8\", \"origin\":\"{{REPLACE-WITH-URL}}\"})\n\n jsondata={}\n if response.status_code == 200:\n jsondata = response.json()\n else:\n return ''\n\n if 'TenantBoundary' in jsondata:\n jsondata = jsondata['TenantBoundary']\n else:\n return ''\n if len(jsondata)>0:\n jsondata = jsondata[0]\n else:\n return ''\n if 'boundary' in jsondata:\n jsondata = jsondata['boundary']\n else:\n return ''\n\n\n dictionary={}\n for v in jsondata:\n dictionary[v['code']]= v['name']\n\n return dictionary\n\ndef storeTenantValues():\n for tenant in uniquetenant:\n localitydict[tenant]=locationApiCall(tenant)\n\n\ndef enrichLocality(tenantid,locality):\n if tenantid in localitydict:\n if localitydict[tenantid]=='':\n return ''\n elif locality in localitydict[tenantid]:\n return localitydict[tenantid][locality]\n else:\n return ''\n else:\n return ''\n\ndef dateToEpoch(dateString):\n return str(parser.parse(dateString).timestamp() * 1000)\n\nif __name__ == '__main__':\n connect()\n" ]
[ [ "pandas.read_sql_query", "pandas.merge", "pandas.to_datetime", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Batake/wavegan
[ "8ba09b68717a29829c061083803b7d21f7004e19" ]
[ "eval/similarity/feats.py" ]
[ "import tensorflow as tf\nfrom scipy.io.wavfile import read as wavread\nimport numpy as np\nfrom tqdm import tqdm\n\nif __name__ == '__main__':\n import argparse\n import cPickle as pickle\n import glob\n import os\n import random\n import sys\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--audio_dir', type=str,\n help='Directory with 16-bit signed integer PCM wav files at 16kHz')\n parser.add_argument('--out_fp', type=str,\n help='Output file with audio features')\n parser.add_argument('--n', type=int,\n help='Limit the number of items for comparison')\n\n parser.set_defaults(\n n=None)\n\n args = parser.parse_args()\n\n wav_fps = sorted(glob.glob(os.path.join(args.audio_dir, '*.wav')))\n random.seed(0)\n random.shuffle(wav_fps)\n if args.n is not None:\n wav_fps = wav_fps[:args.n]\n\n # Graph to calculate feats\n x = tf.placeholder(tf.float32, [None])\n x_trim = x[:16384]\n x_trim = tf.pad(x_trim, [[0, 16384 - tf.shape(x_trim)[0]]])\n X = tf.contrib.signal.stft(x_trim, 2048, 128, pad_end=True)\n X_mag = tf.abs(X)\n W_mel = tf.contrib.signal.linear_to_mel_weight_matrix(\n num_mel_bins=128,\n num_spectrogram_bins=1025,\n sample_rate=16000,\n lower_edge_hertz=40.,\n upper_edge_hertz=7800.,\n )\n X_mel = tf.matmul(X_mag, W_mel)\n X_lmel = tf.log(X_mel + 1e-6)\n X_feat = X_lmel\n\n # Calculate feats for each wav file\n with tf.Session() as sess:\n _X_feats = []\n for wav_fp in tqdm(wav_fps):\n _, _x = wavread(wav_fp)\n\n _X_feats.append(sess.run(X_feat, {x: _x}))\n _X_feats = np.array(_X_feats)\n\n with open(args.out_fp, 'wb') as f:\n pickle.dump(_X_feats, f)\n" ]
[ [ "tensorflow.matmul", "tensorflow.contrib.signal.linear_to_mel_weight_matrix", "tensorflow.shape", "tensorflow.contrib.signal.stft", "tensorflow.placeholder", "tensorflow.log", "tensorflow.Session", "numpy.array", "scipy.io.wavfile.read", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] } ]
jelmer/datalad
[ "fedc04867d87e0191bd500991d0df97e97113457" ]
[ "datalad/tests/test_tests_utils.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport platform\nimport sys\nimport os\nimport random\nimport traceback\nimport logging\n\ntry:\n # optional direct dependency we might want to kick out\n import bs4\nexcept ImportError: # pragma: no cover\n bs4 = None\n\nfrom glob import glob\nfrom os.path import exists, join as opj, basename\n\nfrom six import PY2, PY3\nfrom six import text_type\nfrom six.moves.urllib.request import urlopen\n\nfrom mock import patch\nfrom nose.tools import assert_in, assert_not_in, assert_true\nfrom nose import SkipTest\n\nfrom ..utils import getpwd, chpwd\n\nfrom . import utils\nfrom .utils import eq_, ok_, assert_false, ok_startswith, nok_startswith, \\\n with_tempfile, with_testrepos, with_tree, \\\n rmtemp, \\\n OBSCURE_PREFIX, OBSCURE_FILENAMES,\\\n get_most_obscure_supported_name, \\\n swallow_outputs, swallow_logs, \\\n on_windows, assert_raises, assert_cwd_unchanged, serve_path_via_http, \\\n ok_symlink, assert_true, ok_good_symlink, ok_broken_symlink\n\nfrom .utils import ok_generator\nfrom .utils import assert_dict_equal\nfrom .utils import assert_str_equal\nfrom .utils import assert_re_in\nfrom .utils import local_testrepo_flavors\nfrom .utils import skip_if_no_network\nfrom .utils import skip_if_no_module\nfrom .utils import run_under_dir\nfrom .utils import skip_if\nfrom .utils import ok_file_has_content\nfrom .utils import without_http_proxy\nfrom .utils import with_testsui\nfrom .utils import skip_ssh\nfrom .utils import probe_known_failure\nfrom .utils import patch_config\nfrom .utils import ignore_nose_capturing_stdout\n\n#\n# Test with_tempfile, especially nested invocations\n#\n\n@with_tempfile\ndef _with_tempfile_decorated_dummy(path):\n return path\n\n\ndef test_with_tempfile_dir_via_env_variable():\n target = os.path.join(os.path.expanduser(\"~\"), \"dataladtesttmpdir\")\n assert_false(os.path.exists(target), \"directory %s already exists.\" % target)\n with patch.dict('os.environ', {'DATALAD_TESTS_TEMP_DIR': target}):\n filename = _with_tempfile_decorated_dummy()\n ok_startswith(filename, target)\n\n\n@with_tempfile\n@with_tempfile\ndef test_nested_with_tempfile_basic(f1, f2):\n ok_(f1 != f2)\n ok_(not os.path.exists(f1))\n ok_(not os.path.exists(f2))\n\n\n# And the most obscure case to test. 
Generator for the test is\n# used as well to verify that every one of those functions adds new argument\n# to the end of incoming arguments.\n@with_tempfile(prefix=\"TEST\", suffix='big')\n@with_tree((('f1.txt', 'load'),))\n@with_tempfile(suffix='.cfg')\n@with_tempfile(suffix='.cfg.old')\n@with_testrepos(flavors=local_testrepo_flavors, count=1)\ndef check_nested_with_tempfile_parametrized_surrounded(\n param, f0, tree, f1, f2, repo):\n eq_(param, \"param1\")\n ok_(f0.endswith('big'), msg=\"got %s\" % f0)\n ok_(os.path.basename(f0).startswith('TEST'), msg=\"got %s\" % f0)\n ok_(os.path.exists(os.path.join(tree, 'f1.txt')))\n ok_(f1 != f2)\n ok_(f1.endswith('.cfg'), msg=\"got %s\" % f1)\n ok_(f2.endswith('.cfg.old'), msg=\"got %s\" % f2)\n ok_(repo) # got some repo -- local or url\n\n\ndef test_nested_with_tempfile_parametrized_surrounded():\n yield check_nested_with_tempfile_parametrized_surrounded, \"param1\"\n\n\n@with_tempfile(content=\"testtest\")\ndef test_with_tempfile_content(f):\n ok_file_has_content(f, \"testtest\")\n ok_file_has_content(f, \"test*\", re_=True)\n\n\ndef test_with_tempfile_content_raises_on_mkdir():\n\n @with_tempfile(content=\"test\", mkdir=True)\n def t(): # pragma: no cover\n raise AssertionError(\"must not be run\")\n\n with assert_raises(ValueError):\n # after this commit, it will check when invoking, not when decorating\n t()\n\n\ndef test_with_testrepos():\n repos = []\n\n @with_testrepos\n def check_with_testrepos(repo):\n repos.append(repo)\n\n check_with_testrepos()\n\n eq_(len(repos),\n 2 if on_windows # TODO -- would fail now in DATALAD_TESTS_NONETWORK mode\n else (15 if os.environ.get('DATALAD_TESTS_NONETWORK') else 16)) # local, local-url, clone, network\n\n for repo in repos:\n if not (repo.startswith('git://') or repo.startswith('http')):\n # either it is a \"local\" or a removed clone\n ok_(exists(opj(repo, '.git'))\n or\n not exists(opj(repo, '.git', 'remove-me')))\n\n\ndef test_get_resolved_values():\n from datalad.tests.utils import _get_resolved_flavors\n flavors = ['networkish', 'local']\n eq_(([] if os.environ.get('DATALAD_TESTS_NONETWORK') else ['networkish'])\n + ['local'],\n _get_resolved_flavors(flavors))\n\n with patch.dict('os.environ', {'DATALAD_TESTS_NONETWORK': '1'}):\n eq_(_get_resolved_flavors(flavors), ['local'])\n\n # and one more to see the exception being raised if nothing to teston\n @with_testrepos(flavors=['network'])\n def magical():\n raise AssertionError(\"Must not be ran\")\n assert_raises(SkipTest, magical)\n\ndef test_with_tempfile_mkdir():\n dnames = [] # just to store the name within the decorated function\n\n @with_tempfile(mkdir=True)\n def check_mkdir(d1):\n ok_(os.path.exists(d1))\n ok_(os.path.isdir(d1))\n dnames.append(d1)\n eq_(glob(os.path.join(d1, '*')), [])\n # Create a file to assure we can remove later the temporary load\n with open(os.path.join(d1, \"test.dat\"), \"w\") as f:\n f.write(\"TEST LOAD\")\n\n check_mkdir()\n if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):\n ok_(not os.path.exists(dnames[0])) # got removed\n\n\n@with_tempfile()\ndef test_with_tempfile_default_prefix(d1):\n d = basename(d1)\n short = 'datalad_temp_'\n full = short + \\\n 'test_with_tempfile_default_prefix'\n if on_windows:\n ok_startswith(d, short)\n nok_startswith(d, full)\n else:\n ok_startswith(d, full)\n\n\n@with_tempfile(prefix=\"nodatalad_\")\ndef test_with_tempfile_specified_prefix(d1):\n ok_startswith(basename(d1), 'nodatalad_')\n ok_('test_with_tempfile_specified_prefix' not in d1)\n\n\ndef 
test_get_most_obscure_supported_name():\n n = get_most_obscure_supported_name()\n if platform.system() in ('Linux', 'Darwin'):\n eq_(n, OBSCURE_PREFIX + OBSCURE_FILENAMES[1])\n else:\n # ATM no one else is as good\n ok_(n in OBSCURE_PREFIX + OBSCURE_FILENAMES[2:])\n\n\ndef test_keeptemp_via_env_variable():\n\n if os.environ.get('DATALAD_TESTS_TEMP_KEEP'): # pragma: no cover\n raise SkipTest(\"We have env variable set to preserve tempfiles\")\n\n files = []\n\n @with_tempfile()\n def check(f):\n open(f, 'w').write(\"LOAD\")\n files.append(f)\n\n with patch.dict('os.environ', {}):\n check()\n\n with patch.dict('os.environ', {'DATALAD_TESTS_TEMP_KEEP': '1'}):\n check()\n\n eq_(len(files), 2)\n ok_(not exists(files[0]), msg=\"File %s still exists\" % files[0])\n ok_( exists(files[1]), msg=\"File %s not exists\" % files[1])\n\n rmtemp(files[-1])\n\n\n@with_tempfile\ndef test_ok_symlink_helpers(tmpfile):\n\n if on_windows: # pragma: no cover\n raise SkipTest(\"no sylmlinks on windows\")\n\n assert_raises(AssertionError, ok_symlink, tmpfile)\n assert_raises(AssertionError, ok_good_symlink, tmpfile)\n assert_raises(AssertionError, ok_broken_symlink, tmpfile)\n\n tmpfile_symlink = tmpfile + '_symlink'\n os.symlink(tmpfile, tmpfile_symlink) \n\n # broken symlink\n ok_symlink(tmpfile_symlink)\n ok_broken_symlink(tmpfile_symlink)\n assert_raises(AssertionError, ok_good_symlink, tmpfile_symlink)\n\n with open(tmpfile, 'w') as tf:\n tf.write('test text')\n \n # tmpfile is still not a symlink here\n assert_raises(AssertionError, ok_symlink, tmpfile)\n assert_raises(AssertionError, ok_good_symlink, tmpfile)\n assert_raises(AssertionError, ok_broken_symlink, tmpfile)\n\n ok_symlink(tmpfile_symlink)\n ok_good_symlink(tmpfile_symlink)\n assert_raises(AssertionError, ok_broken_symlink, tmpfile_symlink)\n\n\ndef test_ok_startswith():\n ok_startswith('abc', 'abc')\n ok_startswith('abc', 'a')\n ok_startswith('abc', '')\n ok_startswith(' abc', ' ')\n ok_startswith('abc\\r\\n', 'a') # no effect from \\r\\n etc\n assert_raises(AssertionError, ok_startswith, 'abc', 'b')\n assert_raises(AssertionError, ok_startswith, 'abc', 'abcd')\n\n\ndef test_nok_startswith():\n nok_startswith('abc', 'bc')\n nok_startswith('abc', 'c')\n assert_raises(AssertionError, nok_startswith, 'abc', 'a')\n assert_raises(AssertionError, nok_startswith, 'abc', 'abc')\n\ndef test_ok_generator():\n def func(a, b=1):\n return a+b\n def gen(a, b=1): # pragma: no cover\n yield a+b\n # not sure how to determine if xrange is a generator\n if PY2:\n assert_raises(AssertionError, ok_generator, xrange(2))\n assert_raises(AssertionError, ok_generator, range(2))\n assert_raises(AssertionError, ok_generator, gen)\n ok_generator(gen(1))\n assert_raises(AssertionError, ok_generator, func)\n assert_raises(AssertionError, ok_generator, func(1))\n\n\ndef _test_assert_Xwd_unchanged(func):\n orig_cwd = os.getcwd()\n orig_pwd = getpwd()\n\n @assert_cwd_unchanged\n def do_chdir():\n func(os.pardir)\n\n with assert_raises(AssertionError) as cm:\n do_chdir()\n\n eq_(orig_cwd, os.getcwd(),\n \"assert_cwd_unchanged didn't return us back to cwd %s\" % orig_cwd)\n eq_(orig_pwd, getpwd(),\n \"assert_cwd_unchanged didn't return us back to pwd %s\" % orig_pwd)\n\ndef test_assert_Xwd_unchanged():\n yield _test_assert_Xwd_unchanged, os.chdir\n yield _test_assert_Xwd_unchanged, chpwd\n\n\ndef _test_assert_Xwd_unchanged_ok_chdir(func):\n # Test that we are not masking out other \"more important\" exceptions\n\n orig_cwd = os.getcwd()\n orig_pwd = getpwd()\n\n 
@assert_cwd_unchanged(ok_to_chdir=True)\n def do_chdir_value_error():\n func(os.pardir)\n\n with swallow_logs() as cml:\n do_chdir_value_error()\n eq_(orig_cwd, os.getcwd(),\n \"assert_cwd_unchanged didn't return us back to cwd %s\" % orig_cwd)\n eq_(orig_pwd, getpwd(),\n \"assert_cwd_unchanged didn't return us back to cwd %s\" % orig_pwd)\n assert_not_in(\"Mitigating and changing back\", cml.out)\n\ndef test_assert_Xwd_unchanged_ok_chdir():\n yield _test_assert_Xwd_unchanged_ok_chdir, os.chdir\n yield _test_assert_Xwd_unchanged_ok_chdir, chpwd\n\ndef test_assert_cwd_unchanged_not_masking_exceptions():\n # Test that we are not masking out other \"more important\" exceptions\n\n orig_cwd = os.getcwd()\n\n @assert_cwd_unchanged\n def do_chdir_value_error():\n os.chdir(os.pardir)\n raise ValueError(\"error exception\")\n\n with swallow_logs(new_level=logging.WARN) as cml:\n with assert_raises(ValueError) as cm:\n do_chdir_value_error()\n # retrospect exception\n if PY2:\n # could not figure out how to make it legit for PY3\n # but on manual try -- works, and exception traceback is not masked out\n exc_info = sys.exc_info()\n assert_in('raise ValueError(\"error exception\")', traceback.format_exception(*exc_info)[-2])\n\n eq_(orig_cwd, os.getcwd(),\n \"assert_cwd_unchanged didn't return us back to %s\" % orig_cwd)\n assert_in(\"Mitigating and changing back\", cml.out)\n\n # and again but allowing to chdir\n @assert_cwd_unchanged(ok_to_chdir=True)\n def do_chdir_value_error():\n os.chdir(os.pardir)\n raise ValueError(\"error exception\")\n\n with swallow_logs(new_level=logging.WARN) as cml:\n assert_raises(ValueError, do_chdir_value_error)\n eq_(orig_cwd, os.getcwd(),\n \"assert_cwd_unchanged didn't return us back to %s\" % orig_cwd)\n assert_not_in(\"Mitigating and changing back\", cml.out)\n\n\n@with_tempfile(mkdir=True)\ndef _test_serve_path_via_http(test_fpath, tmp_dir): # pragma: no cover\n\n # First verify that filesystem layer can encode this filename\n # verify first that we could encode file name in this environment\n try:\n filesysencoding = sys.getfilesystemencoding()\n test_fpath_encoded = test_fpath.encode(filesysencoding)\n except UnicodeEncodeError: # pragma: no cover\n raise SkipTest(\"Environment doesn't support unicode filenames\")\n if test_fpath_encoded.decode(filesysencoding) != test_fpath: # pragma: no cover\n raise SkipTest(\"Can't convert back/forth using %s encoding\"\n % filesysencoding)\n\n test_fpath_full = text_type(os.path.join(tmp_dir, test_fpath))\n test_fpath_dir = text_type(os.path.dirname(test_fpath_full))\n\n if not os.path.exists(test_fpath_dir):\n os.makedirs(test_fpath_dir)\n\n with open(test_fpath_full, 'w') as f:\n test_txt = 'some txt and a randint {}'.format(random.randint(1, 10)) \n f.write(test_txt)\n\n @serve_path_via_http(tmp_dir)\n def test_path_and_url(path, url):\n\n # @serve_ should remove http_proxy from the os.environ if was present\n assert_false('http_proxy' in os.environ)\n url = url + os.path.dirname(test_fpath)\n assert_true(urlopen(url))\n u = urlopen(url)\n assert_true(u.getcode() == 200)\n html = u.read()\n soup = bs4.BeautifulSoup(html, \"html.parser\")\n href_links = [txt.get('href') for txt in soup.find_all('a')]\n assert_true(len(href_links) == 1)\n\n url = \"{}/{}\".format(url, href_links[0])\n u = urlopen(url)\n html = u.read().decode()\n assert(test_txt == html)\n\n if bs4 is None: # pragma: no cover\n raise SkipTest(\"bs4 is absent\")\n test_path_and_url()\n\n\ndef test_serve_path_via_http():\n for test_fpath in 
['test1.txt',\n 'test_dir/test2.txt',\n 'test_dir/d2/d3/test3.txt',\n 'file with space test4',\n u'Джэйсон',\n get_most_obscure_supported_name(),\n ]:\n\n yield _test_serve_path_via_http, test_fpath\n\n # just with the last one check that we did remove proxy setting\n with patch.dict('os.environ', {'http_proxy': 'http://127.0.0.1:9/'}):\n yield _test_serve_path_via_http, test_fpath\n\n\ndef test_without_http_proxy():\n\n @without_http_proxy\n def check(a, kw=False):\n assert_false('http_proxy' in os.environ)\n assert_false('https_proxy' in os.environ)\n assert_in(kw, [False, 'custom'])\n\n check(1)\n\n with patch.dict('os.environ', {'http_proxy': 'http://127.0.0.1:9/'}):\n check(1)\n check(1, \"custom\")\n with assert_raises(AssertionError):\n check(1, \"wrong\")\n\n with patch.dict('os.environ', {'https_proxy': 'http://127.0.0.1:9/'}):\n check(1)\n with patch.dict('os.environ', {'http_proxy': 'http://127.0.0.1:9/',\n 'https_proxy': 'http://127.0.0.1:9/'}):\n check(1)\n\n\ndef test_assert_re_in():\n assert_re_in(\".*\", \"\")\n assert_re_in(\".*\", [\"any\"])\n\n # should do match not search\n assert_re_in(\"ab\", \"abc\")\n assert_raises(AssertionError, assert_re_in, \"ab\", \"cab\")\n assert_raises(AssertionError, assert_re_in, \"ab$\", \"abc\")\n\n # Sufficient to have one entry matching\n assert_re_in(\"ab\", [\"\", \"abc\", \"laskdjf\"])\n assert_raises(AssertionError, assert_re_in, \"ab$\", [\"ddd\", \"\"])\n\n # Tuples should be ok too\n assert_re_in(\"ab\", (\"\", \"abc\", \"laskdjf\"))\n assert_raises(AssertionError, assert_re_in, \"ab$\", (\"ddd\", \"\"))\n\n # shouldn't \"match\" the empty list\n assert_raises(AssertionError, assert_re_in, \"\", [])\n\n\ndef test_skip_if_no_network():\n cleaned_env = os.environ.copy()\n cleaned_env.pop('DATALAD_TESTS_NONETWORK', None)\n # we need to run under cleaned env to make sure we actually test in both conditions\n with patch('os.environ', cleaned_env):\n @skip_if_no_network\n def somefunc(a1):\n return a1\n ok_(hasattr(somefunc, \"network\"))\n with patch.dict('os.environ', {'DATALAD_TESTS_NONETWORK': '1'}):\n assert_raises(SkipTest, somefunc, 1)\n with patch.dict('os.environ', {}):\n eq_(somefunc(1), 1)\n # and now if used as a function, not a decorator\n with patch.dict('os.environ', {'DATALAD_TESTS_NONETWORK': '1'}):\n assert_raises(SkipTest, skip_if_no_network)\n with patch.dict('os.environ', {}):\n eq_(skip_if_no_network(), None)\n\n\ndef test_skip_if_no_module():\n\n def testish():\n skip_if_no_module(\"nonexistingforsuremodule\")\n raise ValueError\n assert_raises(SkipTest, testish)\n\n def testish2():\n skip_if_no_module(\"datalad\")\n return \"magic\"\n eq_(testish2(), \"magic\")\n\n\ndef test_skip_if():\n\n with assert_raises(SkipTest):\n @skip_if(True)\n def f(): # pragma: no cover\n raise AssertionError(\"must have not been ran\")\n f()\n\n @skip_if(False)\n def f():\n return \"magical\"\n eq_(f(), 'magical')\n\n\n@assert_cwd_unchanged\n@with_tempfile(mkdir=True)\ndef test_run_under_dir(d):\n orig_pwd = getpwd()\n orig_cwd = os.getcwd()\n\n @run_under_dir(d)\n def f(arg, kwarg=None):\n eq_(arg, 1)\n eq_(kwarg, 2)\n eq_(getpwd(), d)\n\n f(1, 2)\n eq_(getpwd(), orig_pwd)\n eq_(os.getcwd(), orig_cwd)\n\n # and if fails\n assert_raises(AssertionError, f, 1, 3)\n eq_(getpwd(), orig_pwd)\n eq_(os.getcwd(), orig_cwd)\n\n\ndef test_assert_dict_equal():\n assert_dict_equal({}, {})\n assert_dict_equal({\"a\": 3}, {\"a\": 3})\n assert_raises(AssertionError, assert_dict_equal, {1: 3}, {1: 4})\n assert_raises(AssertionError, 
assert_dict_equal, {1: 3}, {2: 4})\n assert_raises(AssertionError, assert_dict_equal, {1: 3}, {2: 4, 1: 3})\n assert_raises(AssertionError, assert_dict_equal, {1: 3}, {2: 4, 1: 'a'})\n try:\n import numpy as np\n except: # pragma: no cover\n raise SkipTest(\"need numpy for this tiny one\")\n # one is scalar another one array\n assert_raises(AssertionError, assert_dict_equal, {1: 0}, {1: np.arange(1)})\n assert_raises(AssertionError, assert_dict_equal, {1: 0}, {1: np.arange(3)})\n\n\ndef test_assert_str_equal():\n assert_str_equal(\"a\", \"a\")\n assert_str_equal(\"a\\n\", \"a\\n\")\n assert_str_equal(\"a\\nb\", \"a\\nb\")\n assert_raises(AssertionError, assert_str_equal, \"a\", \"a\\n\")\n assert_raises(AssertionError, assert_str_equal, \"a\", \"b\")\n assert_raises(AssertionError, assert_str_equal, \"ab\", \"b\")\n\n\ndef test_testsui():\n # just one for now to test conflicting arguments\n with assert_raises(ValueError):\n @with_testsui(responses='some', interactive=False)\n def some_func(): # pragma: no cover\n pass\n\n from datalad.ui import ui\n\n @with_testsui(responses=['yes', \"maybe so\"])\n def func2(x):\n assert x == 1\n eq_(ui.yesno(\"title\"), True)\n eq_(ui.question(\"title2\"), \"maybe so\")\n assert_raises(AssertionError, ui.question, \"asking more than we know\")\n return x*2\n eq_(func2(1), 2)\n\n @with_testsui(interactive=False)\n def func3(x):\n assert_false(ui.is_interactive)\n return x*3\n eq_(func3(2), 6)\n\n\ndef test_setup():\n # just verify that we monkey patched consts correctly\n from datalad.consts import DATASETS_TOPURL\n eq_(DATASETS_TOPURL, 'http://datasets-tests.datalad.org/')\n from datalad.tests.utils import get_datasets_topdir\n eq_(get_datasets_topdir(), 'datasets-tests.datalad.org')\n\n\ndef test_skip_ssh():\n with patch_config({'datalad.tests.ssh': False}):\n with assert_raises(SkipTest):\n skip_ssh(lambda: False)()\n\n\ndef test_probe_known_failure():\n # should raise assert error if function no longer fails\n with patch_config({'datalad.tests.knownfailures.probe': True}):\n with assert_raises(AssertionError):\n probe_known_failure(lambda: True)()\n\n with patch_config({'datalad.tests.knownfailures.probe': False}):\n ok_(probe_known_failure(lambda: True))\n\n\ndef test_ignore_nose_capturing_stdout():\n # Just test the logic, not really a situation under overwritten stdout\n def raise_exc():\n raise AttributeError('nose causes a message which includes words '\n 'StringIO and fileno')\n with assert_raises(SkipTest):\n ignore_nose_capturing_stdout(raise_exc)()\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bjayakumar/test_vendor
[ "e32c1a69754cedcec46d3e76e43a72743ebb8ed8", "e32c1a69754cedcec46d3e76e43a72743ebb8ed8" ]
[ "python/baseline/tf/seq2seq/train.py", "python/baseline/pytorch/seq2seq/model.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom baseline.utils import listify, get_model_file\nfrom baseline.reporting import basic_reporting\nfrom baseline.tf.tfy import optimizer\nfrom baseline.train import Trainer, create_trainer\nimport time\nimport os\nfrom baseline.utils import zip_model\n\nclass Seq2SeqTrainerTf(Trainer):\n\n def __init__(self, model, **kwargs):\n super(Seq2SeqTrainerTf, self).__init__()\n self.sess = model.sess\n self.loss = model.create_loss()\n self.test_loss = model.create_test_loss()\n self.model = model\n self.global_step, self.train_op = optimizer(self.loss, **kwargs)\n\n def checkpoint(self):\n self.model.saver.save(self.model.sess, \"./tf-seq2seq-%d/seq2seq\" % os.getpid(), global_step=self.global_step)\n\n def recover_last_checkpoint(self):\n latest = tf.train.latest_checkpoint(\"./tf-seq2seq-%d\" % os.getpid())\n print('Reloading ' + latest)\n self.model.saver.restore(self.model.sess, latest)\n\n def prepare(self, saver):\n self.model.set_saver(saver)\n\n def train(self, ts, reporting_fns):\n total_loss = 0\n steps = 0\n metrics = {}\n duration = 0\n\n fetches = {\n \"loss\": self.loss,\n \"train_op\": self.train_op,\n \"global_step\": self.global_step}\n\n for batch_dict in ts:\n start_time = time.time()\n steps += 1\n feed_dict = self.model.make_input(batch_dict, do_dropout=True)\n vals = self.model.sess.run(fetches, feed_dict=feed_dict)\n global_step = vals[\"global_step\"]\n lossv = vals[\"loss\"]\n\n total_loss += lossv\n duration += time.time() - start_time\n\n if steps % 500 == 0:\n print('Step time (%.3f sec)' % (duration / 500.))\n duration = 0\n metrics['avg_loss'] = total_loss / steps\n metrics['perplexity'] = np.exp(total_loss / steps)\n for reporting in reporting_fns:\n reporting(metrics, global_step, 'Train')\n \n assert(steps == len(ts))\n\n metrics['avg_loss'] = total_loss / steps\n metrics['perplexity'] = np.exp(total_loss / steps)\n for reporting in reporting_fns:\n reporting(metrics, global_step, 'Train')\n return metrics\n\n def test(self, vs, reporting_fns, phase='Valid'):\n epochs = 0\n if phase == 'Valid':\n self.valid_epochs += 1\n epochs = self.valid_epochs\n\n fetches = {\n \"loss\": self.test_loss,\n }\n\n total_loss = 0\n steps = len(vs)\n metrics = {}\n\n for batch_dict in vs:\n\n feed_dict = self.model.make_input(batch_dict)\n vals = self.model.sess.run(fetches, feed_dict)\n lossv = vals[\"loss\"]\n total_loss += lossv\n\n avg_loss = total_loss/steps\n metrics['avg_loss'] = avg_loss\n metrics['perplexity'] = np.exp(avg_loss)\n for reporting in reporting_fns:\n reporting(metrics, epochs, phase)\n return metrics\n\n\ndef fit(model, ts, vs, es=None, **kwargs):\n epochs = int(kwargs['epochs']) if 'epochs' in kwargs else 5\n patience = int(kwargs['patience']) if 'patience' in kwargs else epochs\n\n model_file = get_model_file(kwargs, 'seq2seq', 'tf')\n after_train_fn = kwargs['after_train_fn'] if 'after_train_fn' in kwargs else None\n trainer = create_trainer(Seq2SeqTrainerTf, model, **kwargs)\n init = tf.global_variables_initializer()\n model.sess.run(init)\n saver = tf.train.Saver()\n trainer.prepare(saver)\n\n do_early_stopping = bool(kwargs.get('do_early_stopping', True))\n\n if do_early_stopping:\n early_stopping_metric = kwargs.get('early_stopping_metric', 'avg_loss')\n patience = kwargs.get('patience', epochs)\n print('Doing early stopping on [%s] with patience [%d]' % (early_stopping_metric, patience))\n\n reporting_fns = listify(kwargs.get('reporting', basic_reporting))\n print('reporting', reporting_fns)\n\n 
min_metric = 10000\n last_improved = 0\n\n for epoch in range(epochs):\n\n trainer.train(ts, reporting_fns)\n if after_train_fn is not None:\n after_train_fn(model)\n test_metrics = trainer.test(vs, reporting_fns, phase='Valid')\n\n if do_early_stopping is False:\n trainer.checkpoint()\n trainer.model.save(model_file)\n\n elif test_metrics[early_stopping_metric] < min_metric:\n last_improved = epoch\n min_metric = test_metrics[early_stopping_metric]\n print('New min %.3f' % min_metric)\n trainer.checkpoint()\n trainer.model.save(model_file)\n\n elif (epoch - last_improved) > patience:\n print('Stopping due to persistent failures to improve')\n break\n\n if do_early_stopping is True:\n print('Best performance on min_metric %.3f at epoch %d' % (min_metric, last_improved))\n if es is not None:\n\n trainer.recover_last_checkpoint()\n trainer.test(es, reporting_fns, phase='Test')\n if kwargs.get(\"model_zip\", False):\n zip_model(model_file)\n", "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom baseline.pytorch.torchy import *\nfrom baseline.model import EncoderDecoder, load_seq2seq_model, create_seq2seq_model\n\n\nclass Seq2SeqBase(nn.Module, EncoderDecoder):\n\n def __init__(self, embeddings_in, embeddings_out):\n super(Seq2SeqBase, self).__init__()\n self.embed_in = pytorch_embedding(embeddings_in)\n self.embed_out = pytorch_embedding(embeddings_out)\n self.nc = embeddings_out.vsz + 1\n self.vocab1 = embeddings_in.vocab\n self.vocab2 = embeddings_out.vocab\n self.beam_sz = 1\n\n def get_src_vocab(self):\n return self.vocab1\n\n def get_dst_vocab(self):\n return self.vocab2\n\n def save(self, model_file):\n torch.save(self, model_file)\n\n def create_loss(self):\n return SequenceCriterion()\n\n @classmethod\n def load(cls, outname, **kwargs):\n model = torch.load(outname)\n return model\n\n @classmethod\n def create(cls, input_embeddings, output_embeddings, **kwargs):\n\n model = cls(input_embeddings, output_embeddings, **kwargs)\n print(model)\n return model\n\n def make_input(self, batch_dict):\n src = batch_dict['src']\n src_len = batch_dict['src_len']\n tgt = batch_dict['dst']\n\n dst = tgt[:, :-1]\n tgt = tgt[:, 1:]\n\n src_len, perm_idx = src_len.sort(0, descending=True)\n src = src[perm_idx]\n dst = dst[perm_idx]\n tgt = tgt[perm_idx]\n\n if self.gpu:\n src = src.cuda()\n dst = dst.cuda()\n tgt = tgt.cuda()\n src_len = src_len.cuda()\n\n return Variable(src), Variable(dst), Variable(src_len, requires_grad=False), Variable(tgt)\n\n # Input better be xch, x\n def forward(self, input):\n src = input[0]\n dst = input[1]\n src_len = input[2]\n rnn_enc_tbh, final_encoder_state = self.encode(src, src_len)\n return self.decode(rnn_enc_tbh, src_len, final_encoder_state, dst)\n\n def encode(self, src_bth, src_len):\n src = src_bth.transpose(0, 1).contiguous()\n embed_in_seq = self.embed_in(src)\n packed = torch.nn.utils.rnn.pack_padded_sequence(embed_in_seq, src_len.data.tolist())\n output_tbh, hidden = self.encoder_rnn(packed)\n output_tbh, _ = torch.nn.utils.rnn.pad_packed_sequence(output_tbh)\n return output_tbh, hidden\n\n def input_i(self, embed_i, output_i):\n pass\n\n def bridge(self, final_encoder_state, context):\n pass\n\n def attn(self, output_t, context, src_mask=None):\n pass\n\n def decode_rnn(self, context_tbh, h_i, output_i, dst, src_mask):\n embed_out_tbh = self.embed_out(dst)\n context_bth = context_tbh.transpose(0, 1)\n outputs = []\n\n for i, embed_i in enumerate(embed_out_tbh.split(1)):\n embed_i = self.input_i(embed_i, output_i)\n output_i, h_i 
= self.decoder_rnn(embed_i, h_i)\n output_i = self.attn(output_i, context_bth, src_mask)\n output_i = self.dropout(output_i)\n outputs += [output_i]\n\n outputs = torch.stack(outputs)\n return outputs, h_i\n\n def decode(self, context_tbh, src_len, final_encoder_state, dst):\n\n src_mask = sequence_mask(src_len)\n if self.gpu:\n src_mask = src_mask.cuda()\n dst = dst.transpose(0, 1).contiguous()\n\n h_i, output_i = self.bridge(final_encoder_state, context_tbh)\n output, _ = self.decode_rnn(context_tbh, h_i, output_i, dst, src_mask)\n pred = self.prediction(output)\n return pred.transpose(0, 1).contiguous()\n\n def prediction(self, output):\n # Reform batch as (T x B, D)\n pred = self.probs(self.preds(output.view(output.size(0)*output.size(1),\n -1)))\n # back to T x B x H -> B x T x H\n pred = pred.view(output.size(0), output.size(1), -1)\n return pred\n\n # B x K x T and here T is a list\n def run(self, batch_dict, **kwargs):\n src = batch_dict['src']\n src_len = batch_dict['src_len']\n src = torch.from_numpy(src) if type(src) == np.ndarray else src\n if type(src_len) == int:\n src_len = np.array([src_len])\n src_len = torch.from_numpy(src_len) if type(src_len) == np.ndarray else src_len\n if torch.is_tensor(src):\n src = torch.autograd.Variable(src, requires_grad=False)\n if torch.is_tensor(src_len):\n src_len = torch.autograd.Variable(src_len, requires_grad=False)\n\n if self.gpu:\n src = src.cuda()\n src_len = src_len.cuda()\n batch = []\n for src_i, src_len_i in zip(src, src_len):\n src_len_i = src_len_i.unsqueeze(0)\n batch += [self.beam_decode(src_i.view(1, -1), src_len_i, kwargs.get('beam', 1))[0]]\n\n return batch\n\n def beam_decode(self, src, src_len, K):\n with torch.no_grad():\n\n T = src.size(1)\n context, h_i = self.encode(src, src_len)\n src_mask = sequence_mask(src_len)\n dst_vocab = self.get_dst_vocab()\n GO = dst_vocab['<GO>']\n EOS = dst_vocab['<EOS>']\n\n paths = [[GO] for _ in range(K)]\n # K\n scores = torch.FloatTensor([0. 
for _ in range(K)])\n if self.gpu:\n scores = scores.cuda()\n src_mask = src_mask.cuda()\n # TBH\n context = torch.autograd.Variable(context.data.repeat(1, K, 1))\n h_i = (torch.autograd.Variable(h_i[0].data.repeat(1, K, 1)),\n torch.autograd.Variable(h_i[1].data.repeat(1, K, 1)))\n h_i, dec_out = self.bridge(h_i, context)\n\n for i in range(T):\n lst = [path[-1] for path in paths]\n dst = torch.LongTensor(lst).type(src.data.type())\n mask_eos = dst == EOS\n mask_pad = dst == 0\n dst = dst.view(1, K)\n var = torch.autograd.Variable(dst)\n dec_out, h_i = self.decode_rnn(context, h_i, dec_out, var, src_mask)\n # 1 x K x V\n wll = self.prediction(dec_out).data\n # Just mask wll against end data\n V = wll.size(-1)\n dec_out = dec_out.squeeze(0) # get rid of T=t dimension\n # K x V\n wll = wll.squeeze(0) # get rid of T=t dimension\n\n if i > 0:\n expanded_history = scores.unsqueeze(1).expand_as(wll)\n wll.masked_fill_(mask_eos | mask_pad, 0)\n sll = wll + expanded_history\n else:\n sll = wll[0]\n\n flat_sll = sll.view(-1)\n best, best_idx = flat_sll.squeeze().topk(K, 0)\n best_beams = best_idx / V\n best_idx = best_idx % V\n new_paths = []\n for j, beam_id in enumerate(best_beams):\n new_paths.append(paths[beam_id] + [best_idx[j]])\n scores[j] = best[j]\n\n # Copy the beam state of the winners\n for hc in h_i: # iterate over h, c\n old_beam_state = hc.clone()\n for i, beam_id in enumerate(best_beams):\n H = hc.size(2)\n src_beam = old_beam_state.view(-1, K, H)[:, beam_id]\n dst_beam = hc.view(-1, K, H)[:, i]\n dst_beam.data.copy_(src_beam.data)\n paths = new_paths\n\n return [p[1:] for p in paths], scores\n\n\nclass Seq2SeqModel(Seq2SeqBase):\n\n def __init__(self, embeddings_in, embeddings_out, **kwargs):\n super(Seq2SeqModel, self).__init__(embeddings_in, embeddings_out)\n\n self.hsz = kwargs['hsz']\n nlayers = kwargs['layers']\n rnntype = kwargs['rnntype']\n pdrop = kwargs.get('dropout', 0.5)\n enc_hsz = self.hsz\n if rnntype == 'blstm':\n enc_hsz = enc_hsz // 2\n dsz = embeddings_in.dsz\n self.gpu = kwargs.get('gpu', True)\n self.dropout = nn.Dropout(pdrop)\n self.encoder_rnn = pytorch_rnn(dsz, enc_hsz, rnntype, nlayers, pdrop)\n self.preds = nn.Linear(self.hsz, self.nc)\n self.decoder_rnn = pytorch_rnn_cell(dsz, self.hsz, rnntype, nlayers, pdrop)\n self.probs = nn.LogSoftmax(dim=1)\n\n def input_i(self, embed_i, output_i):\n return embed_i.squeeze(0)\n\n def bridge(self, final_encoder_state, context):\n return final_encoder_state, None\n\n def attn(self, output_t, context, src_mask=None):\n return output_t\n\n\nclass Seq2SeqAttnModel(Seq2SeqBase):\n\n def __init__(self, embeddings_in, embeddings_out, **kwargs):\n super(Seq2SeqAttnModel, self).__init__(embeddings_in, embeddings_out)\n self.hsz = kwargs['hsz']\n nlayers = kwargs['layers']\n rnntype = kwargs['rnntype']\n pdrop = kwargs.get('dropout', 0.5)\n enc_hsz = self.hsz\n if rnntype == 'blstm':\n enc_hsz = enc_hsz // 2\n dsz = embeddings_in.dsz\n self.gpu = kwargs.get('gpu', True)\n self.encoder_rnn = pytorch_rnn(dsz, enc_hsz, rnntype, nlayers, pdrop)\n self.dropout = nn.Dropout(pdrop)\n self.decoder_rnn = pytorch_rnn_cell(self.hsz + dsz, self.hsz, rnntype, nlayers, pdrop)\n self.preds = nn.Linear(self.hsz, self.nc)\n self.probs = nn.LogSoftmax(dim=1)\n self.nlayers = nlayers\n attn_type = kwargs.get('attn_type', 'bahdanau').lower()\n if attn_type == 'dot':\n self.attn_module = LuongDotProductAttention(self.hsz)\n elif attn_type == 'concat' or attn_type == 'bahdanau':\n self.attn_module = BahdanauAttention(self.hsz)\n elif attn_type == 
'sdp':\n self.attn_module = ScaledDotProductAttention(self.hsz)\n else:\n self.attn_module = LuongGeneralAttention(self.hsz)\n\n def attn(self, output_t, context, src_mask=None):\n return self.attn_module(output_t, context, context, src_mask)\n\n def bridge(self, final_encoder_state, context):\n batch_size = context.size(1)\n h_size = (batch_size, self.hsz)\n context_zeros = Variable(context.data.new(*h_size).zero_(), requires_grad=False)\n if type(final_encoder_state) is tuple:\n s1, s2 = final_encoder_state\n return (s1, s2), context_zeros\n else:\n return final_encoder_state, context_zeros\n\n def input_i(self, embed_i, output_i):\n embed_i = embed_i.squeeze(0)\n return torch.cat([embed_i, output_i], 1)\n\n\nBASELINE_SEQ2SEQ_MODELS = {\n 'default': Seq2SeqModel.create,\n 'attn': Seq2SeqAttnModel.create\n}\nBASELINE_SEQ2SEQ_LOADERS = {\n 'default': Seq2SeqModel.load,\n 'attn': Seq2SeqAttnModel.create\n}\n\n\ndef create_model(src_vocab_embed, dst_vocab_embed, **kwargs):\n model = create_seq2seq_model(BASELINE_SEQ2SEQ_MODELS, src_vocab_embed, dst_vocab_embed, **kwargs)\n return model\n\n\ndef load_model(modelname, **kwargs):\n return load_seq2seq_model(BASELINE_SEQ2SEQ_LOADERS, modelname, **kwargs)\n" ]
[ [ "tensorflow.train.Saver", "tensorflow.global_variables_initializer", "numpy.exp" ], [ "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.LongTensor", "torch.load", "torch.cat", "torch.is_tensor", "torch.from_numpy", "torch.autograd.Variable", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "torch.no_grad", "torch.stack", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neohanju/AutoencodingTheWorld
[ "23f8a89bb7399df63cd7a0cb1b5a750214a44072" ]
[ "Datasets/RGBImageSet.py" ]
[ "import os\nimport glob\nimport torch.utils.data\nimport numpy as np\n\nclass RGBImageSet(torch.utils.data.Dataset):\n def __init__(self, path, centered=False):\n super().__init__()\n self.centered = centered\n self.add_string = lambda a, b: a + b\n\n assert os.path.exists(path)\n self.base_path = path\n\n self.mean_image = self.get_mean_image()\n\n cur_file_paths = glob.glob(self.base_path + '/*.npy')\n cur_file_paths.sort()\n self.file_paths = cur_file_paths\n\n\n def __len__(self):\n return len(self.file_paths)\n\n def __getitem__(self, item):\n data = np.load(self.file_paths[item])\n data = np.transpose(data, (2, 0, 1))\n # (h w c) => (c h w)\n\n if data.dtype.name == 'uint8':\n data = data.astype(float)\n\n if self.centered:\n data = torch.FloatTensor(data)\n else:\n data = torch.FloatTensor(data)\n data = data - self.mean_image\n data.div_(255)\n return data\n\n def get_decenterd_data(self, centered_data):\n result = centered_data.mul_(255) + self.mean_image\n result = result.byte()\n return result\n\n def get_mean_image(self):\n mean_image = np.load(os.path.join(os.path.dirname(self.base_path), \"mean_image.npy\"))\n mean_image = np.transpose(mean_image, (2, 0, 1))\n mean_image = torch.from_numpy(mean_image).float()\n return mean_image" ]
[ [ "numpy.load", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nashid/naturalcc
[ "9c3329dd8387c8242deb52bf590ebe3ac795f8de", "9c3329dd8387c8242deb52bf590ebe3ac795f8de" ]
[ "ncc/modules/encoders/tree/nary_tree_lstm.py", "run/translation/transformer/train.py" ]
[ "import dgl\nimport torch\nimport torch.nn as nn\n\nfrom ncc.data.constants import DEFAULT_MAX_SOURCE_POSITIONS\nfrom ncc.modules.base.layers import (\n Embedding,\n Linear,\n)\nfrom ..ncc_encoder import NccEncoder\n\n\nclass NaryTreeLSTMCell(nn.Module):\n def __init__(self, input_size: int, hidden_size: int) -> None:\n super(NaryTreeLSTMCell, self).__init__()\n self.W_iou = Linear(input_size, 3 * hidden_size, bias=False)\n self.U_iou = Linear(2 * hidden_size, 3 * hidden_size, bias=False)\n self.b_iou = nn.Parameter(torch.zeros(1, 3 * hidden_size))\n self.U_f = Linear(2 * hidden_size, 2 * hidden_size)\n\n def message_func(self, edges):\n # aggregate neighbors’ representations\n return {'h': edges.src['h'], 'c': edges.src['c']}\n\n def reduce_func(self, nodes):\n # tranform aggregated representations from neighbors\n # LOGGER.debug(nodes.mailbox['h'].size())\n h_cat = nodes.mailbox['h'].reshape(nodes.mailbox['h'].size(0), -1)\n f = torch.sigmoid(self.U_f(h_cat)).reshape(*nodes.mailbox['h'].size())\n c = torch.sum(f * nodes.mailbox['c'], 1)\n return_iou = self.U_iou(h_cat)\n return {'iou': return_iou, 'c': c}\n\n def apply_node_func(self, nodes):\n iou = nodes.data['iou'] + self.b_iou\n i, o, u = torch.chunk(iou, 3, 1)\n i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)\n c = i * u + nodes.data['c']\n h = o * torch.tanh(c)\n return {'h': h, 'c': c}\n\n\nclass NaryTreeLSTMEncoder(NccEncoder):\n \"\"\"LSTM encoder.\"\"\"\n\n def __init__(\n self, dictionary, embed_dim=512, hidden_size=512, num_layers=1,\n dropout_in=0.1, dropout_out=0.1, bidirectional=False,\n left_pad=True, pretrained_embed=None, padding_idx=None,\n max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS,\n ):\n super().__init__(dictionary)\n self.num_layers = num_layers\n self.dropout_in = dropout_in\n self.dropout_out = dropout_out\n self.bidirectional = bidirectional\n self.hidden_size = hidden_size\n self.max_source_positions = max_source_positions\n\n num_embeddings = len(dictionary)\n self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad()\n if pretrained_embed is None:\n self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)\n else:\n self.embed_tokens = pretrained_embed\n\n self.lstm = NaryTreeLSTMCell(\n input_size=embed_dim,\n hidden_size=hidden_size,\n )\n\n self.left_pad = left_pad\n\n self.output_units = hidden_size\n if bidirectional:\n self.output_units *= 2\n\n def init_hidden(self, batch_size):\n weight = next(self.parameters()).data\n return (\n weight.new(batch_size, self.hidden_size).zero_().requires_grad_(),\n weight.new(batch_size, self.hidden_size).zero_().requires_grad_()\n )\n\n def forward(self, graph, root_ids, node_nums, enc_hidden=None):\n \"\"\"\n Compute tree-lstm prediction given a batch.\n Parameters\n ----------\n h : Tensor\n Initial hidden state.\n c : Tensor\n Initial cell state.\n Returns\n -------\n logits : Tensor\n The prediction of each node.\n \"\"\"\n if enc_hidden is None:\n enc_hidden = self.init_hidden(graph.number_of_nodes())\n\n graph.register_message_func(self.lstm.message_func)\n graph.register_reduce_func(self.lstm.reduce_func)\n graph.register_apply_node_func(self.lstm.apply_node_func)\n\n emb_subtoken = self.embed_tokens(\n graph.ndata['x'] * graph.ndata['mask'].reshape(*graph.ndata['mask'].shape, 1)\n )\n wemb = torch.sum(emb_subtoken, dim=1) # feed embedding\n graph.ndata['iou'] = self.lstm.W_iou(wemb) * graph.ndata['mask'].unsqueeze(-1).type_as(emb_subtoken)\n graph.ndata['h'], graph.ndata['c'] = enc_hidden\n\n 
dgl.prop_nodes_topo(graph)\n\n all_node_h_in_batch = graph.ndata.pop('h')\n all_node_c_in_batch = graph.ndata.pop('c')\n\n batch_size = root_ids.size()[0]\n root_node_h_in_batch, root_node_c_in_batch = [], []\n add_up_num_node = 0\n for _i in range(len(root_ids)):\n if _i - 1 < 0:\n add_up_num_node = 0\n else:\n add_up_num_node += node_nums[_i - 1]\n idx_to_query = root_ids[_i] + add_up_num_node\n root_node_h_in_batch.append(all_node_h_in_batch[idx_to_query])\n root_node_c_in_batch.append(all_node_c_in_batch[idx_to_query])\n\n root_node_h_in_batch = torch.cat(root_node_h_in_batch).reshape(batch_size, -1)\n root_node_c_in_batch = torch.cat(root_node_c_in_batch).reshape(batch_size, -1)\n\n tree_output = emb_subtoken.new_zeros(batch_size, max(node_nums), root_node_h_in_batch.shape[-1])\n add_up_node_num = 0\n for _i in range(batch_size):\n node_num = node_nums[_i]\n this_sample_h = all_node_h_in_batch[add_up_node_num:add_up_node_num + node_nums[_i]]. \\\n reshape(node_num, -1)\n add_up_node_num += node_nums[_i]\n tree_output[_i, :node_num, :] = this_sample_h\n\n tree_output = tree_output.transpose(dim0=0, dim1=1)\n root_node_h_in_batch = root_node_h_in_batch.unsqueeze(dim=0)\n root_node_c_in_batch = root_node_c_in_batch.unsqueeze(dim=0)\n return {\n 'encoder_out': (tree_output, root_node_h_in_batch, root_node_c_in_batch),\n 'encoder_padding_mask': None\n }\n\n def reorder_encoder_out(self, encoder_out, new_order):\n encoder_out['encoder_out'] = tuple(\n eo.index_select(1, new_order)\n for eo in encoder_out['encoder_out']\n )\n if encoder_out['encoder_padding_mask'] is not None:\n encoder_out['encoder_padding_mask'] = \\\n encoder_out['encoder_padding_mask'].index_select(1, new_order)\n return encoder_out\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n return self.max_source_positions\n", "import math\nimport os\nimport random\n\nimport torch\n\nfrom ncc import LOGGER\nfrom ncc import tasks\nfrom ncc.data import iterators\nfrom ncc.trainers.ncc_trainers import Trainer\n# from ncc.trainers.disentangle.frozen_trainer import FrozenTrainer as Trainer # mapping\nfrom ncc.utils import checkpoint_utils, distributed_utils\nfrom ncc.utils import set_seed\nfrom ncc.utils import utils\nfrom ncc.utils.file_ops.yaml_io import load_yaml\nfrom ncc.utils.logging import meters\nfrom ncc.utils.logging import metrics, progress_bar\nfrom ncc.utils.path_manager import PathManager\n\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\n\[email protected]('train')\ndef train(args, trainer, task, epoch_itr):\n \"\"\"Train the model for one epoch.\"\"\"\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=args['distributed_training']['fix_batches_to_gpus'],\n shuffle=(epoch_itr.next_epoch_idx > args['dataset']['curriculum']),\n )\n update_freq = (\n args['optimization']['update_freq'][epoch_itr.epoch - 1]\n if epoch_itr.epoch <= len(args['optimization']['update_freq'])\n else args['optimization']['update_freq'][-1]\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n progress = progress_bar.progress_bar(\n itr,\n log_format=args['common']['log_format'],\n log_interval=args['common']['log_interval'],\n epoch=epoch_itr.epoch,\n tensorboard_logdir=(\n args['common']['tensorboard_logdir'] if distributed_utils.is_master(args) else None\n ),\n default_log_format=('tqdm' if not args['common']['no_progress_bar'] else 'simple'),\n )\n\n # task specific setup per epoch\n task.begin_epoch(epoch_itr.epoch, trainer.get_model())\n\n 
valid_subsets = args['dataset']['valid_subset'].split(',')\n max_update = args['optimization']['max_update'] or math.inf\n num_updates = 0\n for samples in progress:\n with metrics.aggregate('train_inner'):\n log_output = trainer.train_step(samples)\n if log_output is None: # OOM, overflow, ...\n continue\n\n # log mid-epoch stats\n num_updates = trainer.get_num_updates()\n if num_updates % args['common']['log_interval'] == 0:\n stats = get_training_stats(metrics.get_smoothed_values('train_inner'))\n progress.log(stats, tag='train_inner', step=num_updates)\n\n # reset epoch-level meters\n metrics.reset_meters('train_inner')\n\n if (\n not args['dataset']['disable_validation']\n and args['checkpoint']['save_interval_updates'] > 0\n and num_updates % args['checkpoint']['save_interval_updates'] == 0\n and num_updates > 0\n ):\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n\n if num_updates >= max_update:\n break\n\n # log end-of-epoch stats\n stats = get_training_stats(metrics.get_smoothed_values('train'))\n progress.print(stats, tag='train', step=num_updates)\n\n # reset epoch-level meters\n metrics.reset_meters('train')\n\n\ndef validate(args, trainer, task, epoch_itr, subsets):\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n\n if args['dataset']['fixed_validation_seed'] is not None:\n # set fixed seed for every validation\n set_seed.set_torch_seed(args['dataset']['fixed_validation_seed'])\n\n valid_losses = []\n for subset in subsets:\n # Initialize data iterator\n itr = task.get_batch_iterator(\n dataset=task.dataset(subset),\n max_tokens=args['dataset']['max_tokens_valid'],\n max_sentences=args['dataset']['max_sentences_valid'],\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n trainer.get_model().max_positions(),\n ),\n ignore_invalid_inputs=args['dataset']['skip_invalid_size_inputs_valid_test'],\n required_batch_size_multiple=args['dataset']['required_batch_size_multiple'],\n seed=args['common']['seed'],\n num_shards=args['distributed_training']['distributed_world_size'],\n shard_id=args['distributed_training']['distributed_rank'],\n num_workers=args['dataset']['num_workers'],\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.progress_bar(\n itr,\n log_format=args['common']['log_format'],\n log_interval=args['common']['log_interval'],\n epoch=epoch_itr.epoch,\n prefix=f\"valid on '{subset}' subset\",\n tensorboard_logdir=(\n args['common']['tensorboard_logdir'] if distributed_utils.is_master(args) else None\n ),\n default_log_format=('tqdm' if not args['common']['no_progress_bar'] else 'simple'),\n )\n\n # create a new root metrics aggregator so validation metrics\n # don't pollute other aggregators (e.g., train meters)\n with metrics.aggregate(new_root=True) as agg:\n for sample in progress:\n trainer.valid_step(sample)\n\n # log validation stats\n stats = get_valid_stats(args, trainer, agg.get_smoothed_values())\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n valid_losses.append(stats[args['checkpoint']['best_checkpoint_metric']])\n\n return valid_losses\n\n\ndef get_valid_stats(args, trainer, stats):\n if 'nll_loss' in stats and 'ppl' not in stats:\n stats['ppl'] = utils.get_perplexity(stats['nll_loss'])\n stats['num_updates'] = trainer.get_num_updates()\n if hasattr(checkpoint_utils.save_checkpoint, 'best'):\n key = 'best_{0}'.format(args['checkpoint']['best_checkpoint_metric'])\n 
best_function = max if args['checkpoint']['maximize_best_checkpoint_metric'] else min\n stats[key] = best_function(\n checkpoint_utils.save_checkpoint.best,\n stats[args['checkpoint']['best_checkpoint_metric']],\n )\n return stats\n\n\ndef get_training_stats(stats):\n if 'nll_loss' in stats and 'ppl' not in stats:\n stats['ppl'] = utils.get_perplexity(stats['nll_loss'])\n stats['wall'] = round(metrics.get_meter('default', 'wall').elapsed_time, 0)\n return stats\n\n\ndef should_stop_early(args, valid_loss):\n # skip check if no validation was done in the current epoch\n if valid_loss is None:\n return False\n if args['checkpoint']['patience'] <= 0:\n return False\n\n def is_better(a, b):\n return a > b if args['checkpoint']['maximize_best_checkpoint_metric'] else a < b\n\n prev_best = getattr(should_stop_early, 'best', None)\n if prev_best is None or is_better(valid_loss, prev_best):\n should_stop_early.best = valid_loss\n should_stop_early.num_runs = 0\n return False\n else:\n should_stop_early.num_runs += 1\n if should_stop_early.num_runs >= args['checkpoint']['patience']:\n LOGGER.info('early stop since valid performance hasn\\'t improved for last {} runs'.format(\n args['checkpoint']['patience']))\n\n return should_stop_early.num_runs >= args['checkpoint']['patience']\n\n\ndef single_main(args, init_distributed=False):\n assert args['dataset']['max_tokens'] is not None or args['dataset']['max_sentences'] is not None, \\\n 'Must specify batch size either with --max-tokens or --max-sentences'\n metrics.reset()\n\n # 0. Initialize CUDA and distributed training\n if torch.cuda.is_available() and not args['common']['cpu']:\n torch.cuda.set_device(args['distributed_training']['device_id'])\n set_seed.set_seed(args['common']['seed'])\n if init_distributed:\n args['distributed_training']['distributed_rank'] = distributed_utils.distributed_init(args)\n\n # Verify checkpoint directory\n if distributed_utils.is_master(args):\n save_dir = args['checkpoint']['save_dir']\n checkpoint_utils.verify_checkpoint_directory(save_dir)\n PathManager.rm(os.path.join(save_dir, '*.pt')) # this code will remove pre-trained models\n\n # 1. Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(args)\n\n # 2. Load valid dataset (we load training data below, based on the latest checkpoint)\n task.load_dataset(args['dataset']['valid_subset'], combine=False, epoch=1)\n\n # 3. Build model and criterion\n model = task.build_model(args)\n criterion = task.build_criterion(args)\n LOGGER.info(model)\n LOGGER.info('model {}, criterion {}'.format(args['model']['arch'], criterion.__class__.__name__))\n LOGGER.info('num. model params: {} (num. trained: {})'.format(\n sum(p.numel() for p in model.parameters()),\n sum(p.numel() for p in model.parameters() if p.requires_grad),\n ))\n\n # 4. Build trainer\n trainer = Trainer(args, task, model, criterion)\n LOGGER.info('training on {} GPUs'.format(args['distributed_training']['distributed_world_size']))\n LOGGER.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(\n args['dataset']['max_tokens'],\n args['dataset']['max_sentences'],\n ))\n\n # 5. Load the latest checkpoint if one is available and restore the corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer, combine=False)\n\n # 6. 
Train until the learning rate gets too small\n max_epoch = args['optimization']['max_epoch'] or math.inf\n max_update = args['optimization']['max_update'] or math.inf\n lr = trainer.get_lr()\n train_meter = meters.StopwatchMeter()\n train_meter.start()\n valid_subsets = args['dataset']['valid_subset'].split(',')\n while (\n lr > args['optimization']['min_lr']\n and epoch_itr.next_epoch_idx <= max_epoch\n and trainer.get_num_updates() < max_update\n ):\n # train for one epoch\n train(args, trainer, task, epoch_itr)\n\n if not args['dataset']['disable_validation'] and epoch_itr.epoch % args['dataset']['validate_interval'] == 0:\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n else:\n valid_losses = [None]\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n # save checkpoint\n if epoch_itr.epoch % args['checkpoint']['save_interval'] == 0:\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n\n # early stop\n if should_stop_early(args, valid_losses[0]):\n LOGGER.info('early stop since valid performance hasn\\'t improved for last {} runs'.format(\n args['checkpoint']['patience']))\n break\n\n epoch_itr = trainer.get_train_iterator(\n epoch_itr.next_epoch_idx,\n combine=False, # TODO to be checked\n # sharded data: get train iterator for next epoch\n load_dataset=(os.pathsep in args['task']['data']),\n )\n\n train_meter.stop()\n LOGGER.info('done training in {:.1f} seconds'.format(train_meter.sum))\n\n\ndef distributed_main(i, args, start_rank=0):\n args['distributed_training']['device_id'] = i\n if args['distributed_training']['distributed_rank'] is None: # torch.multiprocessing.spawn\n args['distributed_training']['distributed_rank'] = start_rank + i\n single_main(args, init_distributed=True)\n\n\ndef cli_main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--yaml_file\", \"-f\", type=str, help=\"load {yaml_file}.yml for train\",\n default='config/codetrans/java-csharp',\n )\n args = parser.parse_args()\n yaml_file = os.path.join(os.path.dirname(__file__), '{}.yml'.format(args.yaml_file))\n LOGGER.info('Load arguments in {}'.format(yaml_file))\n args = load_yaml(yaml_file)\n LOGGER.info(args)\n\n if args['distributed_training']['distributed_init_method'] is None:\n distributed_utils.infer_init_method(args)\n\n if args['distributed_training']['distributed_init_method'] is not None:\n # distributed training\n if torch.cuda.device_count() > 1 and not args['distributed_training']['distributed_no_spawn']:\n start_rank = args['distributed_training']['distributed_rank']\n args['distributed_training']['distributed_rank'] = None # assign automatically\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, start_rank),\n nprocs=torch.cuda.device_count(),\n )\n else:\n distributed_main(args['distributed_training']['device_id'], args)\n elif args['distributed_training']['distributed_world_size'] > 1:\n # fallback for single node with multiple GPUs\n assert args['distributed_training']['distributed_world_size'] <= torch.cuda.device_count()\n port = random.randint(10000, 20000)\n args['distributed_training']['distributed_init_method'] = 'tcp://localhost:{port}'.format(port=port)\n args['distributed_training']['distributed_rank'] = None # set based on device id\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args,),\n nprocs=args['distributed_training']['distributed_world_size'],\n )\n else:\n LOGGER.info('single GPU 
training...')\n single_main(args)\n\n\nif __name__ == '__main__':\n cli_main()\n" ]
[ [ "torch.sigmoid", "torch.zeros", "torch.cat", "torch.sum", "torch.tanh", "torch.chunk" ], [ "torch.multiprocessing.spawn", "torch.cuda.set_device", "torch.cuda.is_available", "torch.cuda.device_count", "torch.multiprocessing.set_sharing_strategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Komanawa-Solutions-Ltd/SLMACC-2020-CSRA
[ "914b6912c5f5b522107aa9406fb3d823e61c2ebe" ]
[ "Storylines/storyline_runs/run_SWG_for_all_months.py" ]
[ "\"\"\"\n the goal is to generate the full suite of options to pick from.\n Author: Matt Hanson\n Created: 24/02/2021 10:48 AM\n \"\"\"\nimport os\nimport datetime\nimport shutil\nimport ksl_env\nimport pandas as pd\nfrom Climate_Shocks import climate_shocks_env\nfrom BS_work.SWG.SWG_wrapper import default_vcf, default_base_dir, clean_swg\nfrom Storylines.storyline_building_support import make_sampling_options\nfrom BS_work.SWG.SWG_multiprocessing import run_swg_mp\n\nindividual_dir = os.path.join(climate_shocks_env.temp_storyline_dir, 'individual_runs')\nlog_dir = r\"D:\\mh_unbacked\\SLMACC_2020\\SWG_runs\\logs\"\nif not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n\ndef make_storyline_files():\n if not os.path.exists(individual_dir):\n os.makedirs(individual_dir)\n # there are 69 unique event/month comparisons.\n\n all_events = make_sampling_options()\n\n for m in range(1, 13):\n for e in all_events[m]:\n fname = 'm{:02d}-{}.csv'.format(m, '-'.join(e))\n\n with open(os.path.join(individual_dir, fname), 'w') as f:\n f.write('month,year,temp_class,precip_class,rest\\n')\n f.write('{},{},{},{},{}\\n'.format(m, 2025, *e[0:-1], 0))\n\n\ndef generate_SWG_output_support(vcfs=default_vcf, base_dirs=default_base_dir):\n make_storyline_files()\n # delete the old outputs\n shutil.rmtree(os.path.join(default_base_dir, 'Output'))\n\n print('running SWG to create the output files')\n storylines = []\n outdirs = []\n for p in os.listdir(individual_dir):\n storylines.append(os.path.join(individual_dir, p))\n outdir = os.path.join(ksl_env.slmmac_dir_unbacked, 'SWG_runs', 'populate_outputs', p.split('.')[0])\n outdirs.append(outdir)\n run_id = datetime.datetime.now().isoformat().replace(':', '-').split('.')[0]\n run_swg_mp(storyline_paths=storylines, outdirs=outdirs, ns=1,\n vcfs=vcfs, base_dirs=base_dirs,\n log_path=os.path.join(log_dir, 'generate_SWG_output_support_{}.txt'.format(run_id)),\n pool_size=1)\n\n\ndef generate_all_swg(n, n_is_final, outdir, vcfs=default_vcf, base_dirs=default_base_dir,\n prob_path=os.path.join(climate_shocks_env.supporting_data_dir, 'prob_gen_event_swg.csv')):\n \"\"\"\n genterate a full suite of values\n :param n: integer the number to run (see n_is_final)\n :param n_is_final: bool, if True then run enough runs that statistically n== number after the run if False then\n simply run n runs\n :param outdir: directory to save the data\n :param base_dirs: base_dirs can be just single path\n :param vcfs: vcfs can be single path\n :param prob_path: path to read the probability data from, only used if n_is_final=True\n :return:\n \"\"\"\n assert isinstance(n, int), 'n must be an int'\n assert isinstance(n_is_final, bool)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n if n_is_final:\n prob_data = pd.read_csv(prob_path, index_col=0)\n if prob_data.isna().values.any():\n raise ValueError('null data in prob data, this should not be, check: {}'.format(prob_path))\n prob_data = prob_data.loc[:, '0'].to_dict()\n\n print('running SWG')\n storylines = []\n ns = []\n outdirs = []\n for p in os.listdir(individual_dir):\n if n_is_final:\n temp = int(n / (1 - prob_data[p.replace('.csv', '')]) + 1)\n ns.append(temp)\n else:\n ns.append(n)\n storylines.append(os.path.join(individual_dir, p))\n o = os.path.join(outdir, p.split('.')[0])\n outdirs.append(o)\n run_id = datetime.datetime.now().isoformat().replace(':', '-').split('.')[0]\n run_swg_mp(storyline_paths=storylines, outdirs=outdirs, ns=ns,\n vcfs=vcfs, base_dirs=base_dirs,\n log_path=os.path.join(log_dir, 
'generate_all_swg_{}.txt'.format(run_id)))\n\n\ndef clean_individual(ind_dir, duplicate=True, merge=True):\n for p in os.listdir(ind_dir):\n if '.nc' in p:\n continue\n swg_dir = os.path.join(ind_dir, p)\n t = clean_swg(swg_dir=swg_dir, yml_path=os.path.join(swg_dir, 'ind.yml'),\n duplicate=duplicate, merge=merge, nc_outpath=os.path.join(ind_dir, '{}_all.nc'.format(p)))\n print('{}: removed: {}'.format(p, len(t)))\n\n\nif __name__ == '__main__':\n run = False # to prevent accidental re-run\n if run:\n generate_SWG_output_support()\n full_dir = os.path.join(ksl_env.slmmac_dir_unbacked, 'SWG_runs', 'full_SWG')\n generate_all_swg(10000, True, full_dir)\n clean_individual(full_dir)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
nlpcl-lab/finelectra
[ "01a6cc175fba3f066a5cb77f30d52505dd004524" ]
[ "finetuning_withtorch.py" ]
[ "#############################################################################\n# prepare_dataset.py: return dataset according to the task\n# ref: huggingface/transformers/examples/run_glue.py\n# monologg/KoELECTRA/blob/master/finetune/processor/seq_cls.py\n# 현재 5e-05 -> 04로 바꾼 상태\n#############################################################################\nimport os\nimport re\nimport json\nimport torch\nimport logging\nimport random\nimport argparse\nimport collections\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nfrom tqdm import tqdm, trange\nfrom collections import defaultdict\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom sklearn.metrics import f1_score, classification_report\nfrom torch.utils.data.dataset import random_split\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import *\nfrom attrdict import AttrDict\nfrom natsort import natsorted\nfrom hgtk.text import decompose, compose\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\nimport prepare_dataset\nfrom prepare_dataset import seq_cls_output_modes as output_modes\nfrom prepare_dataset import CHA\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\ndef flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef acc_and_f1(args, preds, labels):\n # target_names, labels_order = zip(*sorted(label_dict.items(), key=lambda x: x[1]))\n # target_names = ['funded', 'expired',...], labels_order = [1, 2, 3, 4]\n # return classification_report(y_true=labels, y_pred=preds, labels=labels_order, target_names=target_names)\n return flatten(classification_report(y_true=labels, y_pred=preds, output_dict=True))\n\n\ndef train(args, model, train_dataset, dev_dataset=None, test_dataset=None):\n \"\"\" Train the model \"\"\"\n # Settings: parallel & distributed system, train_epoch & train data setting, optimizer, scheduler (for warmup)\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n \"\"\"\n if 
os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n \"\"\"\n if os.path.isfile(os.path.join(args.output_dir, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.output_dir, \"scheduler.pt\")\n ):\n # Load optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.output_dir, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.output_dir, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.train_batch_size // args.n_gpu)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n logger.info(\" Logging steps = %d\", args.logging_steps)\n logger.info(\" Save steps = %d\", args.save_steps)\n\n global_step = 0 # num of batches seen by the graph\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n \"\"\"\n if os.path.exists(args.model_name_or_path):\n # set global_step to global_step of last saved checkpoint from model path\n global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n \"\"\"\n if os.path.exists(args.output_dir) and len(glob(args.output_dir + \"checkpoint-*\")):\n # set global_step to global_step of last saved checkpoint from model path\n output_files = natsorted(glob(args.output_dir + \"checkpoint-*\"))\n global_step = int(output_files[-1].split(\"-\")[-1].split(\"/\")[0])\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) 
// args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n\n tr_loss, logging_loss = 0.0, 0.0\n tr_losses = []\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0],\n )\n\n model.zero_grad()\n set_seed(args)\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"labels\": batch[3]\n }\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n\n loss = outputs[0]\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step()\n model.zero_grad()\n global_step += 1\n\n # show step / loss / learning rate\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logs = {}\n if args.local_rank == -1 and args.evaluate_test_during_training:\n results = evaluate(args, model, test_dataset, \"test\", global_step)\n elif args.local_rank == -1 and not args.evaluate_test_during_training:\n results = evaluate(args, model, dev_dataset, \"dev\", global_step)\n\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / args.logging_steps\n learning_rate_scalar = scheduler.get_lr()[0]\n logs[\"learning_rate\"] = learning_rate_scalar\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n for key, value in logs.items():\n tb_writer.add_scalar(key, value, global_step)\n tb_writer.flush()\n print(json.dumps({**logs, **{\"step\": global_step}}))\n tr_losses.append((global_step, loss_scalar))\n\n # Save model checkpoint\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to 
{}\".format(output_dir))\n\n if args.save_optimizer:\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to {}\".format(output_dir))\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n from matplotlib import pyplot as plt\n plt.plot(*zip(*tr_losses))\n plt.show()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, eval_dataset, mode, global_step=None):\n results = {}\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n output_dir = os.path.join(args.output_dir, mode)\n if not os.path.exists(output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(output_dir)\n\n # Eval!\n if global_step != None:\n logger.info(\"***** Running evaluation on {} dataset ({} step) *****\".format(mode, global_step))\n else:\n logger.info(\"***** Running evaluation on {} dataset *****\".format(mode))\n logger.info(\" Num examples = {}\".format(len(eval_dataset)))\n logger.info(\" Eval Batch size = {}\".format(args.eval_batch_size))\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n if output_modes[args.task] == \"classification\":\n preds = np.argmax(preds, axis=1)\n result = acc_and_f1(args, preds, out_label_ids)\n elif output_modes[args.task] == \"regression\":\n preds = np.squeeze(preds)\n result = {\"RMSE\": eval_loss ** 0.5}\n\n results.update(result)\n\n output_eval_file = os.path.join(output_dir, \"{}-{}.txt\".format(mode, global_step) if global_step else \"{}.txt\".format(mode))\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results on {} dataset *****\".format(mode))\n for key in sorted(results.keys()):\n logger.info(\" {} = {}\".format(key, str(results[key])))\n writer.write(\" {} = {}\\n\".format(key, str(results[key])))\n\n return results\n\n\ndef display_examples():\n if re.split('-', args.model_name_or_path)[-1] == 'large':\n generator_name = '-'.join(re.split('-', args.model_name_or_path)[:-2] + ['generator'] + [re.split('-', args.model_name_or_path)[-1]])\n else:\n generator_name = 
'-'.join(\n re.split('-', args.model_name_or_path)[:-1] + ['generator'])\n tokenizer = AutoTokenizer.from_pretrained(generator_name)\n model = AutoModelForMaskedLM.from_pretrained(generator_name)\n nlp = pipeline(\"fill-mask\", model=model, tokenizer=tokenizer)\n if CHA:\n pre = decompose(\"오늘 서울에서 {} 먹고 왔다.\".format(nlp.tokenizer.mask_token))\n pre = nlp(pre)\n for e in pre:\n e['sequence'] = compose(e['sequence'])\n print(pre)\n else:\n pre = \"오늘 서울에서 {} 먹고 왔다.\".format(nlp.tokenizer.mask_token)\n pre = nlp(pre)\n print(pre)\n # inputs = tokenizer(decompose(\"오늘 서울에서 {} 먹고 왔다.\".format(nlp.tokenizer.mask_token)), return_tensors=\"pt\")\n # outputs = model(**inputs)\n\n\ndef main(args):\n # test electra-base-generator, MLM task에는 generator 쓰세요\n display_examples()\n\n # 0. prepare dataset setting & load configuration\n processor = prepare_dataset.seq_cls_processors[args.task](args)\n labels = processor.get_labels()\n\n # 1. prepare dataset and model\n # tip: Handling Nan elements - (df.isnull().sum(), df.notnull(), df.dropna(), df.fillna())\n\n config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=len(labels),\n id2label={str(i): label for i, label in enumerate(labels)},\n label2id={label: i for i, label in enumerate(labels)},\n )\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, config=config)\n\n model.to(args.device)\n train_dataset = prepare_dataset.prepare_dataset(args, tokenizer, mode=\"train\") if args.train_file else None\n dev_dataset = prepare_dataset.prepare_dataset(args, tokenizer, mode=\"dev\") if args.dev_file else None\n test_dataset = prepare_dataset.prepare_dataset(args, tokenizer, mode=\"test\") if args.test_file else None\n\n if dev_dataset == None:\n args.evaluate_test_during_training = True # If there is no dev dataset, only use testset\n\n # 2. Training\n if args.do_train:\n global_step, tr_loss = train(args, model, train_dataset, dev_dataset, test_dataset)\n logger.info(\" global_step = {}, average loss = {}\".format(global_step, tr_loss))\n\n # 2.2 Save trained model and re-load it\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and args.n_gpu > 1:\n # Create output directory if needed\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = AutoModelForSequenceClassification.from_pretrained(args.output_dir)\n tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n model.to(args.device)\n\n # 3. 
Evaluation\n results = {}\n if args.do_eval:\n checkpoints = list(\n os.path.dirname(c) for c in\n sorted(glob(args.output_dir + \"/**/\" + \"pytorch_model.bin\", recursive=True))\n )\n if not args.eval_all_checkpoints:\n checkpoints = checkpoints[-1:]\n else:\n logging.getLogger(\"transformers.configuration_utils\").setLevel(logging.WARN) # Reduce logging\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1]\n model = AutoModelForSequenceClassification.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, test_dataset, mode=\"test\", global_step=global_step)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as f_w:\n for key in sorted(results.keys()):\n f_w.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n\ndef get_default_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--task\", type=str, default='nsmc')\n pre_args = parser.parse_args()\n\n # Read from config file and make args\n with open(os.path.join('twitterko-{}electra-base-{}-finetuning-config.json'.format('cha-' if CHA else '', pre_args.task))) as f:\n args = AttrDict(json.load(f))\n\n args.output_dir = os.path.join('./', args.output_dir)\n args.local_rank = -1\n args.fp16 = False\n args.fp16_opt_level = \"O1\"\n # help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\n # See details at https://nvidia.github.io/apex/amp.html\",)\n if args.local_rank == -1:\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n args.device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier()\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n args.device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n logger.info(\"Training/evaluation parameters {}\".format(args))\n\n return args\n\n\nif __name__ == '__main__':\n args = get_default_args()\n main(args)\n" ]
[ [ "numpy.squeeze", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "sklearn.metrics.classification_report", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.distributed.barrier", "numpy.argmax", "torch.cuda.device_count", "matplotlib.pyplot.show", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sisl/structmechmod
[ "069d680822e9aae7e4e198454048be59d632415d" ]
[ "structmechmod/odesolver.py" ]
[ "#\n# File: odesolver.py\n#\n\nimport abc\nimport torch\n\n\nclass Euler:\n\n @staticmethod\n def step_func(func, t, dt, y, u, transforms=None):\n return tuple(dt * f_ for f_ in func(t, y, u=u))\n\n @property\n def order(self):\n return 1\n\n\nclass Midpoint:\n\n @staticmethod\n def step_func(func, t, dt, y, u, transforms=None):\n y_mid = tuple(y_ + f_ * dt / 2 for y_, f_ in zip(y, func(t, y, u=u)))\n y_mid = tuple(trans(y_) for y_, trans in zip(y_mid, transforms))\n return tuple(dt * f_ for f_ in func(t + dt / 2, y_mid, u=u))\n\n @property\n def order(self):\n return 2\n\n\nclass RK4:\n\n @staticmethod\n def step_func(func, t, dt, y, u, transforms=None):\n return rk4_step_func(func, t, dt, y, u=u)\n\n @property\n def order(self):\n return 4\n\n\ndef rk4_alt_step_func(func, t, dt, y, k1=None, u=None):\n \"\"\"Smaller error with slightly more compute.\"\"\"\n if k1 is None:\n k1 = func(t, y, u=u)\n k2 = func(t + dt / 3, tuple(y_ + dt * k1_ / 3 for y_, k1_ in zip(y, k1)), u=u)\n k3 = func(t + dt * 2 / 3,\n tuple(y_ + dt * (k1_ / -3 + k2_) for y_, k1_, k2_ in zip(y, k1, k2)), u=u)\n k4 = func(t + dt,\n tuple(y_ + dt * (k1_ - k2_ + k3_) for y_, k1_, k2_, k3_ in zip(y, k1, k2, k3)), u=u)\n return tuple((k1_ + 3 * k2_ + 3 * k3_ + k4_) * (dt / 8)\n for k1_, k2_, k3_, k4_ in zip(k1, k2, k3, k4))\n\ndef rk4_step_func(func, t, dt, y, k1=None, u=None):\n if k1 is None: \n k1 = func(t, y, u=u)\n k2 = func(t + dt / 2, tuple(y_ + dt * k1_ / 2 for y_, k1_ in zip(y, k1)), u=u)\n k3 = func(t + dt / 2, tuple(y_ + dt * k2_ / 2 for y_, k2_ in zip(y, k2)), u=u)\n k4 = func(t + dt, tuple(y_ + dt * k3_ for y_, k3_ in zip(y, k3)), u=u)\n return tuple((k1_ + 2 * k2_ + 2 * k3_ + k4_) * (dt / 6) for k1_, k2_, k3_, k4_ in zip(k1, k2, k3, k4))\n\n\ndef odestep(func, t, dt, y0, u=None, method='midpoint', transforms=None):\n tensor_input, func, y0, t = _check_inputs(func, y0, t)\n if transforms is None:\n transforms = [lambda x: x for _ in range(len(y0))]\n\n dy = SOLVERS[method].step_func(func, t, dt, y0, u=u, transforms=transforms)\n y = tuple(trans(y0_ + dy_) for y0_, dy_, trans in zip(y0, dy, transforms))\n if tensor_input:\n y = y[0]\n\n return y\n\n\nSOLVERS = {\n 'euler': Euler,\n 'midpoint': Midpoint,\n 'rk4': RK4,\n}\n\n\ndef _check_inputs(func, y0, t):\n\n tensor_input = False\n if torch.is_tensor(y0):\n tensor_input = True\n y0 = (y0,)\n\n _base_nontuple_func_ = func\n func = lambda t, y, u: (_base_nontuple_func_(t, y[0], u),)\n assert isinstance(y0, tuple), 'y0 must be either a torch.Tensor or a tuple'\n\n for y0_ in y0:\n assert torch.is_tensor(y0_), 'each element must be a torch.Tensor but received {}'.format(\n type(y0_))\n\n for y0_ in y0:\n if not torch.is_floating_point(y0_):\n raise TypeError('`y0` must be a floating point Tensor but is a {}'.format(y0_.type()))\n\n if not torch.is_floating_point(t):\n raise TypeError('`t` must be a floating point Tensor but is a {}'.format(t.type()))\n\n return tensor_input, func, y0, t\n" ]
[ [ "torch.is_floating_point", "torch.is_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
QData/SanityCheck
[ "7cbf37296f84584b5372a1d556911ad4c1dfe138" ]
[ "sanity_checker/explanation.py" ]
[ "from enum import Enum\n\nfrom deepexplain.tensorflow import DeepExplain\nfrom vis.visualization import visualize_saliency, visualize_cam\nfrom vis.utils import utils as vutils\n\nimport tensorflow as tf\nimport numpy as np\n\nimport keras\nfrom keras.models import Model\nfrom keras import backend as K\n\nclass ExplanationMethods(Enum):\n SALIENCY = 'saliency'\n INTEGRATED_GRADIENTS = 'intgrad'\n GRADIENT_INPUT = 'grad*input'\n DEEPLIFT = 'deeplift'\n ELRP = 'elrp'\n BACKPROP = 'backprop'\n GRADCAM = 'gradcam'\n\nclass BaseExplanation():\n def explain(self):\n raise NotImplementedError\n\nclass KerasExplanation(BaseExplanation):\n def __init__(self, methods, xs, ys, target=-1, batch_size=None):\n self.methods = methods\n self.target = target\n self.xs = xs\n self.ys = ys\n self.batch_size = batch_size\n\n def explain(self, model, **kwargs):\n \"\"\"\n Returns a list of explanations for each method in methods\n \"\"\"\n with DeepExplain(session=K.get_session()) as de:\n input_tensor = model.layers[0].input # get input tensor\n fModel = Model(inputs=input_tensor, outputs = model.layers[self.target].output)\n target_tensor = fModel(input_tensor) # get output tensor\n\n result = []\n for i in range(len(self.methods)):\n method = self.methods[i]\n\n if method is ExplanationMethods.BACKPROP:\n gbp = []\n for i in range(len(self.xs)):\n class_idx = np.nonzero(self.ys[i])[0][0]\n gbp.append(visualize_saliency(model, -1, filter_indices=class_idx, \n seed_input=self.xs[i], \n backprop_modifier='guided'))\n result.append(np.array(gbp))\n elif method is ExplanationMethods.GRADCAM:\n gradcam = []\n for i in range(len(self.xs)):\n class_idx = np.nonzero(self.ys[i])[0][0]\n gradcam.append(visualize_cam(model, -1, filter_indices=class_idx, \n seed_input=self.xs[i], \n backprop_modifier='guided'))\n result.append(np.array(gradcam))\n else:\n explanation = de.explain(method.value, target_tensor, input_tensor, self.xs, ys=self.ys, batch_size=self.batch_size)\n result.append(explanation)\n return result\n\n def get_method_names(self):\n return [method.value for method in self.methods]" ]
[ [ "numpy.array", "numpy.nonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shinying/METER
[ "4fa79cbeb7bb5a25a76b56b92f9ba73f9118b9fe" ]
[ "preproc/preproc_activitynetqa.py" ]
[ "import json\r\nimport os\r\nimport collections\r\nimport pandas as pd\r\n\r\n\r\ntrain_q = json.load(open(\"train_q.json\", \"r\"))\r\nval_q = json.load(open(\"val_q.json\", \"r\"))\r\ntest_q = json.load(open(\"test_q.json\", \"r\"))\r\n\r\ntrain_a = json.load(open(\"train_a.json\", \"r\"))\r\nval_a = json.load(open(\"val_a.json\", \"r\"))\r\ntest_a = json.load(open(\"test_a.json\", \"r\"))\r\n\r\n\r\ndef get_vocabulary(train_a, save=False):\r\n ans = [x[\"answer\"] for x in train_a]\r\n train_counter = collections.Counter(ans)\r\n most_common = train_counter.most_common()\r\n vocab = {}\r\n for i, x in enumerate(most_common): # 1654 answers present twice\r\n if x[1] >= 2:\r\n vocab[x[0]] = i\r\n else:\r\n break\r\n print(len(vocab))\r\n if save:\r\n with open(\"vocab.json\", \"w\") as outfile:\r\n json.dump(vocab, outfile)\r\n return vocab\r\n\r\n\r\ndef json_to_df(vocab, train_q, train_a, val_q, val_a, test_q, test_a, save=False):\r\n # Verify alignment of files\r\n for x, y in zip(train_q, train_a):\r\n assert x[\"question_id\"] == y[\"question_id\"]\r\n for x, y in zip(val_q, val_a):\r\n assert x[\"question_id\"] == y[\"question_id\"]\r\n for x, y in zip(test_q, test_a):\r\n assert x[\"question_id\"] == y[\"question_id\"]\r\n\r\n train_df = pd.DataFrame(\r\n {\r\n \"question\": [x[\"question\"] for x in train_q],\r\n \"answer\": [x[\"answer\"] for x in train_a],\r\n \"video_id\": [x[\"video_name\"] for x in train_q],\r\n \"type\": [x[\"type\"] for x in train_a],\r\n },\r\n columns=[\"question\", \"answer\", \"video_id\", \"type\"],\r\n )\r\n print(len(train_df))\r\n train_df = train_df[\r\n train_df[\"answer\"].isin(vocab)\r\n ] # do not use train samples of which the answer is not in the vocab\r\n val_df = pd.DataFrame(\r\n {\r\n \"question\": [x[\"question\"] for x in val_q],\r\n \"answer\": [x[\"answer\"] for x in val_a],\r\n \"video_id\": [x[\"video_name\"] for x in val_q],\r\n \"type\": [x[\"type\"] for x in val_a],\r\n },\r\n columns=[\"question\", \"answer\", \"video_id\", \"type\"],\r\n )\r\n test_df = pd.DataFrame(\r\n {\r\n \"question\": [x[\"question\"] for x in test_q],\r\n \"answer\": [x[\"answer\"] for x in test_a],\r\n \"video_id\": [x[\"video_name\"] for x in test_q],\r\n \"type\": [x[\"type\"] for x in test_a],\r\n },\r\n columns=[\"question\", \"answer\", \"video_id\", \"type\"],\r\n )\r\n\r\n print(len(train_df), len(val_df), len(test_df))\r\n\r\n if save:\r\n train_df.to_csv(\"train.csv\", index=False)\r\n val_df.to_csv(\"val.csv\", index=False)\r\n test_df.to_csv(\"test.csv\", index=False)\r\n\r\n return train_df, val_df, test_df\r\n\r\n\r\nvocab = get_vocabulary(train_a, True)\r\ntrain_df, val_df, test_df = json_to_df(\r\n vocab, train_q, train_a, val_q, val_a, test_q, test_a, True\r\n)\r\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ohong/pretty-whale
[ "9435d2a04dea7509f10cfd17e44ab8d146c1eb5f" ]
[ "cgi-bin/paint_x2_unet/cgi_exe.py" ]
[ "#!/usr/bin/env python\n\n\nimport numpy as np\nimport chainer\nimport cv2\n\n#import chainer.functions as F\n#import chainer.links as L\n#import six\n#import os\n\nfrom chainer import cuda, serializers, Variable # , optimizers, training\n#from chainer.training import extensions\n#from train import Image2ImageDataset\nfrom img2imgDataset import ImageAndRefDataset\n\nimport unet\nimport lnet\n\n\nclass Painter:\n\n def __init__(self, gpu=0):\n\n print(\"start\")\n self.root = \"./images/\"\n self.batchsize = 1\n self.outdir = self.root + \"out/\"\n self.outdir_min = self.root + \"out_min/\"\n self.gpu = gpu\n self._dtype = np.float32\n\n print(\"load model\")\n if self.gpu >= 0:\n cuda.get_device(self.gpu).use()\n cuda.set_max_workspace_size(64 * 1024 * 1024) # 64MB\n chainer.Function.type_check_enable = False\n self.cnn_128 = unet.UNET()\n self.cnn_512 = unet.UNET()\n if self.gpu >= 0:\n self.cnn_128.to_gpu()\n self.cnn_512.to_gpu()\n #lnn = lnet.LNET()\n #serializers.load_npz(\"./cgi-bin/wnet/models/model_cnn_128_df_4\", cnn_128)\n #serializers.load_npz(\"./cgi-bin/paint_x2_unet/models/model_cnn_128_f3_2\", cnn_128)\n serializers.load_npz(\n \"./cgi-bin/paint_x2_unet/models/unet_128_standard\", self.cnn_128)\n #serializers.load_npz(\"./cgi-bin/paint_x2_unet/models/model_cnn_128_ua_1\", self.cnn_128)\n #serializers.load_npz(\"./cgi-bin/paint_x2_unet/models/model_m_1.6\", self.cnn)\n serializers.load_npz(\n \"./cgi-bin/paint_x2_unet/models/unet_512_standard\", self.cnn_512)\n #serializers.load_npz(\"./cgi-bin/paint_x2_unet/models/model_p2_1\", self.cnn)\n #serializers.load_npz(\"./cgi-bin/paint_x2_unet/models/model_10000\", self.cnn)\n #serializers.load_npz(\"./cgi-bin/paint_x2_unet/models/liner_f\", lnn)\n\n def save_as_img(self, array, name):\n array = array.transpose(1, 2, 0)\n array = array.clip(0, 255).astype(np.uint8)\n array = cuda.to_cpu(array)\n (major, minor, _) = cv2.__version__.split(\".\")\n if major == '3':\n img = cv2.cvtColor(array, cv2.COLOR_YUV2RGB)\n else:\n img = cv2.cvtColor(array, cv2.COLOR_YUV2BGR)\n cv2.imwrite(name, img)\n\n def liner(self, id_str):\n if self.gpu >= 0:\n cuda.get_device(self.gpu).use()\n\n image1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)\n image1 = np.asarray(image1, self._dtype)\n if image1.ndim == 2:\n image1 = image1[:, :, np.newaxis]\n img = image1.transpose(2, 0, 1)\n x = np.zeros((1, 3, img.shape[1], img.shape[2]), dtype='f')\n if self.gpu >= 0:\n x = cuda.to_gpu(x)\n\n lnn = lnet.LNET()\n with chainer.no_backprop_mode():\n with chainer.using_config('train', False):\n y = lnn.calc(Variable(x))\n\n self.save_as_img(y.data[0], self.root + \"line/\" + id_str + \".png\")\n\n def colorize(self, id_str, step='C', blur=0, s_size=128,colorize_format=\"png\"):\n if self.gpu >= 0:\n cuda.get_device(self.gpu).use()\n\n _ = {'S': \"ref/\", 'L': \"out_min/\", 'C': \"ref/\"}\n dataset = ImageAndRefDataset(\n [id_str + \".png\"], self.root + \"line/\", self.root + _[step])\n\n _ = {'S': True, 'L': False, 'C': True}\n sample = dataset.get_example(0, minimize=_[step], blur=blur, s_size=s_size)\n\n _ = {'S': 0, 'L': 1, 'C': 0}[step]\n sample_container = np.zeros(\n (1, 4, sample[_].shape[1], sample[_].shape[2]), dtype='f')\n sample_container[0, :] = sample[_]\n\n if self.gpu >= 0:\n sample_container = cuda.to_gpu(sample_container)\n\n cnn = {'S': self.cnn_128, 'L': self.cnn_512, 'C': self.cnn_128}\n with chainer.no_backprop_mode():\n with chainer.using_config('train', False):\n image_conv2d_layer = cnn[step].calc(Variable(sample_container))\n del 
sample_container\n\n if step == 'C':\n input_bat = np.zeros((1, 4, sample[1].shape[1], sample[1].shape[2]), dtype='f')\n print(input_bat.shape)\n input_bat[0, 0, :] = sample[1]\n\n output = cuda.to_cpu(image_conv2d_layer.data[0])\n del image_conv2d_layer # release memory\n\n for channel in range(3):\n input_bat[0, 1 + channel, :] = cv2.resize(\n output[channel, :],\n (sample[1].shape[2], sample[1].shape[1]),\n interpolation=cv2.INTER_CUBIC)\n\n if self.gpu >= 0:\n link = cuda.to_gpu(input_bat, None)\n else:\n link = input_bat\n with chainer.no_backprop_mode():\n with chainer.using_config('train', False):\n image_conv2d_layer = self.cnn_512.calc(Variable(link))\n del link # release memory\n\n image_out_path = {\n 'S': self.outdir_min + id_str + \".png\",\n 'L': self.outdir + id_str + \".png\",\n 'C': self.outdir + id_str + \"_0.\" + colorize_format}\n self.save_as_img(image_conv2d_layer.data[0], image_out_path[step])\n del image_conv2d_layer\n\n\n\nif __name__ == '__main__':\n for n in range(1):\n p = Painter()\n print(n)\n p.colorize(n * p.batchsize)\n" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JuliusvR/L5NeuronSimulation
[ "1fc68c7367c439e1f9c9b73a15a95609858ec720" ]
[ "L5NeuronSimulation/Group_EPSC_Tuning/print_results.py" ]
[ "import sys\nimport numpy as np\nimport pandas as pd\n\ndef main():\n ipscs_filename = sys.argv[-1]\n if __file__ != sys.argv[-1]:\n ipscs_filename = sys.argv[-1]\n else:\n raise Exception(\"Must be run as python print_results <filename>\")\n\n ipsc_csv = pd.read_csv(ipscs_filename)\n ipscs = ipsc_csv[\"EPSC\"]\n\n print(\"Mean:\", np.mean(ipscs) * 1000, \"pA\")\n print(\"Std:\", np.std(ipscs) * 1000, \"pA\")\n\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.std", "pandas.read_csv", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
DPBayes/dppp
[ "6b6cbf29a1605aebfeddf2813134fee3db931689" ]
[ "examples/vae.py" ]
[ "# SPDX-License-Identifier: Apache-2.0\n# SPDX-FileCopyrightText: © -2019 Copyright Contributors to the Pyro project.\n# SPDX-FileCopyrightText: © 2019- d3p Developers and their Assignees\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"VAE example from numpyro.\n\noriginal: https://github.com/pyro-ppl/numpyro/blob/master/examples/vae.py\n\"\"\"\n\nimport logging\nlogging.getLogger().setLevel('INFO')\n\nimport os\n\n# allow example to find d3p without installing\nimport sys\nsys.path.append(os.path.dirname(sys.path[0]))\n####\n\nimport argparse\nimport time\n\nimport matplotlib.pyplot as plt\n\nimport jax.numpy as jnp\nfrom jax import jit, lax, random\nfrom jax.experimental import stax\nfrom jax.random import PRNGKey\nimport jax\n\nimport numpy as np\n\nimport numpyro\nimport numpyro.optim as optimizers\nimport numpyro.distributions as dist\nfrom numpyro.primitives import sample, plate\nfrom numpyro.handlers import scale\nfrom numpyro.infer import Trace_ELBO as ELBO, SVI\n\nfrom d3p.svi import DPSVI\nfrom d3p.modelling import sample_multi_posterior_predictive\nfrom d3p.minibatch import split_batchify_data, subsample_batchify_data\nfrom d3p.dputil import approximate_sigma\nfrom d3p.util import is_int_scalar\n\nfrom datasets import MNIST, load_dataset\n\nRESULTS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '.results'))\nos.makedirs(RESULTS_DIR, exist_ok=True)\n\n\ndef encoder(hidden_dim, z_dim):\n \"\"\"Defines the encoder, i.e., the network taking us from observations\n to (a distribution of) latent variables.\n\n z is following a normal distribution, thus needs mean and variance.\n\n Network structure:\n x -> dense layer of hidden_dim with softplus activation --> dense layer of z_dim ( = means/loc of z)\n |-> dense layer of z_dim with (elementwise) exp() as activation func ( = variance of z )\n (note: the exp() as activation function serves solely to ensure positivity of the variance)\n\n :param hidden_dim: number of nodes in the hidden layer\n :param z_dim: dimension of the latent variable z\n :return: (init_fun, apply_fun) pair of the encoder: (encoder_init, encode)\n \"\"\"\n return stax.serial(\n stax.Dense(hidden_dim, W_init=stax.randn()), stax.Softplus,\n stax.FanOut(2),\n stax.parallel(stax.Dense(z_dim, W_init=stax.randn()),\n stax.serial(stax.Dense(z_dim, W_init=stax.randn()), stax.Exp)),\n )\n\n\ndef decoder(hidden_dim, out_dim):\n \"\"\"Defines the decoder, i.e., the network taking us from latent\n variables back to observations (or at least observation space).\n\n Network structure:\n z -> dense layer of hidden_dim with softplus activation -> dense layer of out_dim with sigmoid activation\n\n :param hidden_dim: number of nodes in the hidden layer\n :param out_dim: dimensions of the observations\n\n :return: (init_fun, apply_fun) pair of the decoder: (decoder_init, decode)\n \"\"\"\n return stax.serial(\n stax.Dense(hidden_dim, W_init=stax.randn()), stax.Softplus,\n stax.Dense(out_dim, W_init=stax.randn()), stax.Sigmoid,\n )\n\ndef 
model(batch_or_batchsize, z_dim, hidden_dim, out_dim=None, num_obs_total=None):\n \"\"\"Defines the generative probabilistic model: p(x|z)p(z)\n\n The model is conditioned on the observed data\n\n :param batch: a batch of observations\n :param hidden_dim: dimensions of the hidden layers in the VAE\n :param z_dim: dimensions of the latent variable / code\n :param out_dim: number of dimensions in a single output sample (flattened)\n\n :return: (named) sample x from the model observation distribution p(x|z)p(z)\n \"\"\"\n if is_int_scalar(batch_or_batchsize):\n batch = None\n batch_size = batch_or_batchsize\n if out_dim is None:\n raise ValueError(\"if no batch is provided, out_dim must be given\")\n else:\n batch = batch_or_batchsize\n assert(jnp.ndim(batch) == 3)\n batch_size = jnp.shape(batch)[0]\n batch = jnp.reshape(batch, (batch_size, -1)) # squash each data item into a one-dimensional array (preserving only the batch size on the first axis)\n out_dim = jnp.shape(batch)[1]\n num_obs_total = batch_size if num_obs_total is None else num_obs_total\n\n decode = numpyro.module('decoder', decoder(hidden_dim, out_dim), (batch_size, z_dim))\n with plate('batch', num_obs_total, batch_size):\n z = sample('z', dist.Normal(jnp.zeros((z_dim,)), jnp.ones((z_dim,))).to_event(1)) # prior on z is N(0,I)\n img_loc = decode(z) # evaluate decoder (p(x|z)) on sampled z to get means for output bernoulli distribution\n x = sample('obs', dist.Bernoulli(img_loc).to_event(1), obs=batch) # outputs x are sampled from bernoulli distribution depending on z and conditioned on the observed data\n return x\n\n\ndef guide(batch, z_dim, hidden_dim, out_dim=None, num_obs_total=None):\n \"\"\"Defines the probabilistic guide for z (variational approximation to posterior): q(z) ~ p(z|q)\n :param batch: a batch of observations\n :return: (named) sampled z from the variational (guide) distribution q(z)\n \"\"\"\n assert(jnp.ndim(batch) == 3)\n batch_size = jnp.shape(batch)[0]\n batch = jnp.reshape(batch, (batch_size, -1)) # squash each data item into a one-dimensional array (preserving only the batch size on the first axis)\n out_dim = jnp.shape(batch)[1]\n num_obs_total = batch_size if num_obs_total is None else num_obs_total\n\n encode = numpyro.module('encoder', encoder(hidden_dim, z_dim), (batch_size, out_dim))\n with plate('batch', num_obs_total, batch_size):\n z_loc, z_std = encode(batch) # obtain mean and variance for q(z) ~ p(z|x) from encoder\n z = sample('z', dist.Normal(z_loc, z_std).to_event(1)) # z follows q(z)\n return z\n\n\n@jit\ndef binarize(rng, batch):\n \"\"\"Binarizes a batch of observations with values in [0,1] by sampling from\n a Bernoulli distribution and using the original observations as means.\n\n Reason: This example assumes a Bernoulli distribution for the decoder output\n and thus requires inputs to be binary values as well.\n\n :param rng: rng seed key\n :param batch: Batch of data with continous values in interval [0, 1]\n :return: tuple(rng, binarized_batch).\n \"\"\"\n return random.bernoulli(rng, batch).astype(batch.dtype)\n\n\ndef main(args):\n # loading data\n (train_init, train_fetch_plain), num_samples = load_dataset(MNIST, batch_size=args.batch_size, split='train', batchifier=subsample_batchify_data)\n (test_init, test_fetch_plain), _ = load_dataset(MNIST, batch_size=args.batch_size, split='test', batchifier=split_batchify_data)\n\n def binarize_fetch(fetch_fn):\n @jit\n def fetch_binarized(batch_nr, batchifier_state, binarize_rng):\n batch = fetch_fn(batch_nr, batchifier_state)\n 
return binarize(binarize_rng, batch[0]), batch[1]\n return fetch_binarized\n\n train_fetch = binarize_fetch(train_fetch_plain)\n test_fetch = binarize_fetch(test_fetch_plain)\n\n # setting up optimizer\n optimizer = optimizers.Adam(args.learning_rate)\n\n # the plate environment in our model scales individual\n # records' contributions to the loss up by num_samples.\n # This can cause numerical instabilities so we scale down\n # the loss by 1/num_samples here.\n sc_model = scale(model, scale=1/num_samples)\n sc_guide = scale(guide, scale=1/num_samples)\n\n if args.no_dp:\n svi = SVI(sc_model, sc_guide, optimizer, ELBO(), num_obs_total=num_samples, z_dim=args.z_dim, hidden_dim=args.hidden_dim)\n else:\n q = args.batch_size / num_samples\n target_eps = args.epsilon\n dp_scale, act_eps, _ = approximate_sigma(\n target_eps=target_eps,\n delta=1/num_samples, q=q,\n num_iter=int(1/q) * args.num_epochs,\n force_smaller=True\n )\n print(f\"using noise scale {dp_scale} for epsilon of {act_eps} (targeted: {target_eps})\")\n\n svi = DPSVI(\n sc_model, sc_guide, optimizer, ELBO(),\n dp_scale=dp_scale, clipping_threshold=10.,\n num_obs_total=num_samples, z_dim=args.z_dim, hidden_dim=args.hidden_dim\n )\n\n # preparing random number generators and initializing svi\n rng = PRNGKey(0)\n rng, binarize_rng, svi_init_rng, batchifier_rng = random.split(rng, 4)\n _, batchifier_state = train_init(rng_key=batchifier_rng)\n sample_batch = train_fetch(0, batchifier_state, binarize_rng)[0]\n svi_state = svi.init(svi_init_rng, sample_batch)\n\n # functions for training tasks\n @jit\n def epoch_train(svi_state, batchifier_state, num_batches, rng):\n \"\"\"Trains one epoch\n\n :param svi_state: current state of the optimizer\n :param rng: rng key\n\n :return: overall training loss over the epoch\n \"\"\"\n\n def body_fn(i, val):\n svi_state, loss = val\n binarize_rng = random.fold_in(rng, i)\n batch = train_fetch(i, batchifier_state, binarize_rng)[0]\n svi_state, batch_loss = svi.update(\n svi_state, batch\n )\n loss += batch_loss / num_batches\n return svi_state, loss\n\n svi_state, loss = lax.fori_loop(0, num_batches, body_fn, (svi_state, 0.))\n return svi_state, loss\n\n @jit\n def eval_test(svi_state, batchifier_state, num_batches, rng):\n \"\"\"Evaluates current model state on test data.\n\n :param svi_state: current state of the optimizer\n :param rng: rng key\n\n :return: loss over the test split\n \"\"\"\n def body_fn(i, loss_sum):\n binarize_rng = random.fold_in(rng, i)\n batch = test_fetch(i, batchifier_state, binarize_rng)[0]\n batch_loss = svi.evaluate(svi_state, batch)\n loss_sum += batch_loss / num_batches\n return loss_sum\n\n return lax.fori_loop(0, num_batches, body_fn, 0.)\n\n def reconstruct_img(epoch, num_epochs, batchifier_state, svi_state, rng):\n \"\"\"Reconstructs an image for the given epoch\n\n Obtains a sample from the testing data set and passes it through the\n VAE. 
Stores the result as image file 'epoch_{epoch}_recons.png' and\n the original input as 'epoch_{epoch}_original.png' in folder '.results'.\n\n :param epoch: Number of the current epoch\n :param num_epochs: Number of total epochs\n :param opt_state: Current state of the optimizer\n :param rng: rng key\n \"\"\"\n assert(num_epochs > 0)\n img = test_fetch_plain(0, batchifier_state)[0][0]\n plt.imsave(\n os.path.join(RESULTS_DIR, \"epoch_{:0{}d}_original.png\".format(\n epoch, (int(jnp.log10(num_epochs))+1))\n ),\n img,\n cmap='gray'\n )\n rng, rng_binarize = random.split(rng, 2)\n test_sample = binarize(rng_binarize, img)\n test_sample = jnp.reshape(test_sample, (1, *jnp.shape(test_sample)))\n params = svi.get_params(svi_state)\n\n samples = sample_multi_posterior_predictive(\n rng, 10, model, (1, args.z_dim, args.hidden_dim, np.prod(test_sample.shape[1:])),\n guide, (test_sample, args.z_dim, args.hidden_dim), params\n )\n\n img_loc = samples['obs'][0].reshape([28, 28])\n avg_img_loc = jnp.mean(samples['obs'], axis=0).reshape([28, 28])\n plt.imsave(\n os.path.join(RESULTS_DIR, \"epoch_{:0{}d}_recons_single.png\".format(\n epoch, (int(jnp.log10(num_epochs))+1))\n ),\n img_loc,\n cmap='gray'\n )\n plt.imsave(\n os.path.join(RESULTS_DIR, \"epoch_{:0{}d}_recons_avg.png\".format(\n epoch, (int(jnp.log10(num_epochs))+1))\n ),\n avg_img_loc,\n cmap='gray'\n )\n\n # main training loop\n for i in range(args.num_epochs):\n t_start = time.time()\n rng, data_fetch_rng, train_rng = random.split(rng, 3)\n num_train_batches, train_batchifier_state, = train_init(rng_key=data_fetch_rng)\n svi_state, train_loss = epoch_train(\n svi_state, train_batchifier_state, num_train_batches, train_rng\n )\n\n rng, test_fetch_rng, test_rng, recons_rng = random.split(rng, 4)\n num_test_batches, test_batchifier_state = test_init(rng_key=test_fetch_rng)\n test_loss = eval_test(svi_state, test_batchifier_state, num_test_batches, test_rng)\n\n reconstruct_img(i, args.num_epochs, test_batchifier_state, svi_state, recons_rng)\n print(\"Epoch {}: loss = {} (on training set: {}) ({:.2f} s.)\".format(\n i, test_loss, train_loss, time.time() - t_start\n ))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"parse args\")\n parser.add_argument('-n', '--num-epochs', default=20, type=int, help='number of training epochs')\n parser.add_argument('-lr', '--learning-rate', default=1.0e-3, type=float, help='learning rate')\n parser.add_argument('--batch-size', default=128, type=int, help='batch size')\n parser.add_argument('--z-dim', default=50, type=int, help='size of latent')\n parser.add_argument('--hidden-dim', default=400, type=int, help='size of hidden layer in encoder/decoder networks')\n parser.add_argument('--epsilon', default=1., type=float, help='targeted value for privacy parameter epsilon')\n parser.add_argument('--no_dp', default=False, action='store_true', help='Use plain SVI instead of DPSVI algorithm')\n args = parser.parse_args()\n main(args)\n" ]
[ [ "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Uvindu/Intelligent-Eye-Care-System
[ "ec06fece351ea09145da606d42c27998e14a80fe" ]
[ "previous_studies/gaze_tracking_udith/pupil.py" ]
[ "import numpy as np\r\nimport cv2\r\nm=6\r\ndef high(img):\r\n img = cv2.resize(img,(int(img.shape[1]*m),int(img.shape[0]*m)))\r\n dst = cv2.fastNlMeansDenoising(img,None,200.0, 7, 21)\r\n #dst2 = cv.fastNlMeansDenoising(dst,None,20.0,7,21)\r\n \r\n #dst3 = cv.fastNlMeansDenoising(dst2,None,50,7,21)\r\n '''\r\n dst4 = cv.fastNlMeansDenoisingColored(dst3,None,10,10,7,21)\r\n dst5 = cv.fastNlMeansDenoisingColored(dst4,None,10,10,7,21)\r\n dst6 = cv.fastNlMeansDenoisingColored(dst5,None,10,10,7,21)\r\n dst7 = cv.fastNlMeansDenoisingColored(dst6,None,10,10,7,21)\r\n dst8 = cv.fastNlMeansDenoisingColored(dst7,None,10,10,7,21)\r\n dst9 = cv.fastNlMeansDenoisingColored(dst8,None,10,10,7,21)\r\n dst10 = cv.fastNlMeansDenoisingColored(dst9,None,10,10,7,21)'''\r\n\r\n return dst #RGB\r\n\r\ndef blob(im):\r\n params = cv2.SimpleBlobDetector_Params()\r\n params.filterByArea = True\r\n params.minArea = 270 # The dot in 20pt font has area of about 30\r\n params.filterByColor = 1\r\n #params.blobColor = 255\r\n params.filterByCircularity = 0\r\n params.filterByConvexity = 0\r\n params.filterByInertia = 0\r\n detector = cv2.SimpleBlobDetector_create(params)\r\n keypoints = detector.detect(im)\r\n try:\r\n return(keypoints[0].pt[0]//m,keypoints[0].pt[0]//m)\r\n except:\r\n return(('nun','nun'))\r\n\r\n\r\n\r\n \r\n\r\nclass Pupil(object):\r\n \"\"\"\r\n This class detects the iris of an eye and estimates\r\n the position of the pupil\r\n \"\"\"\r\n\r\n def __init__(self, eye_frame, threshold):\r\n self.iris_frame = None\r\n self.threshold = threshold\r\n self.x = None\r\n self.y = None\r\n\r\n self.detect_iris(eye_frame)\r\n\r\n @staticmethod\r\n def image_processing(eye_frame, threshold):\r\n \"\"\"Performs operations on the eye frame to isolate the iris\r\n\r\n Arguments:\r\n eye_frame (numpy.ndarray): Frame containing an eye and nothing else\r\n threshold (int): Threshold value used to binarize the eye frame\r\n\r\n Returns:\r\n A frame with a single element representing the iris\r\n \"\"\"\r\n kernel = np.ones((3, 3), np.uint8)\r\n new_frame = cv2.bilateralFilter(eye_frame, 10, 15, 15)\r\n new_frame = cv2.erode(new_frame, kernel, iterations=3)\r\n new_frame = cv2.threshold(new_frame, threshold, 255, cv2.THRESH_BINARY)[1]\r\n\r\n return new_frame\r\n\r\n def detect_iris(self, eye_frame):\r\n \"\"\"Detects the iris and estimates the position of the iris by\r\n calculating the centroid.\r\n\r\n Arguments:\r\n eye_frame (numpy.ndarray): Frame containing an eye and nothing else\r\n \"\"\"\r\n self.iris_frame = self.image_processing(eye_frame, self.threshold)\r\n self.x,self.y=np.array(blob(high(eye_frame)),dtype='uint8')\r\n \r\n" ]
[ [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
es94129/hierarchical-explanation
[ "5ccbd7a0f43e47f0df9a73016f36042e09c5544d" ]
[ "hiexpl/algo/scd_lstm.py" ]
[ "import torch\nfrom algo.cd_func import CD\nfrom algo.scd_func import CD_gpu, get_lstm_states\nfrom algo.soc_lstm import ExplanationBase, SOCForLSTM, Batch, append_extra_input, normalize_logit\nimport copy\nfrom utils.args import get_args\n\nargs = get_args()\n\n\nclass CDForLSTM(ExplanationBase):\n def __init__(self, target_model, data_iterator, vocab, pad_variation, tree_path, output_path, config, pad_idx=1):\n super().__init__(target_model, data_iterator, vocab, tree_path, output_path, config, pad_idx)\n self.pad_variation = pad_variation\n\n def explain_single(self, inp, inp_id, region, extra_input=None):\n \"\"\"\n :param region: the input region to be explained\n :param inp: numpy array\n :param inp_id: int\n :return:\n \"\"\"\n inp = torch.from_numpy(inp).long().view(-1,1)\n if self.gpu >= 0:\n inp = inp.to(self.gpu)\n batch = Batch(text=inp)\n if extra_input is not None:\n append_extra_input(batch, extra_input)\n rel_scores, irrel_score = CD(batch, self.model, [region])\n if rel_scores.shape[0] == 2:\n score = rel_scores[0] - rel_scores[1]\n else:\n gt_label = extra_input['gt_label']\n contrib_logits = torch.from_numpy(rel_scores).cuda().view(1,-1)\n score = normalize_logit(contrib_logits, gt_label)\n score = score.item()\n return score\n\n\nclass SCDForLSTM(SOCForLSTM):\n def __init__(self, target_model, lm_model, data_iterator, vocab, tree_path, output_path, config, pad_idx=1):\n super().__init__(target_model, lm_model, data_iterator, vocab, tree_path, output_path, config, pad_idx)\n self.sample_num = config.sample_n if not args.cd_pad else 1\n\n def get_states(self, inp, x_regions, nb_regions, extra_input):\n # suppose only have one x_region and one nb_region\n x_region = x_regions[0]\n nb_region = nb_regions[0]\n\n inp_length = torch.LongTensor([len(inp)])\n fw_pos = torch.LongTensor([min(x_region[1] + 1, len(inp))])\n bw_pos = torch.LongTensor([max(x_region[0] - 1, -1)])\n\n inp_lm = copy.copy(inp)\n for i in range(len(inp_lm)):\n if nb_region[0] <= i <= nb_region[1] and not x_region[0] <= i <= x_region[1]:\n inp_lm[i] = 1\n inp_th = torch.from_numpy(inp_lm).long().view(-1, 1)\n\n if self.gpu >= 0:\n inp_th = inp_th.to(self.gpu)\n inp_length = inp_length.to(self.gpu)\n fw_pos = fw_pos.to(self.gpu)\n bw_pos = bw_pos.to(self.gpu)\n\n batch = Batch(text=inp_th, length=inp_length, fw_pos=fw_pos, bw_pos=bw_pos)\n\n all_filled_inp = []\n max_sample_length = (self.nb_range + 1) if self.nb_method == 'ngram' else (inp_th.size(0) + 1)\n\n if not args.cd_pad:\n fw_sample_outputs, bw_sample_outputs = self.lm_model.sample_n('random', batch,\n max_sample_length=max_sample_length,\n sample_num=self.sample_num)\n for sample_i in range(self.sample_num):\n fw_sample_seq, bw_sample_seq = fw_sample_outputs[:,sample_i].cpu().numpy(), \\\n bw_sample_outputs[:,sample_i].cpu().numpy()\n filled_inp = copy.copy(inp)\n len_bw = x_region[0] - nb_region[0]\n len_fw = nb_region[1] - x_region[1]\n if len_bw > 0:\n filled_inp[nb_region[0]:x_region[0]] = bw_sample_seq[-len_bw:]\n if len_fw > 0:\n filled_inp[x_region[1] + 1:nb_region[1] + 1] = fw_sample_seq[:len_fw]\n\n filled_inp = torch.from_numpy(filled_inp).long()\n if self.gpu >= 0:\n filled_inp = filled_inp.to(self.gpu)\n all_filled_inp.append(filled_inp)\n else:\n # pad the nb region to 1\n filled_inp = copy.copy(inp)\n for i in range(nb_region[0], nb_region[1] + 1):\n if not x_region[0] <= i <= x_region[1]:\n filled_inp[i] = 1\n filled_inp = torch.from_numpy(filled_inp).long()\n if self.gpu >= 0:\n filled_inp = filled_inp.to(self.gpu)\n 
all_filled_inp.append(filled_inp)\n\n all_filled_inp = torch.stack(all_filled_inp, -1) # [T,B]\n batch = Batch(text=all_filled_inp)\n\n if extra_input is not None:\n append_extra_input(batch, extra_input)\n all_states = get_lstm_states(batch, self.model, self.gpu)\n\n return all_states\n\n def explain_single(self, inp, inp_id, region, extra_input=None):\n \"\"\"\n :param region: the input region to be explained\n :param inp: numpy array\n :param inp_id: int\n :return:\n \"\"\"\n if self.nb_method == 'tree':\n tree = self.trees[inp_id]\n mask_regions = self.get_tree_mask_region(tree, region, inp)\n elif self.nb_method == 'ngram':\n mask_regions = self.get_ngram_mask_region(region, inp)\n else:\n raise NotImplementedError('unknown method %s' % self.nb_method)\n with torch.no_grad():\n if self.sample_num > 0:\n states = self.get_states(inp, [region], mask_regions, extra_input)\n else:\n states = None\n inp = torch.from_numpy(inp).long().view(-1, 1)\n if self.gpu >= 0:\n inp = inp.to(self.gpu)\n batch = Batch(text=inp)\n if extra_input is not None:\n append_extra_input(batch, extra_input)\n rel_scores, irrel_scores, _ = CD_gpu(batch, self.model, [region], states, gpu=self.gpu)\n if rel_scores.shape[0] == 2:\n score = rel_scores[0] - rel_scores[1]\n else:\n gt_label = extra_input['gt_label']\n contrib_logits = rel_scores.view(1,-1)\n score = normalize_logit(contrib_logits, gt_label)\n score = score.item()\n return score" ]
[ [ "torch.stack", "torch.no_grad", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
microsoft/CodeBERT
[ "c9bc756799967f428d8d48d0c89cd08150deeda0" ]
[ "GraphCodeBERT/clonedetection/run.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport pickle\nimport random\nimport re\nimport shutil\nimport json\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,\n RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)\nfrom tqdm import tqdm, trange\nimport multiprocessing\nfrom model import Model\n\ncpu_cont = 16\nlogger = logging.getLogger(__name__)\n\nfrom parser import DFG_python,DFG_java,DFG_ruby,DFG_go,DFG_php,DFG_javascript\nfrom parser import (remove_comments_and_docstrings,\n tree_to_token_index,\n index_to_code_token,\n tree_to_variable_index)\nfrom tree_sitter import Language, Parser\ndfg_function={\n 'python':DFG_python,\n 'java':DFG_java,\n 'ruby':DFG_ruby,\n 'go':DFG_go,\n 'php':DFG_php,\n 'javascript':DFG_javascript\n}\n\n#load parsers\nparsers={} \nfor lang in dfg_function:\n LANGUAGE = Language('parser/my-languages.so', lang)\n parser = Parser()\n parser.set_language(LANGUAGE) \n parser = [parser,dfg_function[lang]] \n parsers[lang]= parser\n \n \n#remove comments, tokenize code and extract dataflow \ndef extract_dataflow(code, parser,lang):\n #remove comments\n try:\n code=remove_comments_and_docstrings(code,lang)\n except:\n pass \n #obtain dataflow\n if lang==\"php\":\n code=\"<?php\"+code+\"?>\" \n try:\n tree = parser[0].parse(bytes(code,'utf8')) \n root_node = tree.root_node \n tokens_index=tree_to_token_index(root_node) \n code=code.split('\\n')\n code_tokens=[index_to_code_token(x,code) for x in tokens_index] \n index_to_code={}\n for idx,(index,code) in enumerate(zip(tokens_index,code_tokens)):\n index_to_code[index]=(idx,code) \n try:\n DFG,_=parser[1](root_node,index_to_code,{}) \n except:\n DFG=[]\n DFG=sorted(DFG,key=lambda x:x[1])\n indexs=set()\n for d in DFG:\n if len(d[-1])!=0:\n indexs.add(d[1])\n for x in d[-1]:\n indexs.add(x)\n new_DFG=[]\n for d in DFG:\n if d[1] in indexs:\n new_DFG.append(d)\n dfg=new_DFG\n except:\n dfg=[]\n return code_tokens,dfg\n\nclass InputFeatures(object):\n \"\"\"A single training/test features for a example.\"\"\"\n def __init__(self,\n input_tokens_1,\n input_ids_1,\n position_idx_1,\n dfg_to_code_1,\n dfg_to_dfg_1,\n input_tokens_2,\n input_ids_2,\n position_idx_2,\n dfg_to_code_2,\n dfg_to_dfg_2,\n label,\n url1,\n url2\n\n ):\n 
#The first code function\n self.input_tokens_1 = input_tokens_1\n self.input_ids_1 = input_ids_1\n self.position_idx_1=position_idx_1\n self.dfg_to_code_1=dfg_to_code_1\n self.dfg_to_dfg_1=dfg_to_dfg_1\n \n #The second code function\n self.input_tokens_2 = input_tokens_2\n self.input_ids_2 = input_ids_2\n self.position_idx_2=position_idx_2\n self.dfg_to_code_2=dfg_to_code_2\n self.dfg_to_dfg_2=dfg_to_dfg_2\n \n #label\n self.label=label\n self.url1=url1\n self.url2=url2\n \n\ndef convert_examples_to_features(item):\n #source\n url1,url2,label,tokenizer, args,cache,url_to_code=item\n parser=parsers['java']\n \n for url in [url1,url2]:\n if url not in cache:\n func=url_to_code[url]\n \n #extract data flow\n code_tokens,dfg=extract_dataflow(func,parser,'java')\n code_tokens=[tokenizer.tokenize('@ '+x)[1:] if idx!=0 else tokenizer.tokenize(x) for idx,x in enumerate(code_tokens)]\n ori2cur_pos={}\n ori2cur_pos[-1]=(0,0)\n for i in range(len(code_tokens)):\n ori2cur_pos[i]=(ori2cur_pos[i-1][1],ori2cur_pos[i-1][1]+len(code_tokens[i])) \n code_tokens=[y for x in code_tokens for y in x] \n \n #truncating\n code_tokens=code_tokens[:args.code_length+args.data_flow_length-3-min(len(dfg),args.data_flow_length)][:512-3]\n source_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]\n source_ids = tokenizer.convert_tokens_to_ids(source_tokens)\n position_idx = [i+tokenizer.pad_token_id + 1 for i in range(len(source_tokens))]\n dfg=dfg[:args.code_length+args.data_flow_length-len(source_tokens)]\n source_tokens+=[x[0] for x in dfg]\n position_idx+=[0 for x in dfg]\n source_ids+=[tokenizer.unk_token_id for x in dfg]\n padding_length=args.code_length+args.data_flow_length-len(source_ids)\n position_idx+=[tokenizer.pad_token_id]*padding_length\n source_ids+=[tokenizer.pad_token_id]*padding_length \n \n #reindex\n reverse_index={}\n for idx,x in enumerate(dfg):\n reverse_index[x[1]]=idx\n for idx,x in enumerate(dfg):\n dfg[idx]=x[:-1]+([reverse_index[i] for i in x[-1] if i in reverse_index],) \n dfg_to_dfg=[x[-1] for x in dfg]\n dfg_to_code=[ori2cur_pos[x[1]] for x in dfg]\n length=len([tokenizer.cls_token])\n dfg_to_code=[(x[0]+length,x[1]+length) for x in dfg_to_code] \n cache[url]=source_tokens,source_ids,position_idx,dfg_to_code,dfg_to_dfg\n\n \n source_tokens_1,source_ids_1,position_idx_1,dfg_to_code_1,dfg_to_dfg_1=cache[url1] \n source_tokens_2,source_ids_2,position_idx_2,dfg_to_code_2,dfg_to_dfg_2=cache[url2] \n return InputFeatures(source_tokens_1,source_ids_1,position_idx_1,dfg_to_code_1,dfg_to_dfg_1,\n source_tokens_2,source_ids_2,position_idx_2,dfg_to_code_2,dfg_to_dfg_2,\n label,url1,url2)\n\nclass TextDataset(Dataset):\n def __init__(self, tokenizer, args, file_path='train'):\n self.examples = []\n self.args=args\n index_filename=file_path\n \n #load index\n logger.info(\"Creating features from index file at %s \", index_filename)\n url_to_code={}\n with open('/'.join(index_filename.split('/')[:-1])+'/data.jsonl') as f:\n for line in f:\n line=line.strip()\n js=json.loads(line)\n url_to_code[js['idx']]=js['func']\n \n #load code function according to index\n data=[]\n cache={}\n f=open(index_filename)\n with open(index_filename) as f:\n for line in f:\n line=line.strip()\n url1,url2,label=line.split('\\t')\n if url1 not in url_to_code or url2 not in url_to_code:\n continue\n if label=='0':\n label=0\n else:\n label=1\n data.append((url1,url2,label,tokenizer, args,cache,url_to_code))\n \n #only use 10% valid data to keep best model \n if 'valid' in file_path:\n 
data=random.sample(data,int(len(data)*0.1))\n \n #convert example to input features \n self.examples=[convert_examples_to_features(x) for x in tqdm(data,total=len(data))]\n \n if 'train' in file_path:\n for idx, example in enumerate(self.examples[:3]):\n logger.info(\"*** Example ***\")\n logger.info(\"idx: {}\".format(idx))\n logger.info(\"label: {}\".format(example.label))\n logger.info(\"input_tokens_1: {}\".format([x.replace('\\u0120','_') for x in example.input_tokens_1]))\n logger.info(\"input_ids_1: {}\".format(' '.join(map(str, example.input_ids_1)))) \n logger.info(\"position_idx_1: {}\".format(example.position_idx_1))\n logger.info(\"dfg_to_code_1: {}\".format(' '.join(map(str, example.dfg_to_code_1))))\n logger.info(\"dfg_to_dfg_1: {}\".format(' '.join(map(str, example.dfg_to_dfg_1))))\n \n logger.info(\"input_tokens_2: {}\".format([x.replace('\\u0120','_') for x in example.input_tokens_2]))\n logger.info(\"input_ids_2: {}\".format(' '.join(map(str, example.input_ids_2)))) \n logger.info(\"position_idx_2: {}\".format(example.position_idx_2))\n logger.info(\"dfg_to_code_2: {}\".format(' '.join(map(str, example.dfg_to_code_2))))\n logger.info(\"dfg_to_dfg_2: {}\".format(' '.join(map(str, example.dfg_to_dfg_2))))\n\n\n def __len__(self):\n return len(self.examples)\n \n def __getitem__(self, item):\n #calculate graph-guided masked function\n attn_mask_1= np.zeros((self.args.code_length+self.args.data_flow_length,\n self.args.code_length+self.args.data_flow_length),dtype=np.bool)\n #calculate begin index of node and max length of input\n node_index=sum([i>1 for i in self.examples[item].position_idx_1])\n max_length=sum([i!=1 for i in self.examples[item].position_idx_1])\n #sequence can attend to sequence\n attn_mask_1[:node_index,:node_index]=True\n #special tokens attend to all tokens\n for idx,i in enumerate(self.examples[item].input_ids_1):\n if i in [0,2]:\n attn_mask_1[idx,:max_length]=True\n #nodes attend to code tokens that are identified from\n for idx,(a,b) in enumerate(self.examples[item].dfg_to_code_1):\n if a<node_index and b<node_index:\n attn_mask_1[idx+node_index,a:b]=True\n attn_mask_1[a:b,idx+node_index]=True\n #nodes attend to adjacent nodes \n for idx,nodes in enumerate(self.examples[item].dfg_to_dfg_1):\n for a in nodes:\n if a+node_index<len(self.examples[item].position_idx_1):\n attn_mask_1[idx+node_index,a+node_index]=True \n \n #calculate graph-guided masked function\n attn_mask_2= np.zeros((self.args.code_length+self.args.data_flow_length,\n self.args.code_length+self.args.data_flow_length),dtype=np.bool)\n #calculate begin index of node and max length of input\n node_index=sum([i>1 for i in self.examples[item].position_idx_2])\n max_length=sum([i!=1 for i in self.examples[item].position_idx_2])\n #sequence can attend to sequence\n attn_mask_2[:node_index,:node_index]=True\n #special tokens attend to all tokens\n for idx,i in enumerate(self.examples[item].input_ids_2):\n if i in [0,2]:\n attn_mask_2[idx,:max_length]=True\n #nodes attend to code tokens that are identified from\n for idx,(a,b) in enumerate(self.examples[item].dfg_to_code_2):\n if a<node_index and b<node_index:\n attn_mask_2[idx+node_index,a:b]=True\n attn_mask_2[a:b,idx+node_index]=True\n #nodes attend to adjacent nodes \n for idx,nodes in enumerate(self.examples[item].dfg_to_dfg_2):\n for a in nodes:\n if a+node_index<len(self.examples[item].position_idx_2):\n attn_mask_2[idx+node_index,a+node_index]=True \n \n return (torch.tensor(self.examples[item].input_ids_1),\n 
torch.tensor(self.examples[item].position_idx_1),\n torch.tensor(attn_mask_1), \n torch.tensor(self.examples[item].input_ids_2),\n torch.tensor(self.examples[item].position_idx_2),\n torch.tensor(attn_mask_2), \n torch.tensor(self.examples[item].label))\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\"\n \n #build dataloader\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,num_workers=4)\n \n args.max_steps=args.epochs*len( train_dataloader)\n args.save_steps=len( train_dataloader)//10\n args.warmup_steps=args.max_steps//5\n model.to(args.device)\n \n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\n num_training_steps=args.max_steps)\n\n # multi-gpu training\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.train_batch_size//max(args.n_gpu, 1))\n logger.info(\" Total train batch size = %d\",args.train_batch_size*args.gradient_accumulation_steps)\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", args.max_steps)\n \n global_step=0\n tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0\n best_f1=0\n\n model.zero_grad()\n \n for idx in range(args.epochs): \n bar = tqdm(train_dataloader,total=len(train_dataloader))\n tr_num=0\n train_loss=0\n for step, batch in enumerate(bar):\n (inputs_ids_1,position_idx_1,attn_mask_1,\n inputs_ids_2,position_idx_2,attn_mask_2,\n labels)=[x.to(args.device) for x in batch]\n model.train()\n loss,logits = model(inputs_ids_1,position_idx_1,attn_mask_1,inputs_ids_2,position_idx_2,attn_mask_2,labels)\n\n if args.n_gpu > 1:\n loss = loss.mean()\n \n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n tr_loss += loss.item()\n tr_num+=1\n train_loss+=loss.item()\n if avg_loss==0:\n avg_loss=tr_loss\n \n avg_loss=round(train_loss/tr_num,5)\n bar.set_description(\"epoch {} loss {}\".format(idx,avg_loss))\n \n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step() \n global_step += 1\n output_flag=True\n avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)\n\n if global_step % args.save_steps == 0:\n results = evaluate(args, model, tokenizer, eval_when_training=True) \n \n # Save model checkpoint\n if results['eval_f1']>best_f1:\n best_f1=results['eval_f1']\n logger.info(\" \"+\"*\"*20) \n 
logger.info(\" Best f1:%s\",round(best_f1,4))\n logger.info(\" \"+\"*\"*20) \n \n checkpoint_prefix = 'checkpoint-best-f1'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n if not os.path.exists(output_dir):\n os.makedirs(output_dir) \n model_to_save = model.module if hasattr(model,'module') else model\n output_dir = os.path.join(output_dir, '{}'.format('model.bin')) \n torch.save(model_to_save.state_dict(), output_dir)\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n \ndef evaluate(args, model, tokenizer, eval_when_training=False):\n #build dataloader\n eval_dataset = TextDataset(tokenizer, args, file_path=args.eval_data_file)\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,batch_size=args.eval_batch_size,num_workers=4)\n\n # multi-gpu evaluate\n if args.n_gpu > 1 and eval_when_training is False:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n \n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n logits=[] \n y_trues=[]\n for batch in eval_dataloader:\n (inputs_ids_1,position_idx_1,attn_mask_1,\n inputs_ids_2,position_idx_2,attn_mask_2,\n labels)=[x.to(args.device) for x in batch]\n with torch.no_grad():\n lm_loss,logit = model(inputs_ids_1,position_idx_1,attn_mask_1,inputs_ids_2,position_idx_2,attn_mask_2,labels)\n eval_loss += lm_loss.mean().item()\n logits.append(logit.cpu().numpy())\n y_trues.append(labels.cpu().numpy())\n nb_eval_steps += 1\n \n #calculate scores\n logits=np.concatenate(logits,0)\n y_trues=np.concatenate(y_trues,0)\n best_threshold=0.5\n best_f1=0\n\n y_preds=logits[:,1]>best_threshold\n from sklearn.metrics import recall_score\n recall=recall_score(y_trues, y_preds)\n from sklearn.metrics import precision_score\n precision=precision_score(y_trues, y_preds) \n from sklearn.metrics import f1_score\n f1=f1_score(y_trues, y_preds) \n result = {\n \"eval_recall\": float(recall),\n \"eval_precision\": float(precision),\n \"eval_f1\": float(f1),\n \"eval_threshold\":best_threshold,\n \n }\n\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(round(result[key],4)))\n\n return result\n\ndef test(args, model, tokenizer, best_threshold=0):\n #build dataloader\n eval_dataset = TextDataset(tokenizer, args, file_path=args.test_data_file)\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4)\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running Test *****\")\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n logits=[] \n y_trues=[]\n for batch in eval_dataloader:\n (inputs_ids_1,position_idx_1,attn_mask_1,\n inputs_ids_2,position_idx_2,attn_mask_2,\n labels)=[x.to(args.device) for x in batch]\n with torch.no_grad():\n lm_loss,logit = model(inputs_ids_1,position_idx_1,attn_mask_1,inputs_ids_2,position_idx_2,attn_mask_2,labels)\n eval_loss += lm_loss.mean().item()\n logits.append(logit.cpu().numpy())\n y_trues.append(labels.cpu().numpy())\n nb_eval_steps += 1\n \n #output result\n logits=np.concatenate(logits,0)\n 
y_preds=logits[:,1]>best_threshold\n with open(os.path.join(args.output_dir,\"predictions.txt\"),'w') as f:\n for example,pred in zip(eval_dataset.examples,y_preds):\n if pred:\n f.write(example.url1+'\\t'+example.url2+'\\t'+'1'+'\\n')\n else:\n f.write(example.url1+'\\t'+example.url2+'\\t'+'0'+'\\n')\n \ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--train_data_file\", default=None, type=str, required=True,\n help=\"The input training data file (a text file).\")\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--eval_data_file\", default=None, type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\")\n parser.add_argument(\"--test_data_file\", default=None, type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\")\n \n parser.add_argument(\"--model_name_or_path\", default=None, type=str,\n help=\"The model checkpoint for weights initialization.\")\n\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path\")\n\n parser.add_argument(\"--code_length\", default=256, type=int,\n help=\"Optional Code input sequence length after tokenization.\") \n parser.add_argument(\"--data_flow_length\", default=64, type=int,\n help=\"Optional Data Flow input sequence length after tokenization.\") \n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_test\", action='store_true',\n help=\"Whether to run eval on the dev set.\") \n parser.add_argument(\"--evaluate_during_training\", action='store_true',\n help=\"Run evaluation during training at each logging step.\")\n\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--eval_batch_size\", default=4, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--epochs', type=int, default=1,\n help=\"training epochs\")\n\n args = parser.parse_args()\n\n # Setup CUDA, GPU\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n\n args.device = device\n\n # Setup logging\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO)\n logger.warning(\"device: %s, n_gpu: %s\",device, args.n_gpu,)\n\n\n # Set seed\n set_seed(args)\n config = RobertaConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)\n config.num_labels=1\n tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)\n model = RobertaForSequenceClassification.from_pretrained(args.model_name_or_path,config=config) \n\n model=Model(model,config,tokenizer,args)\n logger.info(\"Training/evaluation parameters %s\", args)\n # Training\n if args.do_train:\n train_dataset = TextDataset(tokenizer, args, file_path=args.train_data_file)\n train(args, train_dataset, model, tokenizer)\n\n # Evaluation\n results = {}\n if args.do_eval:\n checkpoint_prefix = 'checkpoint-best-f1/model.bin'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n model.load_state_dict(torch.load(output_dir))\n model.to(args.device)\n result=evaluate(args, model, tokenizer)\n \n if args.do_test:\n checkpoint_prefix = 'checkpoint-best-f1/model.bin'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n model.load_state_dict(torch.load(output_dir))\n model.to(args.device)\n test(args, model, tokenizer,best_threshold=0.5)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.utils.data.SequentialSampler", "sklearn.metrics.precision_score", "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "torch.cuda.device_count", "torch.tensor", "numpy.concatenate", "torch.nn.DataParallel", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "sklearn.metrics.f1_score", "numpy.exp", "sklearn.metrics.recall_score", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
erkundanec/BasicPython
[ "0d3bd1ccb603b94fc3701783dfb06f831ceb2541" ]
[ "OpenCV/AnacondaProjects/find_game.py" ]
[ "# import the necessary packages\nimport numpy as np\nimport cv2\n \n# load the games image\nimage = cv2.imread(\"games.jpg\",0)\ncv2.imshow('imgShow',image)\ncv2.waitKey(0)\n# find the red color game in the image\nupper = np.array([65, 65, 255])\nlower = np.array([0, 0, 200])\nmask = cv2.inRange(image, lower, upper)\n \n# find contours in the masked image and keep the largest one\n(_, cnts, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n\tcv2.CHAIN_APPROX_SIMPLE)\nc = max(cnts, key=cv2.contourArea)\n \n# approximate the contour\nperi = cv2.arcLength(c, True)\napprox = cv2.approxPolyDP(c, 0.05 * peri, True)\n \n# draw a green bounding box surrounding the red game\ncv2.drawContours(image, [approx], -1, (0, 255, 0), 4)\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tatonetti-lab/sex_risks
[ "caa7159993921aa2861cae78a56c3e72052b4ea6" ]
[ "Code/drug.py" ]
[ "from collections import Counter\nimport numpy as np\nimport pandas as pd \nimport feather \nfrom scipy import stats\nimport pymysql\nimport pymysql.cursors\nfrom database import Database\nfrom utils import Utils\n\nclass Drug:\n \n db = Database('Mimir from Munnin')\n u = Utils()\n \n def __init__(self, drugID, adrID=None): \n \n self.db = Database('Mimir from Munnin')\n \n self.id = str(drugID)\n self.atc = self._get_atc()\n self.name = self._get_name()\n if adrID: \n self.adr = adrID\n else: \n self.adr = self._get_adr()\n self.pid = self._get_patients_on_drug()\n self.pscores = self._get_propensity_scores()\n self.results = self._get_blank_results()\n \n self.match_f = None\n self.match_m = None\n self.XF = None\n self.XM = None\n self.adr_count_f = None\n self.adr_count_m = None\n \n def _get_atc(self): \n q_atc5 = 'select * from atc_5_name where atc_5_id='+self.id\n q_atc4 = 'select * from atc_4_name where atc_4_id='+self.id\n if self.db.get_list(q_atc4): atc = '4'\n elif self.db.get_list(q_atc5): atc = '5'\n else:raise NameError('Cant find drug in database')\n return atc\n \n def _get_name(self): \n q = 'select atc_'+self.atc+'_name from atc_'+self.atc+'_name where atc_'+self.atc+'_id='+self.id\n name = self.db.get_list(q)[0]\n return name\n \n def _get_adr(self): \n return self.u.load_np('adr')\n \n def _get_patients_on_drug(self): \n q = 'select PID from atc_'+self.atc+'_patient where atc_'+self.atc+'_id = '+self.id\n pid = self.db.get_list(q)\n return pid\n \n def _get_propensity_scores(self): \n df_propensity = self.u.load_df('df_propensity')\n df_propensity = df_propensity.set_index('PID')\n return df_propensity[df_propensity.index.isin(self.pid)]\n \n def _get_blank_results(self):\n df = pd.DataFrame(columns = ['drug', 'itr', 'adr','XFE','XFe','XME','XMe', 'p_value','logROR'])\n df = df.set_index(['drug','itr','adr'])\n return df \n \n def match(self, cal=0.01, bins=100, minRecPerBin=0.005): \n allFemales = self.pscores.copy(deep=True).query('Sex==\"F\"')\n allMales = self.pscores.copy(deep=True).query('Sex==\"M\"')\n\n if (len(allFemales) < 250) or (len(allMales) < 250): \n print(len(allFemales), \"females\")\n print(len(allMales), \"males\")\n raise NameError('Insufficient data for both sexes') \n\n minF = len(allFemales)*minRecPerBin\n minM = len(allMales)*minRecPerBin\n females, males = [], []\n allFemales['bin'] = pd.cut(allFemales.Propensity, bins)\n\n for iBin, fOptions in allFemales.groupby(by='bin'):\n minPS, maxPS = iBin.left, iBin.right\n\n mOptions = allMales.query('Propensity > (@minPS-@cal) and Propensity < (@maxPS+@cal)').index.values\n\n if(len(mOptions)<minM or len(fOptions)<minF):\n continue; # too few data, don't add to matched\n\n females = np.append(females, fOptions.index.values)\n males = np.append(males, (np.random.choice(mOptions, len(fOptions))))\n \n self.match_f = females\n self.match_m = males\n \n self.XF = len(females)\n self.XM = len(males)\n\n def count_adr(self): \n\n # Females \n\n q = 'select meddra_concept_id from hlgt_patient where PID in (\"'+ \"\\\", \\\"\".join(self.match_f) + '\")'\n hglt_f = self.db.get_list(q)\n \n q = 'select meddra_concept_id from soc_patient where PID in (\"'+ \"\\\", \\\"\".join(self.match_f) + '\")'\n soc_f = self.db.get_list(q)\n \n q = 'select meddra_concept_id from pt_patient where PID in (\"'+ \"\\\", \\\"\".join(self.match_f) + '\")'\n pt_f = self.db.get_list(q)\n \n select_f = hglt_f\n select_f.extend(soc_f)\n select_f.extend(pt_f)\n \n # Male\n\n select_m = []\n\n unique_m, counts = 
np.unique(self.match_m, return_counts=True)\n\n for count in np.unique(counts):\n idx = np.where(counts==count)\n pids = np.take(unique_m, idx)[0]\n\n q = 'select meddra_concept_id from hlgt_patient where PID in (\"'+ \"\\\", \\\"\".join(pids) + '\")'\n to_add_hlgt = self.db.get_list(q)\n \n q = 'select meddra_concept_id from soc_patient where PID in (\"'+ \"\\\", \\\"\".join(pids) + '\")'\n to_add_soc = self.db.get_list(q)\n \n q = 'select meddra_concept_id from pt_patient where PID in (\"'+ \"\\\", \\\"\".join(pids) + '\")'\n to_add_pt = self.db.get_list(q)\n\n for i in range(count): \n select_m.extend(to_add_hlgt)\n select_m.extend(to_add_soc)\n select_m.extend(to_add_pt)\n \n self.adr_count_f = Counter(select_f)\n self.adr_count_m = Counter(select_m)\n \n def assign_abcd(self, itr):\n\n for adr in self.adr: \n self.results.loc[(self.id, itr, adr),['XFE']] = self.adr_count_f[adr]\n self.results.loc[(self.id, itr, adr),['XME']] = self.adr_count_m[adr]\n \n self.results.eval('XFe = @self.XF - XFE', inplace=True)\n self.results.eval('XMe = @self.XM - XME', inplace=True)\n \n def do_chi_square(self):\n\n for idx, data in self.results.iterrows(): \n \n contingencyTable = np.array([[data.XFE, data.XFe], [data.XME, data.XMe]])\n \n if contingencyTable.all():\n chi2, p, dof, expected = stats.chi2_contingency(contingencyTable)\n self.results.at[idx, 'p_value'] = p\n \n def calc_logROR(self): \n self.results.eval('ROR = (XFE/XFe)/(XME/XMe)', inplace=True)\n self.results.eval('logROR = log(ROR)', inplace=True)\n self.results.drop('ROR', axis=1, inplace=True)\n \n def reset_for_next_itr(self): \n self.match_f = None\n self.match_m = None\n self.XF = None\n self.XM = None\n self.adr_count_f = None\n self.adr_count_m = None\n \n def save_results(self, itr): \n assert self.ensure_results(itr)\n assert self.u.save_df(self.results, 'Results/'+self.id)\n return True\n\n def ensure_results(self, itr): \n self.results = self.results.reset_index()\n assert self.results.shape[0] == itr*len(self.adr)\n assert self.results.shape[1] == 9\n return True\n\n" ]
[ [ "numpy.take", "scipy.stats.chi2_contingency", "numpy.unique", "pandas.DataFrame", "numpy.append", "pandas.cut", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
a5chin/NumberPlaceDataset
[ "d6e0eba6f0eade1cdb63e354cc066129b5ad04a5" ]
[ "lib/core/detector.py" ]
[ "import torch\nimport numpy as np\nimport cv2\nfrom PIL import Image, ImageOps\nfrom pathlib import Path\nfrom typing import List\n\nfrom lib.core import get_transforms\nfrom lib.model import get_resnet\n\n\nclass Detector:\n def __init__(self, ckpt: str='../logs/NumberPlaceDataset/ckpt/best_ckpt.pth') -> None:\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.model = get_resnet(pretrained=False, num_classes=10)\n self.model.load_state_dict(torch.load(ckpt, map_location=torch.device(self.device)))\n self.transforms = get_transforms()\n self.table = [[0 for _ in range(9)] for _ in range(9)]\n\n def detect(self, dir: str='../data/problem/example') -> List:\n self.model.eval()\n dir = Path(dir)\n for p in dir.glob('**/*.jpg'):\n column, row = map(int, str(p.stem).strip(''))\n\n gray = Image.open(p).convert('L')\n gray = ImageOps.invert(gray)\n gray = np.array(gray)\n ret, th = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)\n th = Image.fromarray(th)\n img = self.transforms['test'](th).view(1, 1, 28, 28)\n\n self.table[column][row] = self.model(img).argmax().item()\n\n return self.table\n" ]
[ [ "torch.device", "numpy.array", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yongchaoding/bus_passenger_detection
[ "a716631eb6370e795f7ca40a8869d6c4ba290091" ]
[ "models/ssd.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data import *\nfrom models.layers.prior_box import PriorBox\nfrom models.layers.l2norm import L2Norm\nimport os\n\n\nclass SSD(nn.Module):\n \"\"\"Single Shot Multibox Architecture\n The network is composed of a base VGG network followed by the\n added multibox conv layers. Each multibox layer branches into\n 1) conv2d for class conf scores\n 2) conv2d for localization predictions\n 3) associated priorbox layer to produce default bounding\n boxes specific to the layer's feature map size.\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n\n Args:\n phase: (string) Can be \"test\" or \"train\"\n size: input image size\n base: VGG16 layers for input, size of either 300 or 500\n extras: extra layers that feed to multibox loc and conf layers\n head: \"multibox head\" consists of loc and conf conv layers\n \"\"\"\n\n def __init__(self, phase, size, base, extras, head, num_classes):\n super(SSD, self).__init__()\n self.phase = phase\n self.num_classes = num_classes\n self.cfg = bus_passenger_cfg\n self.priorbox = PriorBox(self.cfg)\n self.priors = Variable(self.priorbox.forward(), volatile=True)\n self.size = size\n\n # SSD network\n self.vgg = nn.ModuleList(base)\n # Layer learns to scale the l2 normalized features from conv4_3\n self.L2Norm = L2Norm(512, 20)\n self.extras = nn.ModuleList(extras)\n\n self.loc = nn.ModuleList(head[0])\n self.conf = nn.ModuleList(head[1])\n\n if phase == 'test':\n self.softmax = nn.Softmax(dim=-1)\n self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)\n\n def forward(self, x):\n \"\"\"Applies network layers and ops on input image(s) x.\n\n Args:\n x: input image or batch of images. Shape: [batch,3,300,300].\n\n Return:\n Depending on phase:\n test:\n Variable(tensor) of output class label predictions,\n confidence score, and corresponding location predictions for\n each object detected. 
Shape: [batch,topk,7]\n\n train:\n list of concat outputs from:\n 1: confidence layers, Shape: [batch*num_priors,num_classes]\n 2: localization layers, Shape: [batch,num_priors*4]\n 3: priorbox layers, Shape: [2,num_priors*4]\n \"\"\"\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # apply vgg up to fc7\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,\n self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output\n\n def load_weights(self, base_file):\n other, ext = os.path.splitext(base_file)\n if ext == '.pkl' or '.pth':\n print('Loading weights into state dict...')\n self.load_state_dict(torch.load(base_file,\n map_location=lambda storage, loc: storage))\n print('Finished!')\n else:\n print('Sorry only .pth and .pkl files supported.')\n\n\n# This function is derived from torchvision VGG make_layers()\n# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py\ndef vgg(cfg, i, batch_norm=False):\n layers = []\n in_channels = i\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n elif v == 'C':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)\n conv7 = nn.Conv2d(1024, 1024, kernel_size=1)\n layers += [pool5, conv6,\n nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]\n return layers\n\n\ndef add_extras(cfg, i, batch_norm=False):\n # Extra layers added to VGG for feature scaling\n layers = []\n in_channels = i\n flag = False\n for k, v in enumerate(cfg):\n if in_channels != 'S':\n if v == 'S':\n layers += [nn.Conv2d(in_channels, cfg[k + 1],\n kernel_size=(1, 3)[flag], stride=2, padding=1)]\n else:\n layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n flag = not flag\n in_channels = v\n return layers\n\n\ndef multibox(vgg, extra_layers, cfg, num_classes):\n loc_layers = []\n conf_layers = []\n vgg_source = [21, -2]\n for k, v in enumerate(vgg_source):\n loc_layers += [nn.Conv2d(vgg[v].out_channels,\n cfg[k] * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(vgg[v].out_channels,\n cfg[k] * num_classes, kernel_size=3, padding=1)]\n for k, v in enumerate(extra_layers[1::2], 2):\n loc_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * num_classes, 
kernel_size=3, padding=1)]\n return vgg, extra_layers, (loc_layers, conf_layers)\n\n\nbase = {\n '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n '512': [],\n}\nextras = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [],\n}\nmbox = {\n '300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location\n '512': [],\n}\n\n\ndef build_ssd(phase, size=300, num_classes=21):\n if phase != \"test\" and phase != \"train\":\n print(\"ERROR: Phase: \" + phase + \" not recognized\")\n return\n if size != 300:\n print(\"ERROR: You specified size \" + repr(size) + \". However, \" +\n \"currently only SSD300 (size=300) is supported!\")\n return\n base_, extras_, head_ = multibox(vgg(base[str(size)], 3),\n add_extras(extras[str(size)], 1024),\n mbox[str(size)], num_classes)\n return SSD(phase, size, base_, extras_, head_, num_classes)\n" ]
[ [ "torch.nn.Softmax", "torch.load", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
algorithmvisualised/InsertionSort
[ "808e3e5e8be5c5d322be774325312c5767614bc1" ]
[ "insertion.py" ]
[ "import bpy\r\nimport numpy as np\r\nimport random\r\n\r\nSHOW_CODE_ANIMATION = False\r\n\r\nif SHOW_CODE_ANIMATION:\r\n TOTAL_NUMBERS = 16\r\n UNSORTED_ARRAY = np.random.randint(low = 1, high = 10, size=TOTAL_NUMBERS)\r\n print(UNSORTED_ARRAY)\r\n UNSORTED_ARRAY = [8, 3, 1, 6, 4, 2, 9, 5 ]\r\nelse:\r\n TOTAL_NUMBERS = 70\r\n UNSORTED_ARRAY = np.random.randint(low = 1, high = 36 , size=TOTAL_NUMBERS)\r\n print(UNSORTED_ARRAY)\r\n #UNSORTED_ARRAY = [7, 6, 8, 2, 9]\r\n #UNSORTED_ARRAY = [7, 2, 9, 8, 5, 6, 4, 7, 9, 5, 4, 1, 8, 3, 7, 5 ]\r\n #UNSORTED_ARRAY = [ 6, 8, 12, 8, 8, 5, 15, 1, 13, 8, 9, 11, 1 1 14 4 9 4 16 12 2 8 9 4 6 9 9 3 4 4]\r\n\r\nTOTAL_BLOCKS = len(UNSORTED_ARRAY)\r\n\r\nif SHOW_CODE_ANIMATION:\r\n X_AxisReferencePosition = 5.8\r\n Z_AxisReferencePosition = 0\r\nelse:\r\n X_AxisReferencePosition = -0\r\n Z_AxisReferencePosition = -7\r\n\r\n\r\nBLOCK_SIZE = 1\r\nBASE_BUFFER_WIDTH = 2\r\nBASE_WIDTH = TOTAL_BLOCKS * BLOCK_SIZE\r\nBASE_DEPTH = COLUMN_DEPTH = 1\r\nBASE_HEIGHT = 0.05\r\nCOLUMN_BUFFER_HEIGHT = 5\r\nCOLUMN_HEIGHT = np.amax(UNSORTED_ARRAY) + COLUMN_BUFFER_HEIGHT\r\nCOLUMN_WIDTH = 0.05\r\nframe_num = 1\r\nFRAME_RATE = 2\r\nFRAME_RATE_MULTIPLIER = 1\r\nUNIT_PER_FRAME = BLOCK_SIZE / FRAME_RATE\r\nELEMENT_STRING = 'element_{0}_value_{1}'\r\nCODE_LINE_HEIGHT = 2.8\r\n\r\n\r\ntext_animation_map = {}\r\nred = bpy.data.materials.new('Red')\r\nblue = bpy.data.materials.new('Blue')\r\nyellow = bpy.data.materials.new('Yellow')\r\n\r\nbpy.context.scene.frame_set(frame_num)\r\n\r\ncolor_map = {}\r\nhighlight_frame_map = {}\r\ni_watcher_frame_map = {}\r\nj_watcher_frame_map = {}\r\nkey_watcher_frame_map = {}\r\narr_j_watcher_frame_map = {}\r\n\r\nHIGHLIGHT_FRAME_STRING = 'startFrame_{0}_endframe_{1}'\r\n\r\nINACTIVE_CODE_COLOR = (1, 0.05, 0.16, 1)\r\nINACTIVE_CODE_MATERIAL_NAME = 'material_inactive_code'\r\n\r\nACTIVE_CODE_COLOR = (0, 0, 1, 1)\r\nACTIVE_CODE_MATERIAL_NAME = 'material_active_code'\r\n\r\nINACTIVE_CODE_MATERIAL = False\r\nACTIVE_CODE_MATERIAL = False\r\n\r\nWATCHER_COLOR = (0.748, 0.159, 0.568, 1)\r\n\r\n\r\n\r\n\r\ndef createMaterials():\r\n global INACTIVE_CODE_MATERIAL, ACTIVE_CODE_MATERIAL\r\n\r\n INACTIVE_CODE_MATERIAL = bpy.data.materials.new(INACTIVE_CODE_MATERIAL_NAME)\r\n #INACTIVE_CODE_MATERIAL.name = INACTIVE_CODE_MATERIAL_NAME\r\n INACTIVE_CODE_MATERIAL.diffuse_color = INACTIVE_CODE_COLOR\r\n #bpy.context.object.active_material.metallic = 1\r\n #INACTIVE_CODE_MATERIAL.metallic = 0.1\r\n\r\n ACTIVE_CODE_MATERIAL = bpy.data.materials.new(ACTIVE_CODE_MATERIAL_NAME)\r\n ACTIVE_CODE_MATERIAL.diffuse_color = ACTIVE_CODE_COLOR\r\n #ACTIVE_CODE_MATERIAL.metallic = 0.1\r\n\r\ndef mapColorToValues():\r\n global color_map, UNSORTED_ARRAY\r\n for i in np.unique(UNSORTED_ARRAY):\r\n color_map[i] = get_random_color()\r\n return\r\n\r\n\r\ndef setColorToElementFromMap(obj, obj_id, val):\r\n global color_map\r\n material = setColor(obj, obj_id, color_map.get(val))\r\n #material.metallic = 0.1\r\n return\r\n\r\ndef setColor(obj, obj_id, color):\r\n material = bpy.data.materials.new('material_' + obj_id)\r\n material.diffuse_color = color\r\n #material.specular_hardness = 200\r\n obj.data.materials.append(material)\r\n return material\r\n\r\n\r\ndef clearAllMaterial():\r\n #return\r\n for material in bpy.data.materials:\r\n #material.user_clear()\r\n bpy.data.materials.remove(material)\r\n return\r\n\r\ndef clearObject():\r\n for ob in bpy.context.scene.objects:\r\n print(ob.type)\r\n objs = [ob for ob in bpy.context.scene.objects if ob.type in ('MESH', 'FONT')]\r\n 
bpy.ops.object.delete({\"selected_objects\": objs})\r\n return\r\n\r\ndef createBase():\r\n bpy.ops.mesh.primitive_cube_add(location=(X_AxisReferencePosition, 0, Z_AxisReferencePosition), scale=(BASE_WIDTH, BASE_DEPTH, BASE_HEIGHT))\r\n cube = bpy.context.object\r\n cube.name = 'cube_base'\r\n return\r\n\r\ndef createColumn():\r\n bpy.ops.mesh.primitive_cube_add(\r\n location=(0-COLUMN_WIDTH - BASE_WIDTH/2, 0, COLUMN_HEIGHT/2),\r\n scale=(COLUMN_WIDTH, COLUMN_DEPTH, COLUMN_HEIGHT)\r\n )\r\n cube = bpy.context.object\r\n cube.name = 'cube_column'\r\n return\r\n\r\ndef getElName(i, val):\r\n txt = ELEMENT_STRING\r\n return txt.format(i, val)\r\n\r\ndef getIndexAndValueFromElName(elName):\r\n split = elName.split('_')\r\n return split[1], split[3]\r\n\r\n\r\ndef getXPositionBasedUponIndex(index):\r\n extreme_left = X_AxisReferencePosition - BASE_WIDTH/2\r\n return extreme_left + BLOCK_SIZE * (index + 0.5)\r\n\r\ndef createElements():\r\n extreme_left = X_AxisReferencePosition - BASE_WIDTH/2\r\n for i, val in enumerate(UNSORTED_ARRAY):\r\n #print(\"POSITION WOULD BE\" + str(extreme_left + BLOCK_SIZE * (i+0.5)))\r\n element_location = (getXPositionBasedUponIndex(i) , 0, Z_AxisReferencePosition + val/2)\r\n element_scale = (BLOCK_SIZE, BASE_DEPTH, val)\r\n #print(\"ELEMENT SCALE WOULD BE\")\r\n print(element_scale)\r\n bpy.ops.mesh.primitive_cube_add(\r\n location=element_location,\r\n #scale=element_scale\r\n )\r\n element = bpy.context.object\r\n element.dimensions = element_scale\r\n element.name = getElName(i, val)\r\n element.display.show_shadows = False\r\n setColorToElementFromMap(element, element.name, val)\r\n return\r\n\r\n\r\ndef createAxis():\r\n createBase()\r\n #createColumn()\r\n return\r\n\r\ndef renderChart():\r\n createAxis()\r\n createElements()\r\n #createElementsAsNumber()\r\n return\r\n\r\n\r\ndef registerHighlightCodeFrames(codeLine, startFrame, endFrame):\r\n global highlight_frame_map\r\n highlight_frame_map[HIGHLIGHT_FRAME_STRING.format(startFrame, endFrame)] = {\r\n 'startFrame': startFrame,\r\n 'endFrame': endFrame,\r\n 'activeline': codeLine\r\n }\r\n return\r\n\r\ndef registerIWatcherFrame(value, startFrame, endFrame):\r\n global i_watcher_frame_map\r\n i_watcher_frame_map[HIGHLIGHT_FRAME_STRING.format(startFrame, endFrame)] = {\r\n 'startFrame': startFrame,\r\n 'endFrame': endFrame,\r\n 'value': value\r\n }\r\n return\r\n\r\ndef registerKeyWatcherFrame(value, startFrame, endFrame):\r\n global key_watcher_frame_map\r\n key_watcher_frame_map[HIGHLIGHT_FRAME_STRING.format(startFrame, endFrame)] = {\r\n 'startFrame': startFrame,\r\n 'endFrame': endFrame,\r\n 'value': value\r\n }\r\n return\r\n\r\ndef registerJWatcherFrame(value, startFrame, endFrame):\r\n global j_watcher_frame_map\r\n #print(\"REGISTERING J WATCHER WITH VALUE {0} START FRAME {1} END FRAME {2}\".format(value, startFrame, endFrame))\r\n j_watcher_frame_map[HIGHLIGHT_FRAME_STRING.format(startFrame, endFrame)] = {\r\n 'startFrame': startFrame,\r\n 'endFrame': endFrame,\r\n 'value': value\r\n }\r\n return\r\ndef registerArrjWatcherFrame(value, startFrame, endFrame):\r\n global arr_j_watcher_frame_map\r\n arr_j_watcher_frame_map[HIGHLIGHT_FRAME_STRING.format(startFrame, endFrame)] = {\r\n 'startFrame': startFrame,\r\n 'endFrame': endFrame,\r\n 'value': value\r\n }\r\n return\r\n\r\ndef insertionSort(arr):\r\n global frame_num, FRAME_RATE\r\n original_arr = np.copy(arr)\r\n for i in range(1, len(arr)):\r\n #print(\"FRAME NUM IS !!!\" + str(frame_num))\r\n registerHighlightCodeFrames( 'line0', frame_num,\r\n 
frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerIWatcherFrame(i, frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerKeyWatcherFrame(\"\", frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerJWatcherFrame(\"\", frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n frame_num += moveElementToDestinationZ(\r\n 'codeFrame',\r\n 10.3,\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n #print(\"FRAME NUM IS ****\" + str(frame_num))\r\n key = arr[i]\r\n keyValue = getValueFromElName(key)\r\n moveElementToDestinationX(\r\n 'IText',\r\n getXPositionBasedUponIndex(i),\r\n #getXPositionBasedUponIndex(j + 1),\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n moveElementToDestinationX(\r\n 'KeyText',\r\n getXPositionBasedUponIndex(i),\r\n #getXPositionBasedUponIndex(j + 1),\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n registerHighlightCodeFrames( 'line1', frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerKeyWatcherFrame(keyValue, frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n frame_num += moveElementToDestinationZ(\r\n 'codeFrame',\r\n 7.1,\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n j = i -1\r\n\r\n #######\r\n finalKeyPositionX = bpy.data.objects[arr[j]].location[0]\r\n ######\r\n registerHighlightCodeFrames( 'line2', frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerJWatcherFrame(j, frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerArrjWatcherFrame(getValueFromElName(arr[j]), frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n moveElementToDestinationX(\r\n 'JText',\r\n getXPositionBasedUponIndex(j),\r\n #getXPositionBasedUponIndex(j + 1),\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n frame_num += moveElementToDestinationZ(\r\n 'codeFrame',\r\n 3.8,\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n while (j >= 0 and True):\r\n #print(\"JTH VALUE IS \" + str(getValueFromElName(arr[j])))\r\n registerHighlightCodeFrames( 'line3', frame_num, frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerJWatcherFrame(j, frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerArrjWatcherFrame(getValueFromElName(arr[j]), frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n moveElementToDestinationX(\r\n 'JText',\r\n getXPositionBasedUponIndex(j),\r\n #getXPositionBasedUponIndex(j + 1),\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n frame_num += moveElementToDestinationZ(\r\n 'codeFrame',\r\n 0.43,\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n if( not(keyValue < getValueFromElName(arr[j])) ):\r\n break\r\n #registerHighlightCodeFrames( 'line4', frame_num, frame_num + 2 * FRAME_RATE)\r\n # frame_num += moveElementToDestinationZ(\r\n # 'codeFrame',\r\n # 2,\r\n # frame_num,\r\n # frame_num + 2 * FRAME_RATE\r\n # )\r\n # swapElementsWithAnimation(\r\n # arr[i],\r\n # arr[min_idx],\r\n # current_frame,\r\n # current_frame + FRAME_RATE\r\n # )\r\n\r\n jthplueOneElement = arr[j+1]\r\n jthElement = arr[j]\r\n print(\"J+1 is {0} and J is {1}\".format(jthplueOneElement, jthElement))\r\n arr[j+1] = arr[j]\r\n registerHighlightCodeFrames( 'line4', frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n\r\n frame_num += moveElementToDestinationZ(\r\n 'codeFrame',\r\n 
-2.8,\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n # frame_num += hideJthPlusOneAndMoveJthElementToIt(\r\n # jthElement,\r\n # jthplueOneElement,\r\n # frame_num,\r\n # frame_num + 2 * FRAME_RATE\r\n # )\r\n frame_num += moveElementToDestinationX(\r\n jthElement,\r\n getXPositionBasedUponIndex(j + 1),\r\n #getXPositionBasedUponIndex(j + 1),\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n j -= 1\r\n if( j >= 0):\r\n finalKeyPositionX = bpy.data.objects[arr[j]].location[0]\r\n ####\r\n registerHighlightCodeFrames( 'line5', frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n registerJWatcherFrame(j, frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n if(j >= 0):\r\n registerArrjWatcherFrame(getValueFromElName(arr[j]), frame_num, frame_num + 2 * FRAME_RATE)\r\n else :\r\n registerArrjWatcherFrame(\"\", frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n frame_num += moveElementToDestinationZ(\r\n 'codeFrame',\r\n -6.2,\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n finalSwapJPlueOne = arr[j+1]\r\n finalSwapJ = key\r\n #print(\"FINAL SWAP J+1 is {0} and final Swap is {1}\".format(finalSwapJPlueOne, finalSwapJ))\r\n arr[j+1] = key\r\n registerHighlightCodeFrames( 'line6', frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE)\r\n frame_num += moveElementToDestinationZ(\r\n 'codeFrame',\r\n -9.4,\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n print(\"SHOULD MOVE ELEMENT {0} to {1}\".format(finalSwapJ, finalKeyPositionX))\r\n #getXPositionBasedUponIndex\r\n\r\n moveElementToDestinationX(\r\n 'KeyText',\r\n getXPositionBasedUponIndex(j + 1),\r\n #getXPositionBasedUponIndex(j + 1),\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n frame_num += moveElementToDestinationX(\r\n finalSwapJ,\r\n getXPositionBasedUponIndex(j + 1),\r\n frame_num,\r\n frame_num + FRAME_RATE_MULTIPLIER * FRAME_RATE\r\n )\r\n\r\n #frame_num += hideJthPlusOneAndMoveJthElementToIt(\r\n # key,\r\n # jthplueOneElement,\r\n # frame_num,\r\n # frame_num + 2 * FRAME_RATE\r\n #)\r\n #print(\"ARR WOULD BE\")\r\n #print(arr)\r\n return arr\r\n\r\n\r\n\r\ndef hideJthPlusOneAndMoveJthElementToIt(jthElementId, jthPlusOneElementId, startFrame, endFrame):\r\n global frame_num, FRAME_RATE\r\n jthPlusOneElement = bpy.data.objects[jthPlusOneElementId]\r\n #jthPlusOneElementId.hide()\r\n #element_1 = bpy.data.objects[el_1]\r\n #element_2 = bpy.data.objects[el_2]\r\n\r\n #print(\"ELEMENT IDs are \" + el_1 + \" , \" + el_2 )\r\n #element_1_x = jthPlusOneElement.location[0]\r\n element_2_x = jthPlusOneElement.location[0]\r\n\r\n #print(\"ELEMENT LOCATIONs are \" + str(element_1_x) + \" , \" + str(element_2_x) )\r\n #print(\"SHOULD MOVE ELEMENT {0} to {1}\".format(finalSwapJ, finalKeyPositionX))\r\n return moveElementToDestinationX(\r\n jthElementId,\r\n element_2_x,\r\n startFrame,\r\n endFrame\r\n )\r\n\r\ndef getValueFromElName(el):\r\n idxEl, idxVal = getIndexAndValueFromElName(el)\r\n return int(idxVal)\r\n\r\ndef selectionSort(arr):\r\n global frame_num, FRAME_RATE\r\n original_arr = np.copy(arr)\r\n for i in range(len(arr)):\r\n min_idx = i\r\n ithElement = arr[i]\r\n current_frame = frame_num\r\n frame_num += moveIndexElement(\r\n 'IText',\r\n bpy.data.objects[arr[i]],\r\n current_frame,\r\n current_frame + FRAME_RATE*2\r\n )\r\n print(\"MOVING JTEXT FROM \" + str(current_frame) + \" frame to \" + str(current_frame + FRAME_RATE))\r\n 
moveIndexElement(\r\n 'JText',\r\n bpy.data.objects[arr[i]],\r\n current_frame,\r\n current_frame + FRAME_RATE*2\r\n )\r\n moveIndexElement(\r\n 'MinText',\r\n bpy.data.objects[arr[i]],\r\n current_frame,\r\n current_frame + FRAME_RATE*2\r\n )\r\n for j in range(i+1, len(arr)):\r\n current_frame = frame_num\r\n minIdxEl, minIdxVal = getIndexAndValueFromElName(arr[min_idx])\r\n JEl, JVal = getIndexAndValueFromElName(arr[j])\r\n print(\"MOVING JTEXT FROM \" + str(current_frame) + \" frame to \" + str(current_frame + FRAME_RATE))\r\n frame_num += moveIndexElement(\r\n 'JText',\r\n bpy.data.objects[arr[j]],\r\n current_frame,\r\n current_frame + FRAME_RATE\r\n )\r\n if(minIdxVal > JVal):\r\n min_idx = j\r\n frame_num += moveIndexElement(\r\n 'MinText',\r\n bpy.data.objects[arr[min_idx]],\r\n current_frame,\r\n current_frame + FRAME_RATE\r\n )\r\n if(min_idx != i):\r\n current_frame = frame_num\r\n print(\"MOVING ELEMENTS FROM \" + str(current_frame) + \" frame to \" + str(current_frame + FRAME_RATE))\r\n frame_num += swapElementsWithAnimation(\r\n arr[i],\r\n arr[min_idx],\r\n current_frame,\r\n current_frame + FRAME_RATE\r\n )\r\n\r\n arr[i], arr[min_idx] = arr[min_idx], arr[i]\r\n return\r\n\r\n\r\n# def resetIndex():\r\n# global frame_num, FRAME_RATE\r\n# extreme_left = 0 - BASE_WIDTH/2\r\n# i = 0\r\n# JtextLeft = extreme_left + BLOCK_SIZE * (i+ 0.5)\r\n# JTextIndex = bpy.data.objects['JText']\r\n# current_frame = frame_num\r\n# JTextIndex.location[0] = JtextLeft\r\n# JTextIndex.keyframe_insert(data_path=\"location\", frame = current_frame + 1, index = -1 )\r\n# current_frame += 1\r\n# bpy.context.scene.frame_set(current_frame)\r\n# return current_frame\r\n\r\n\r\n#def moveIndex\r\n\r\n\r\ndef moveElementToDestinationZ(elId, destination_z, startFrame, endFrame):\r\n if SHOW_CODE_ANIMATION:\r\n element = bpy.data.objects[elId]\r\n source_z = element.location[2]\r\n delta = source_z - destination_z\r\n delta_abs = abs(delta)\r\n total_frames = endFrame - startFrame\r\n #print(\"START FRAME IS \" + str(startFrame) + \" END FRAME IS \" + str(endFrame))\r\n element.keyframe_insert(data_path=\"location\", frame = startFrame, index = -1 )\r\n if(total_frames > 0 ):\r\n distance_per_delta = delta / total_frames\r\n current_frame = startFrame\r\n\r\n #print(\"MOVING \" + elId + \"BY \" + str(delta) + \"IN \" + str(total_frames) + \"FRAMES\")\r\n for i in range(int(total_frames)):\r\n loc = element.location[2] - 1 * distance_per_delta\r\n #print(\"LOCATION IS \"+ str(loc) + \"DESTINATION WAS \" + str(destination_z) )\r\n current_frame += 1\r\n element.location[2] = loc\r\n #print(\"CURRENT FRAME IS \"+ str(current_frame))\r\n element.keyframe_insert(data_path=\"location\", frame = current_frame, index = -1 )\r\n bpy.context.scene.frame_set(current_frame)\r\n\r\n element.location[2] = destination_z\r\n element.keyframe_insert(data_path=\"location\", frame = endFrame, index = -1 )\r\n #current_frame += 1\r\n bpy.context.scene.frame_set(endFrame)\r\n return total_frames\r\n return 0\r\n\r\n\r\ndef moveElementToDestinationX(indexElId, destination_x, startFrame, endFrame):\r\n indexEl = bpy.data.objects[indexElId]\r\n source_x = indexEl.location[0]\r\n delta = source_x - destination_x\r\n delta_abs = abs(delta)\r\n total_frames = endFrame - startFrame\r\n indexEl.keyframe_insert(data_path=\"location\", frame = startFrame, index = -1 )\r\n #print(\"TOTLA FRAMSE ARE \", str(total_frames))\r\n if(total_frames > 0):\r\n distance_per_delta = delta / total_frames\r\n current_frame = startFrame\r\n\r\n 
#print(\"MOVING \" + indexElId + \"BY \" + str(delta) + \"IN \" + str(total_frames) + \"FRAMES\")\r\n for i in range(int(total_frames)):\r\n loc = indexEl.location[0] - 1 * distance_per_delta\r\n #print(\"LOCATION IS \"+ str(loc))\r\n current_frame += 1\r\n indexEl.location[0] = loc\r\n #print(\"CURRENT FRAME IS \"+ str(current_frame))\r\n indexEl.keyframe_insert(data_path=\"location\", frame = current_frame, index = -1 )\r\n bpy.context.scene.frame_set(current_frame)\r\n\r\n indexEl.location[0] = destination_x\r\n indexEl.keyframe_insert(data_path=\"location\", frame = endFrame, index = -1 )\r\n #current_frame += 1\r\n bpy.context.scene.frame_set(endFrame)\r\n return total_frames\r\n\r\n\r\ndef moveIndexElement(indexElId, destination, startFrame, endFrame):\r\n #return 0\r\n destination_x = destination.location[0]\r\n return moveElementToDestinationX(\r\n indexElId,\r\n destination_x,\r\n startFrame,\r\n endFrame\r\n )\r\n\r\ndef moveIndex(sourceEl, destinationEl):\r\n global frame_num, FRAME_RATE\r\n source = bpy.data.objects[sourceEl]\r\n destination = bpy.data.objects[destinationEl]\r\n JTextIndex = bpy.data.objects['JText']\r\n\r\n source_x = source.location[0]\r\n destination_x = destination.location[0]\r\n\r\n delta = source_x - destination_x\r\n delta_abs = abs(delta)\r\n total_frames = delta_abs / UNIT_PER_FRAME\r\n distance_per_delta = delta / total_frames\r\n\r\n current_frame = frame_num\r\n\r\n #print('DELTA IS '+ str(delta))\r\n #print('DELTA ABS IS '+ str(delta_abs))\r\n #print('TOTAL FRAMES ARE ' + str(total_frames))\r\n #print('distance_per_delta ' + str(distance_per_delta))\r\n\r\n for i in range(int(total_frames)):\r\n JTextIndex.location[0] = JTextIndex.location[0] - 1 * distance_per_delta\r\n JTextIndex.keyframe_insert(data_path=\"location\", frame = current_frame + 1, index = -1 )\r\n current_frame += 1\r\n bpy.context.scene.frame_set(current_frame)\r\n\r\n JTextIndex.location[0] = destination_x\r\n JTextIndex.keyframe_insert(data_path=\"location\", frame = current_frame + 1, index = -1 )\r\n current_frame += 1\r\n bpy.context.scene.frame_set(current_frame)\r\n\r\n return current_frame\r\n\r\n\r\n\r\ndef swapElementsWithAnimation(el_1, el_2, startFrame, endFrame, updateFrameNum = False):\r\n #return 0\r\n global frame_num, FRAME_RATE\r\n element_1 = bpy.data.objects[el_1]\r\n element_2 = bpy.data.objects[el_2]\r\n\r\n print(\"ELEMENT IDs are \" + el_1 + \" , \" + el_2 )\r\n element_1_x = element_1.location[0]\r\n element_2_x = element_2.location[0]\r\n\r\n print(\"ELEMENT LOCATIONs are \" + str(element_1_x) + \" , \" + str(element_2_x) )\r\n moveElementToDestinationX(\r\n el_1,\r\n element_2_x,\r\n startFrame,\r\n endFrame\r\n )\r\n return moveElementToDestinationX(\r\n el_2,\r\n element_1_x,\r\n startFrame,\r\n endFrame\r\n )\r\n\r\ndef get_random_color():\r\n ''' generate rgb using a list comprehension '''\r\n r, g, b = [random.random() for i in range(3)]\r\n return r, g, b, 1\r\n\r\ndef renderIndex():\r\n extreme_left = X_AxisReferencePosition - BASE_WIDTH/2\r\n i = 1\r\n TEXT_SIZE = 0.7\r\n JtextLeft = extreme_left + BLOCK_SIZE * (0.5)\r\n ItextLeft = extreme_left + BLOCK_SIZE * (i + 0.5)\r\n MintextLeft = extreme_left + BLOCK_SIZE * (0.5)\r\n text_dimension = (0.6, 0.6, 0.6)\r\n indexColor = (0.01,0.01,0.01,1)\r\n\r\n JPlusOneLeft = extreme_left + BLOCK_SIZE * (i+1.5)\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(JtextLeft, -2, Z_AxisReferencePosition + -1.3), scale = text_dimension)\r\n Jtext = bpy.context.object\r\n Jtext.name = 
'JText'\r\n Jtext.rotation_euler[0] = 1.5708\r\n Jtext.data.body = 'J'\r\n Jtext.data.size = TEXT_SIZE\r\n #Jtext.dimensions = text_dimension\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n Jtext.display.show_shadows = False\r\n setColor(Jtext, 'JText', indexColor)\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(ItextLeft, -2, Z_AxisReferencePosition + -0.7), scale=(0.6, 0.6, 0.6))\r\n KeyText = bpy.context.object\r\n KeyText.name = 'KeyText'\r\n KeyText.rotation_euler[0] = 1.5708\r\n KeyText.data.body = 'KEY'\r\n KeyText.data.size = TEXT_SIZE\r\n\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n KeyText.display.show_shadows = False\r\n KeyText.delta_location[0] = -0.3\r\n\r\n setColor(KeyText, 'KeyText', indexColor)\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(ItextLeft, -2, Z_AxisReferencePosition + -2.0), scale=(0.6, 0.6, 0.6))\r\n IText = bpy.context.object\r\n IText.name = 'IText'\r\n IText.rotation_euler[0] = 1.5708\r\n IText.data.body = 'I'\r\n IText.data.size = TEXT_SIZE\r\n\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n IText.display.show_shadows = False\r\n #IText.delta_location[0] = -0.3\r\n setColor(IText, 'IText', indexColor)\r\n\r\n\r\n return\r\n\r\n\r\ndef renderCode():\r\n text_dimension = (1,1,1)\r\n codeColor = (0.01,0.01,0.01,1)\r\n x_position = -16\r\n start_z_position = 8.5\r\n line_height = CODE_LINE_HEIGHT\r\n\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, 0, start_z_position - 0 * line_height ),\r\n scale = text_dimension)\r\n line0 = bpy.context.object\r\n line0.name = 'line0'\r\n line0.data.body = 'for i in range(1, len(arr)):'\r\n line0.rotation_euler[0] = 1.5708\r\n line0.data.materials.append(ACTIVE_CODE_MATERIAL)\r\n line0.data.materials.append(INACTIVE_CODE_MATERIAL)\r\n line0.display.show_shadows = False\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, 0, start_z_position - 1 * line_height ),\r\n scale = text_dimension\r\n )\r\n line1 = bpy.context.object\r\n line1.name = 'line1'\r\n line1.data.body = ' key = arr[i]'\r\n line1.rotation_euler[0] = 1.5708\r\n line1.data.materials.append(ACTIVE_CODE_MATERIAL)\r\n line1.data.materials.append(INACTIVE_CODE_MATERIAL)\r\n line1.display.show_shadows = False\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, 0, start_z_position - 2 * line_height ),\r\n scale = text_dimension\r\n )\r\n line2 = bpy.context.object\r\n line2.name = 'line2'\r\n line2.data.body = ' j = i-1'\r\n line2.rotation_euler[0] = 1.5708\r\n line2.data.materials.append(ACTIVE_CODE_MATERIAL)\r\n line2.data.materials.append(INACTIVE_CODE_MATERIAL)\r\n line2.display.show_shadows = False\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, 0, start_z_position - 3 * line_height ),\r\n scale = text_dimension\r\n )\r\n line3 = bpy.context.object\r\n line3.name = 'line3'\r\n line3.data.body = ' while j >= 0 and key < arr[j] :'\r\n line3.rotation_euler[0] = 1.5708\r\n line3.data.materials.append(ACTIVE_CODE_MATERIAL)\r\n line3.data.materials.append(INACTIVE_CODE_MATERIAL)\r\n line3.display.show_shadows = False\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, 0, start_z_position - 4 * line_height ),\r\n scale = text_dimension\r\n )\r\n line4 = 
bpy.context.object\r\n line4.name = 'line4'\r\n line4.data.body = ' arr[j + 1] = arr[j]'\r\n line4.rotation_euler[0] = 1.5708\r\n line4.data.materials.append(ACTIVE_CODE_MATERIAL)\r\n line4.data.materials.append(INACTIVE_CODE_MATERIAL)\r\n line4.display.show_shadows = False\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, 0, start_z_position - 5 * line_height ),\r\n scale = text_dimension\r\n )\r\n line5 = bpy.context.object\r\n line5.name = 'line5'\r\n line5.data.body = ' j -= 1'\r\n line5.rotation_euler[0] = 1.5708\r\n line5.data.materials.append(ACTIVE_CODE_MATERIAL)\r\n line5.data.materials.append(INACTIVE_CODE_MATERIAL)\r\n line5.display.show_shadows = False\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, 0, start_z_position - 6 * line_height ),\r\n scale = text_dimension\r\n )\r\n line6 = bpy.context.object\r\n line6.name = 'line6'\r\n line6.data.body = ' arr[j + 1] = key'\r\n line6.rotation_euler[0] = 1.5708\r\n line6.data.materials.append(ACTIVE_CODE_MATERIAL)\r\n line6.data.materials.append(INACTIVE_CODE_MATERIAL)\r\n line6.display.show_shadows = False\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n\r\n renderCodeFrame()\r\n\r\n makeCodeActive('line0')\r\n #makeCodeActive('line1')\r\n\r\n return\r\n\r\ndef makeCodeActive(lineItemToActiveName):\r\n codeLineNameArray = [\r\n 'line0',\r\n 'line1',\r\n 'line2',\r\n 'line3', 'line4', 'line5', 'line6'\r\n ]\r\n for lineItemName in codeLineNameArray:\r\n lineitem = bpy.data.objects[lineItemName]\r\n lineitem.active_material = INACTIVE_CODE_MATERIAL\r\n\r\n lineItemToActive = bpy.data.objects[lineItemToActiveName]\r\n lineItemToActive.active_material = ACTIVE_CODE_MATERIAL\r\n return\r\n\r\ndef renderCodeFrame():\r\n frameLocation = (-11, 9, 10.3)\r\n bpy.ops.mesh.primitive_plane_add(\r\n size=2,\r\n location=frameLocation,\r\n #rotate = (0, 90, 90)\r\n scale=(1, 1, 1)\r\n\r\n )\r\n codeFrame = bpy.context.object\r\n codeFrame.dimensions = (2,20,0)\r\n codeFrame.rotation_euler = (0, 1.5708 , 1.5708)\r\n codeFrame.name = 'codeFrame'\r\n\r\n material = bpy.data.materials.new('material_' + codeFrame.name)\r\n material.diffuse_color = (1, 1, 1, 0.1)\r\n #material.specular_hardness = 200\r\n codeFrame.data.materials.append(material)\r\n codeFrame.display.show_shadows = False\r\n registerHighlightCodeFrames( 'line0', frame_num, frame_num)\r\n moveElementToDestinationZ('codeFrame', 10.3, frame_num, frame_num)\r\n return\r\n\r\n\r\ndef animateString(scene):\r\n global frame_num\r\n current_frame = scene.frame_current\r\n if current_frame in text_animation_map.keys():\r\n text_animate = text_animation_map.get(current_frame)\r\n JCounter = bpy.data.objects['JCounter']\r\n ICounter = bpy.data.objects['ICounter']\r\n print(text_animate)\r\n JCounter.data.body = text_animate['j']\r\n ICounter.data.body = text_animate['i']\r\n\r\n return\r\n\r\ndef highlightWatcher(scene):\r\n current_frame = scene.frame_current\r\n allIWatcherFrames = list(i_watcher_frame_map.keys())\r\n for frameKey in allIWatcherFrames:\r\n startFrame = int(str(frameKey).split('_')[1])\r\n endFrame = int(str(frameKey).split('_')[3])\r\n if(startFrame <= current_frame and current_frame <= endFrame):\r\n frame = i_watcher_frame_map[frameKey]\r\n if(current_frame == endFrame):\r\n renderValueOnWatcher('Iwatcher', 'I = {0}'.format(frame['value']))\r\n break\r\n\r\n allJWatcherFrames = list(j_watcher_frame_map.keys())\r\n for 
frameKey in allJWatcherFrames:\r\n startFrame = int(str(frameKey).split('_')[1])\r\n endFrame = int(str(frameKey).split('_')[3])\r\n if(startFrame <= current_frame and current_frame <= endFrame):\r\n frame = j_watcher_frame_map[frameKey]\r\n if(current_frame == endFrame):\r\n renderValueOnWatcher('Jwatcher', 'J = {0}'.format(frame['value']))\r\n break\r\n\r\n allKeyWatcherFrames = list(key_watcher_frame_map.keys())\r\n for frameKey in allKeyWatcherFrames:\r\n startFrame = int(str(frameKey).split('_')[1])\r\n endFrame = int(str(frameKey).split('_')[3])\r\n if(startFrame <= current_frame and current_frame <= endFrame):\r\n frame = key_watcher_frame_map[frameKey]\r\n if(current_frame == endFrame):\r\n renderValueOnWatcher('Keywatcher', 'KEY={0}'.format(frame['value']))\r\n break\r\n\r\n allArrJWatcherFrames = list(arr_j_watcher_frame_map.keys())\r\n for frameKey in allArrJWatcherFrames:\r\n startFrame = int(str(frameKey).split('_')[1])\r\n endFrame = int(str(frameKey).split('_')[3])\r\n if(startFrame <= current_frame and current_frame <= endFrame):\r\n frame = arr_j_watcher_frame_map[frameKey]\r\n if(current_frame == endFrame):\r\n renderValueOnWatcher('ArrJwatcher', 'arr[J] = {0}'.format(frame['value']))\r\n break\r\n return\r\n\r\ndef highlishtFrameCode(scene):\r\n current_frame = scene.frame_current\r\n allHighLightFrames = list(highlight_frame_map.keys())\r\n for frameKey in allHighLightFrames:\r\n startFrame = int(str(frameKey).split('_')[1])\r\n endFrame = int(str(frameKey).split('_')[3])\r\n\r\n if(startFrame <= current_frame and current_frame <= endFrame):\r\n frame = highlight_frame_map[frameKey]\r\n #print(\"MAKING \" + frame['activeline'] + \" ACTIVE\")\r\n if(current_frame == endFrame):\r\n makeCodeActive(frame['activeline'])\r\n return\r\n return\r\n\r\ndef setValueToCounter(i, j):\r\n print(\"I is \" + str(i))\r\n global frame_num\r\n text_animation_map[frame_num] = {\r\n 'i': 'I={0}'.format(i),\r\n 'j': 'J={0}'.format(j)\r\n }\r\n return\r\n\r\n\r\n\r\n\r\n\r\ndef renderCounter():\r\n extreme_left = 0 - BASE_WIDTH/2\r\n i = 0\r\n JtextLeft = extreme_left - 1.3\r\n counterColor = (0.5,0.8,0.1,1)\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(JtextLeft, -2, 2), scale=(0.6, 0.6, 0.6))\r\n JCounter = bpy.context.object\r\n JCounter.name = 'JCounter'\r\n JCounter.rotation_euler[0] = 1.5708\r\n #Jtext.data.body = 'J=0'\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n setColor(JCounter, 'JCounter', counterColor)\r\n JCounter.data.size = 0.6\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(JtextLeft, -2, 4), scale=(0.6, 0.6, 0.6))\r\n ICounter = bpy.context.object\r\n ICounter.name = 'ICounter'\r\n ICounter.rotation_euler[0] = 1.5708\r\n #Itext.data.body = 'I=0'\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n setColor(ICounter, 'ICounter', counterColor)\r\n setValueToCounter(0,0)\r\n ICounter.data.size = 0.6\r\n return\r\n\r\n\r\n\r\ndef renderWatcher():\r\n global frame_num\r\n extreme_left = 0 - BASE_WIDTH/2\r\n watcherColor = WATCHER_COLOR\r\n x_position = 4\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, -2, -5), scale=(0.6, 0.6, 0.6))\r\n Iwatcher = bpy.context.object\r\n Iwatcher.name = 'Iwatcher'\r\n Iwatcher.rotation_euler[0] = 1.5708\r\n #Jtext.data.body = 'J=0'\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n Iwatcher.display.show_shadows = False\r\n\r\n setColor(Iwatcher, 'Iwatcher', watcherColor)\r\n #Iwatcher.data.size = 0.6\r\n registerIWatcherFrame(0, frame_num, frame_num)\r\n\r\n 
bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position, -2, -8), scale=(0.6, 0.6, 0.6))\r\n Jwatcher = bpy.context.object\r\n Jwatcher.name = 'Jwatcher'\r\n Jwatcher.rotation_euler[0] = 1.5708\r\n #Jtext.data.body = 'J=0'\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n Jwatcher.display.show_shadows = False\r\n setColor(Jwatcher, 'Jwatcher', watcherColor)\r\n registerJWatcherFrame('', frame_num, frame_num)\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position + 4, -2, -8), scale=(0.6, 0.6, 0.6))\r\n Keywatcher = bpy.context.object\r\n Keywatcher.name = 'Keywatcher'\r\n Keywatcher.rotation_euler[0] = 1.5708\r\n #Jtext.data.body = 'J=0'\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n Keywatcher.display.show_shadows = False\r\n setColor(Keywatcher, 'Keywatcher', watcherColor)\r\n registerKeyWatcherFrame('', frame_num, frame_num)\r\n\r\n bpy.ops.object.text_add(\r\n enter_editmode=False,\r\n location=(x_position + 4, -2, -5),\r\n scale=(0.6, 0.6, 0.6))\r\n ArrJwatcher = bpy.context.object\r\n ArrJwatcher.name = 'ArrJwatcher'\r\n ArrJwatcher.rotation_euler[0] = 1.5708\r\n #Jtext.data.body = 'J=0'\r\n #bpy.ops.object.convert(target=\"MESH\")\r\n ArrJwatcher.display.show_shadows = False\r\n setColor(ArrJwatcher, 'ArrJwatcher', watcherColor)\r\n registerArrjWatcherFrame('', frame_num, frame_num)\r\n\r\n #ArrJwatcher\r\n return\r\n\r\ndef renderValueOnWatcher(watcherId, value):\r\n watcher = bpy.data.objects[watcherId]\r\n watcher.data.body = value\r\n\r\ndef mapChartElWithArray(arr):\r\n # map(lambda (i,x): {'name':x, 'rank':i}, enumerate(ranked_users))\r\n arrToReturn = []\r\n for i, val in enumerate(arr):\r\n arrToReturn.append(getElName(i, val))\r\n return arrToReturn\r\n #return map(getElName, enumerate(arr))\r\n #return map( lambda(i,x): getElName(i,x), enumerate(arr))\r\n\r\n\r\n\r\n\r\ndef run():\r\n clearObject()\r\n # clearAllMaterial()\r\n # return\r\n createMaterials()\r\n mapColorToValues()\r\n renderChart()\r\n renderIndex()\r\n #return\r\n if SHOW_CODE_ANIMATION:\r\n renderCode()\r\n renderWatcher()\r\n #return\r\n if animateString in bpy.app.handlers.frame_change_post:\r\n bpy.app.handlers.frame_change_post.remove(animateString)\r\n\r\n if highlightWatcher in bpy.app.handlers.frame_change_post:\r\n bpy.app.handlers.frame_change_post.remove(highlightWatcher)\r\n if highlishtFrameCode in bpy.app.handlers.frame_change_post:\r\n bpy.app.handlers.frame_change_post.remove(highlishtFrameCode)\r\n\r\n if(SHOW_CODE_ANIMATION):\r\n bpy.app.handlers.frame_change_pre.append(highlishtFrameCode)\r\n bpy.app.handlers.frame_change_pre.append(highlightWatcher)\r\n\r\n mappedChartWithArray = mapChartElWithArray(UNSORTED_ARRAY)\r\n for els in mappedChartWithArray:\r\n element = bpy.data.objects[els]\r\n element.keyframe_insert(data_path=\"location\", frame = frame_num, index = -1 )\r\n\r\n insertionSort(mappedChartWithArray)\r\n print(frame_num)\r\n return\r\n\r\nrun()\r\n\r\n\r\n#print(text_animation_map)\r\n#print(highlight_frame_map)\r\n#print(i_watcher_frame_map)\r\n#print(j_watcher_frame_map)\r\n# for frameKey in highlight_frame_map.keys():\r\n# print(frameKey)\r\n\r\n# for frameKey in i_watcher_frame_map.keys():\r\n# print(frameKey)\r\n\r\n\r\n\r\n" ]
[ [ "numpy.copy", "numpy.amax", "numpy.unique", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KCGallagher/birdspy
[ "234911777db4b0a6402750516e8efd9f62748054" ]
[ "birdspy/dataset_factory.py" ]
[ "#\n# Generates training and test datasets from image data and annotations\n#\n\nimport os\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom typing import List\n\nimport birdspy as bs\n\n\nclass DatasetFactory:\n \"\"\"Factory to generate ground truth images from a given annotation dataset\n \"\"\"\n\n @staticmethod\n def generate_datasets(\n train_path: str, test_path: str, image_path: str, test_frac: int\n ):\n \"\"\"Generates training and testing datasets at the specified locations, from a given \n directory of images. Places 1 in n images into the testing dataset randomly.\n\n Also runs the following checks:\n * All images with invalid data are removed\n * All images with no annotations are optionally removed\n \"\"\"\n annotation_df = bs.ImageFactory.annotation_data(use_citizens=True)\n images_ids = np.unique(annotation_df[\"image_id\"])\n\n for image in images_ids:\n if not os.path.exists(os.path.join(image_path, image)):\n continue # Skip to the nexct image\n\n if not DatasetFactory.check_image(image, annotation_df):\n continue\n\n copy_loc = test_path if (np.random.rand() < (1 / test_frac)) else train_path\n # Test (and save) ground truth if not empty\n if bs.MatlabInterface.save_truth_mat(\n image, annotation_df, copy_loc, save_empty_file=False\n ):\n # This will have saved ground druth .mat file if there are annotations\n # We will also save image data\n shutil.copy(os.path.join(image_path, image), copy_loc)\n\n @staticmethod\n def check_image(image_id, annotation_df):\n \"\"\"Checks whether image is valid, and has annotations,returning validity boolean\"\"\"\n image_df = annotation_df.loc[annotation_df[\"image_id\"] == image_id]\n if (\n len(image_df) <= 0\n ): # Set minimum threshold for number of annotations required in image\n return False\n return True\n" ]
[ [ "numpy.random.rand", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rubenvdg/dask
[ "85f0b14bd36a5135ce51aeee067b6207374b00c4" ]
[ "dask/dataframe/core.py" ]
[ "import copy\nimport operator\nimport warnings\nfrom collections.abc import Iterator, Sequence\nfrom functools import partial, wraps\nfrom numbers import Integral, Number\nfrom operator import getitem\nfrom pprint import pformat\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import (\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_numeric_dtype,\n is_timedelta64_dtype,\n)\nfrom pandas.util import cache_readonly\nfrom tlz import first, merge, partition_all, remove, unique\n\ntry:\n from chest import Chest as Cache\nexcept ImportError:\n Cache = dict\n\nfrom .. import array as da\nfrom .. import core, threaded\nfrom ..array.core import Array, normalize_arg\nfrom ..base import DaskMethodsMixin, dont_optimize, is_dask_collection, tokenize\nfrom ..blockwise import Blockwise, blockwise, subs\nfrom ..context import globalmethod\nfrom ..delayed import Delayed, delayed, unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..optimization import SubgraphCallable\nfrom ..utils import (\n IndexCallable,\n M,\n OperatorMethodMixin,\n apply,\n derived_from,\n funcname,\n has_keyword,\n is_arraylike,\n iter_chunks,\n key_split,\n memory_repr,\n parse_bytes,\n partial_by_order,\n pseudorandom,\n put_lines,\n random_state_data,\n typename,\n)\nfrom ..widgets import get_template\nfrom . import methods\nfrom .accessor import DatetimeAccessor, StringAccessor\nfrom .categorical import CategoricalAccessor, categorize\nfrom .dispatch import (\n get_parallel_type,\n group_split_dispatch,\n hash_object_dispatch,\n meta_nonempty,\n)\nfrom .optimize import optimize\nfrom .utils import (\n PANDAS_GT_110,\n PANDAS_GT_120,\n check_matching_columns,\n clear_known_categories,\n drop_by_shallow_copy,\n has_known_categories,\n index_summary,\n insert_meta_param_description,\n is_categorical_dtype,\n is_dataframe_like,\n is_index_like,\n is_series_like,\n make_meta,\n raise_on_meta_error,\n valid_divisions,\n)\n\nno_default = \"__no_default__\"\n\npd.set_option(\"compute.use_numexpr\", False)\n\n\ndef _numeric_only(func):\n \"\"\"Decorator for methods that accept a numeric_only kwarg\"\"\"\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n # numeric_only is None by default - in that case self = self.\n if kwargs.get(\"numeric_only\") is False:\n raise NotImplementedError(\n \"'numeric_only=False' is not implemented in Dask.\"\n )\n elif kwargs.get(\"numeric_only\") is True:\n self = self._get_numeric_data()\n return func(self, *args, **kwargs)\n\n return wrapper\n\n\ndef _concat(args, ignore_index=False):\n if not args:\n return args\n if isinstance(first(core.flatten(args)), np.ndarray):\n return da.core.concatenate3(args)\n if not has_parallel_type(args[0]):\n try:\n return pd.Series(args)\n except Exception:\n return args\n # We filter out empty partitions here because pandas frequently has\n # inconsistent dtypes in results between empty and non-empty frames.\n # Ideally this would be handled locally for each operation, but in practice\n # this seems easier. 
TODO: don't do this.\n args2 = [i for i in args if len(i)]\n return (\n args[0]\n if not args2\n else methods.concat(args2, uniform=True, ignore_index=ignore_index)\n )\n\n\ndef finalize(results):\n return _concat(results)\n\n\nclass Scalar(DaskMethodsMixin, OperatorMethodMixin):\n \"\"\"A Dask object to represent a pandas scalar\"\"\"\n\n def __init__(self, dsk, name, meta, divisions=None):\n # divisions is ignored, only present to be compatible with other\n # objects.\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self._name = name\n self._parent_meta = pd.Series(dtype=\"float64\")\n\n meta = make_meta(meta, parent_meta=self._parent_meta)\n if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):\n raise TypeError(\n \"Expected meta to specify scalar, got \"\n \"{0}\".format(typename(type(meta)))\n )\n self._meta = meta\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [self.key]\n\n def __dask_tokenize__(self):\n return self._name\n\n def __dask_layers__(self):\n return (self._name,)\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"dataframe_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def __dask_postcompute__(self):\n return first, ()\n\n def __dask_postpersist__(self):\n return self._rebuild, ()\n\n def _rebuild(self, dsk, *, rename=None):\n name = self._name\n if rename:\n name = rename.get(name, name)\n return Scalar(dsk, name, self._meta, self.divisions)\n\n @property\n def _meta_nonempty(self):\n return self._meta\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n if not hasattr(self._meta, \"dtype\"):\n o.remove(\"dtype\") # dtype only in `dir` if available\n return list(o)\n\n @property\n def divisions(self):\n \"\"\"Dummy divisions to be compat with Series and DataFrame\"\"\"\n return [None, None]\n\n def __repr__(self):\n name = self._name if len(self._name) < 10 else self._name[:7] + \"...\"\n if hasattr(self._meta, \"dtype\"):\n extra = \", dtype=%s\" % self._meta.dtype\n else:\n extra = \", type=%s\" % type(self._meta).__name__\n return \"dd.Scalar<%s%s>\" % (name, extra)\n\n def __array__(self):\n # array interface is required to support pandas instance + Scalar\n # Otherwise, above op results in pd.Series of Scalar (object dtype)\n return np.asarray(self.compute())\n\n @property\n def _args(self):\n return (self.dask, self._name, self._meta)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self._name, self._meta = state\n\n def __bool__(self):\n raise TypeError(\n \"Trying to convert {} to a boolean value. Because Dask objects are \"\n \"lazily evaluated, they cannot be converted to a boolean value or used \"\n \"in boolean conditions like if statements. 
Try calling .compute() to \"\n \"force computation prior to converting to a boolean value or using in \"\n \"a conditional statement.\".format(self)\n )\n\n @property\n def key(self):\n return (self._name, 0)\n\n @classmethod\n def _get_unary_operator(cls, op):\n def f(self):\n name = funcname(op) + \"-\" + tokenize(self)\n dsk = {(name, 0): (op, (self._name, 0))}\n meta = op(self._meta_nonempty)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return Scalar(graph, name, meta)\n\n return f\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n return lambda self, other: _scalar_binary(op, self, other, inv=inv)\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a ``dask.delayed`` object.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n \"\"\"\n dsk = self.__dask_graph__()\n if optimize_graph:\n dsk = self.__dask_optimize__(dsk, self.__dask_keys__())\n name = \"delayed-\" + self._name\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=())\n return Delayed(self.key, dsk)\n\n\ndef _scalar_binary(op, self, other, inv=False):\n name = \"{0}-{1}\".format(funcname(op), tokenize(self, other))\n dependencies = [self]\n\n dsk = {}\n return_type = get_parallel_type(other)\n\n if isinstance(other, Scalar):\n dependencies.append(other)\n other_key = (other._name, 0)\n elif is_dask_collection(other):\n return NotImplemented\n else:\n other_key = other\n\n dsk[(name, 0)] = (\n (op, other_key, (self._name, 0)) if inv else (op, (self._name, 0), other_key)\n )\n\n other_meta = make_meta(other, parent_meta=self._parent_meta)\n other_meta_nonempty = meta_nonempty(other_meta)\n if inv:\n meta = op(other_meta_nonempty, self._meta_nonempty)\n else:\n meta = op(self._meta_nonempty, other_meta_nonempty)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n if return_type is not Scalar:\n return return_type(graph, name, meta, [other.index.min(), other.index.max()])\n else:\n return Scalar(graph, name, meta)\n\n\nclass _Frame(DaskMethodsMixin, OperatorMethodMixin):\n \"\"\"Superclass for DataFrame and Series\n\n Parameters\n ----------\n dsk: dict\n The dask graph to compute this DataFrame\n name: str\n The key prefix that specifies which keys in the dask comprise this\n particular DataFrame / Series\n meta: pandas.DataFrame, pandas.Series, or pandas.Index\n An empty pandas object with names, dtypes, and indices matching the\n expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n \"\"\"\n\n def __init__(self, dsk, name, meta, divisions):\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self._name = name\n meta = make_meta(meta)\n if not self._is_partition_type(meta):\n raise TypeError(\n \"Expected meta to specify type {0}, got type \"\n \"{1}\".format(type(self).__name__, typename(type(meta)))\n )\n self._meta = meta\n self.divisions = tuple(divisions)\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [(self._name, i) for i in range(self.npartitions)]\n\n def __dask_layers__(self):\n return (self._name,)\n\n def __dask_tokenize__(self):\n return self._name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"dataframe_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def 
__dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return self._rebuild, ()\n\n def _rebuild(self, dsk, *, rename=None):\n name = self._name\n if rename:\n name = rename.get(name, name)\n return type(self)(dsk, name, self._meta, self.divisions)\n\n @property\n def _constructor(self):\n return new_dd_object\n\n @property\n def npartitions(self):\n \"\"\"Return number of partitions\"\"\"\n return len(self.divisions) - 1\n\n @property\n @derived_from(pd.DataFrame)\n def attrs(self):\n return self._meta.attrs\n\n @attrs.setter\n def attrs(self, value):\n self._meta.attrs = dict(value)\n\n @property\n def size(self):\n \"\"\"Size of the Series or DataFrame as a Delayed object.\n\n Examples\n --------\n >>> series.size # doctest: +SKIP\n dd.Scalar<size-ag..., dtype=int64>\n \"\"\"\n return self.reduction(\n methods.size, np.sum, token=\"size\", meta=int, split_every=False\n )\n\n @property\n def _meta_nonempty(self):\n \"\"\"A non-empty version of `_meta` with fake data.\"\"\"\n return meta_nonempty(self._meta)\n\n @property\n def _args(self):\n return (self.dask, self._name, self._meta, self.divisions)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self._name, self._meta, self.divisions = state\n\n def copy(self):\n \"\"\"Make a copy of the dataframe\n\n This is strictly a shallow copy of the underlying computational graph.\n It does not affect the underlying data\n \"\"\"\n return new_dd_object(self.dask, self._name, self._meta, self.divisions)\n\n def __array__(self, dtype=None, **kwargs):\n self._computed = self.compute()\n x = np.array(self._computed)\n return x\n\n def __array_wrap__(self, array, context=None):\n raise NotImplementedError\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n # ufuncs work with 0-dimensional NumPy ndarrays\n # so we don't want to raise NotImplemented\n if isinstance(x, np.ndarray) and x.shape == ():\n continue\n elif not isinstance(\n x, (Number, Scalar, _Frame, Array, pd.DataFrame, pd.Series, pd.Index)\n ):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc.signature is not None:\n return NotImplemented\n if numpy_ufunc.nout > 1:\n # ufuncs with multiple output values\n # are not yet supported for frames\n return NotImplemented\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n else:\n # ufunc methods are not yet supported for frames\n return NotImplemented\n\n @property\n def _elemwise(self):\n return elemwise\n\n def _repr_data(self):\n raise NotImplementedError\n\n @property\n def _repr_divisions(self):\n name = \"npartitions={0}\".format(self.npartitions)\n if self.known_divisions:\n divisions = pd.Index(self.divisions, name=name)\n else:\n # avoid to be converted to NaN\n divisions = pd.Index([\"\"] * (self.npartitions + 1), name=name)\n return divisions\n\n def __repr__(self):\n data = self._repr_data().to_string(max_rows=5, show_dimensions=False)\n _str_fmt = \"\"\"Dask {klass} Structure:\n{data}\nDask Name: {name}, {task} tasks\"\"\"\n if len(self.columns) == 0:\n data = data.partition(\"\\n\")[-1].replace(\"Index\", \"Divisions\")\n _str_fmt = \"Empty {}\".format(_str_fmt)\n return _str_fmt.format(\n klass=self.__class__.__name__,\n data=data,\n name=key_split(self._name),\n task=len(self.dask),\n )\n\n @property\n def index(self):\n \"\"\"Return dask Index instance\"\"\"\n return self.map_partitions(\n getattr,\n \"index\",\n token=self._name + \"-index\",\n 
meta=self._meta.index,\n enforce_metadata=False,\n )\n\n @index.setter\n def index(self, value):\n self.divisions = value.divisions\n result = map_partitions(\n methods.assign_index, self, value, enforce_metadata=False\n )\n self.dask = result.dask\n self._name = result._name\n self._meta = result._meta\n\n def reset_index(self, drop=False):\n \"\"\"Reset the index to the default index.\n\n Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will\n not be monotonically increasing from 0. Instead, it will restart at 0\n for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).\n This is due to the inability to statically know the full length of the\n index.\n\n For DataFrame with multi-level index, returns a new DataFrame with\n labeling information in the columns under the index names, defaulting\n to 'level_0', 'level_1', etc. if any are None. For a standard index,\n the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n drop : boolean, default False\n Do not try to insert index into dataframe columns.\n \"\"\"\n return self.map_partitions(\n M.reset_index, drop=drop, enforce_metadata=False\n ).clear_divisions()\n\n @property\n def known_divisions(self):\n \"\"\"Whether divisions are already known\"\"\"\n return len(self.divisions) > 0 and self.divisions[0] is not None\n\n def clear_divisions(self):\n \"\"\"Forget division information\"\"\"\n divisions = (None,) * (self.npartitions + 1)\n return type(self)(self.dask, self._name, self._meta, divisions)\n\n def get_partition(self, n):\n \"\"\"Get a dask DataFrame/Series representing the `nth` partition.\"\"\"\n if 0 <= n < self.npartitions:\n name = \"get-partition-%s-%s\" % (str(n), self._name)\n divisions = self.divisions[n : n + 2]\n layer = {(name, 0): (self._name, n)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return new_dd_object(graph, name, self._meta, divisions)\n else:\n msg = \"n must be 0 <= n < {0}\".format(self.npartitions)\n raise ValueError(msg)\n\n @derived_from(pd.DataFrame)\n def drop_duplicates(\n self, subset=None, split_every=None, split_out=1, ignore_index=False, **kwargs\n ):\n if subset is not None:\n # Let pandas error on bad inputs\n self._meta_nonempty.drop_duplicates(subset=subset, **kwargs)\n kwargs[\"subset\"] = subset\n split_out_setup = split_out_on_cols\n split_out_setup_kwargs = {\"cols\": subset}\n else:\n self._meta_nonempty.drop_duplicates(**kwargs)\n split_out_setup = split_out_setup_kwargs = None\n\n if kwargs.get(\"keep\", True) is False:\n raise NotImplementedError(\"drop_duplicates with keep=False\")\n\n chunk = M.drop_duplicates\n return aca(\n self,\n chunk=chunk,\n aggregate=chunk,\n meta=self._meta,\n token=\"drop-duplicates\",\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_setup,\n split_out_setup_kwargs=split_out_setup_kwargs,\n ignore_index=ignore_index,\n **kwargs,\n )\n\n def __len__(self):\n return self.reduction(\n len, np.sum, token=\"len\", meta=int, split_every=False\n ).compute()\n\n def __bool__(self):\n raise ValueError(\n \"The truth value of a {0} is ambiguous. 
\"\n \"Use a.any() or a.all().\".format(self.__class__.__name__)\n )\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n def wrapper():\n raise TypeError(\"cannot convert the series to {0}\".format(str(cast_type)))\n\n return wrapper\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __complex__(self):\n return self._scalarfunc(complex)\n\n @insert_meta_param_description(pad=12)\n def map_partitions(self, func, *args, **kwargs):\n \"\"\"Apply Python function on each DataFrame partition.\n\n Note that the index and divisions are assumed to remain unchanged.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n args, kwargs :\n Arguments and keywords to pass to the function. The partition will\n be the first argument, and these will be passed *after*. Arguments\n and keywords may contain ``Scalar``, ``Delayed``, ``partition_info``\n or regular python objects. DataFrame-like args (both dask and\n pandas) will be repartitioned to align (if necessary) before\n applying the function.\n $META\n\n Examples\n --------\n Given a DataFrame, Series, or Index, such as:\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],\n ... 'y': [1., 2., 3., 4., 5.]})\n >>> ddf = dd.from_pandas(df, npartitions=2)\n\n One can use ``map_partitions`` to apply a function on each partition.\n Extra arguments and keywords can optionally be provided, and will be\n passed to the function after the partition.\n\n Here we apply a function with arguments and keywords to a DataFrame,\n resulting in a Series:\n\n >>> def myadd(df, a, b=1):\n ... return df.x + df.y + a + b\n >>> res = ddf.map_partitions(myadd, 1, b=2)\n >>> res.dtype\n dtype('float64')\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with no name, and dtype\n ``float64``:\n\n >>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))\n\n Here we map a function that takes in a DataFrame, and returns a\n DataFrame with a new column:\n\n >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))\n >>> res.dtypes\n x int64\n y float64\n z float64\n dtype: object\n\n As before, the output metadata can also be specified manually. This\n time we pass in a ``dict``, as the output is a DataFrame:\n\n >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),\n ... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ddf.map_partitions(lambda df: df.head(), meta=ddf)\n\n Also note that the index and divisions are assumed to remain unchanged.\n If the function you're mapping changes the index/divisions, you'll need\n to clear them afterwards:\n\n >>> ddf.map_partitions(func).clear_divisions() # doctest: +SKIP\n\n Your map function gets information about where it is in the dataframe by\n accepting a special ``partition_info`` keyword argument.\n\n >>> def func(partition, partition_info=None):\n ... 
pass\n\n This will receive the following information:\n\n >>> partition_info # doctest: +SKIP\n {'number': 1, 'division': 3}\n\n For each argument and keyword arguments that are dask dataframes you will\n receive the number (n) which represents the nth partition of the dataframe\n and the division (the first index value in the partition). If divisions\n are not known (for instance if the index is not sorted) then you will get\n None as the division.\n \"\"\"\n return map_partitions(func, self, *args, **kwargs)\n\n @insert_meta_param_description(pad=12)\n def map_overlap(self, func, before, after, *args, **kwargs):\n \"\"\"Apply a function to each partition, sharing rows with adjacent partitions.\n\n This can be useful for implementing windowing functions such as\n ``df.rolling(...).mean()`` or ``df.diff()``.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n before : int\n The number of rows to prepend to partition ``i`` from the end of\n partition ``i - 1``.\n after : int\n The number of rows to append to partition ``i`` from the beginning\n of partition ``i + 1``.\n args, kwargs :\n Arguments and keywords to pass to the function. The partition will\n be the first argument, and these will be passed *after*.\n $META\n\n Notes\n -----\n Given positive integers ``before`` and ``after``, and a function\n ``func``, ``map_overlap`` does the following:\n\n 1. Prepend ``before`` rows to each partition ``i`` from the end of\n partition ``i - 1``. The first partition has no rows prepended.\n\n 2. Append ``after`` rows to each partition ``i`` from the beginning of\n partition ``i + 1``. The last partition has no rows appended.\n\n 3. Apply ``func`` to each partition, passing in any extra ``args`` and\n ``kwargs`` if provided.\n\n 4. Trim ``before`` rows from the beginning of all but the first\n partition.\n\n 5. Trim ``after`` rows from the end of all but the last partition.\n\n Note that the index and divisions are assumed to remain unchanged.\n\n Examples\n --------\n Given a DataFrame, Series, or Index, such as:\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],\n ... 'y': [1., 2., 3., 4., 5.]})\n >>> ddf = dd.from_pandas(df, npartitions=2)\n\n A rolling sum with a trailing moving window of size 2 can be computed by\n overlapping 2 rows before each partition, and then mapping calls to\n ``df.rolling(2).sum()``:\n\n >>> ddf.compute()\n x y\n 0 1 1.0\n 1 2 2.0\n 2 4 3.0\n 3 7 4.0\n 4 11 5.0\n >>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()\n x y\n 0 NaN NaN\n 1 3.0 3.0\n 2 6.0 5.0\n 3 11.0 7.0\n 4 18.0 9.0\n\n The pandas ``diff`` method computes a discrete difference shifted by a\n number of periods (can be positive or negative). This can be\n implemented by mapping calls to ``df.diff`` to each partition after\n prepending/appending that many rows, depending on sign:\n\n >>> def diff(df, periods=1):\n ... before, after = (periods, 0) if periods > 0 else (0, -periods)\n ... return df.map_overlap(lambda df, periods=1: df.diff(periods),\n ... periods, 0, periods=periods)\n >>> diff(ddf, 1).compute()\n x y\n 0 NaN NaN\n 1 1.0 1.0\n 2 2.0 1.0\n 3 3.0 1.0\n 4 4.0 1.0\n\n If you have a ``DatetimeIndex``, you can use a ``pd.Timedelta`` for time-\n based windows.\n\n >>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))\n >>> dts = dd.from_pandas(ts, npartitions=2)\n >>> dts.map_overlap(lambda df: df.rolling('2D').sum(),\n ... 
pd.Timedelta('2D'), 0).compute()\n 2017-01-01 0.0\n 2017-01-02 1.0\n 2017-01-03 3.0\n 2017-01-04 5.0\n 2017-01-05 7.0\n 2017-01-06 9.0\n 2017-01-07 11.0\n 2017-01-08 13.0\n 2017-01-09 15.0\n 2017-01-10 17.0\n Freq: D, dtype: float64\n \"\"\"\n from .rolling import map_overlap\n\n return map_overlap(func, self, before, after, *args, **kwargs)\n\n def memory_usage_per_partition(self, index=True, deep=False):\n \"\"\"Return the memory usage of each partition\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the index in\n returned Series.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n ``object`` dtypes for system-level memory consumption, and include\n it in the returned values.\n\n Returns\n -------\n Series\n A Series whose index is the partition number and whose values\n are the memory usage of each partition in bytes.\n \"\"\"\n return self.map_partitions(\n total_mem_usage, index=index, deep=deep\n ).clear_divisions()\n\n @insert_meta_param_description(pad=12)\n def reduction(\n self,\n chunk,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n split_every=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n **kwargs,\n ):\n \"\"\"Generic row-wise reductions.\n\n Parameters\n ----------\n chunk : callable\n Function to operate on each partition. Should return a\n ``pandas.DataFrame``, ``pandas.Series``, or a scalar.\n aggregate : callable, optional\n Function to operate on the concatenated result of ``chunk``. If not\n specified, defaults to ``chunk``. Used to do the final aggregation\n in a tree reduction.\n\n The input to ``aggregate`` depends on the output of ``chunk``.\n If the output of ``chunk`` is a:\n\n - scalar: Input is a Series, with one row per partition.\n - Series: Input is a DataFrame, with one row per partition. Columns\n are the rows in the output series.\n - DataFrame: Input is a DataFrame, with one row per partition.\n Columns are the columns in the output dataframes.\n\n Should return a ``pandas.DataFrame``, ``pandas.Series``, or a\n scalar.\n combine : callable, optional\n Function to operate on intermediate concatenated results of\n ``chunk`` in a tree-reduction. If not provided, defaults to\n ``aggregate``. The input/output requirements should match that of\n ``aggregate`` described above.\n $META\n token : str, optional\n The name to use for the output keys.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used,\n and all intermediates will be concatenated and passed to\n ``aggregate``. Default is 8.\n chunk_kwargs : dict, optional\n Keyword arguments to pass on to ``chunk`` only.\n aggregate_kwargs : dict, optional\n Keyword arguments to pass on to ``aggregate`` only.\n combine_kwargs : dict, optional\n Keyword arguments to pass on to ``combine`` only.\n kwargs :\n All remaining keywords will be passed to ``chunk``, ``combine``,\n and ``aggregate``.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})\n >>> ddf = dd.from_pandas(df, npartitions=4)\n\n Count the number of rows in a DataFrame. To do this, count the number\n of rows in each partition, then sum the results:\n\n >>> res = ddf.reduction(lambda x: x.count(),\n ... 
aggregate=lambda x: x.sum())\n >>> res.compute()\n x 50\n y 50\n dtype: int64\n\n Count the number of rows in a Series with elements greater than or\n equal to a value (provided via a keyword).\n\n >>> def count_greater(x, value=0):\n ... return (x >= value).sum()\n >>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),\n ... chunk_kwargs={'value': 25})\n >>> res.compute()\n 25\n\n Aggregate both the sum and count of a Series at the same time:\n\n >>> def sum_and_count(x):\n ... return pd.Series({'count': x.count(), 'sum': x.sum()},\n ... index=['count', 'sum'])\n >>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())\n >>> res.compute()\n count 50\n sum 1225\n dtype: int64\n\n Doing the same, but for a DataFrame. Here ``chunk`` returns a\n DataFrame, meaning the input to ``aggregate`` is a DataFrame with an\n index with non-unique entries for both 'x' and 'y'. We groupby the\n index, and sum each group to get the final result.\n\n >>> def sum_and_count(x):\n ... return pd.DataFrame({'count': x.count(), 'sum': x.sum()},\n ... columns=['count', 'sum'])\n >>> res = ddf.reduction(sum_and_count,\n ... aggregate=lambda x: x.groupby(level=0).sum())\n >>> res.compute()\n count sum\n x 50 1225\n y 50 3725\n \"\"\"\n if aggregate is None:\n aggregate = chunk\n\n if combine is None:\n if combine_kwargs:\n raise ValueError(\"`combine_kwargs` provided with no `combine`\")\n combine = aggregate\n combine_kwargs = aggregate_kwargs\n\n chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}\n chunk_kwargs[\"aca_chunk\"] = chunk\n\n combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}\n combine_kwargs[\"aca_combine\"] = combine\n\n aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}\n aggregate_kwargs[\"aca_aggregate\"] = aggregate\n\n return aca(\n self,\n chunk=_reduction_chunk,\n aggregate=_reduction_aggregate,\n combine=_reduction_combine,\n meta=meta,\n token=token,\n split_every=split_every,\n chunk_kwargs=chunk_kwargs,\n aggregate_kwargs=aggregate_kwargs,\n combine_kwargs=combine_kwargs,\n **kwargs,\n )\n\n @derived_from(pd.DataFrame)\n def pipe(self, func, *args, **kwargs):\n # Taken from pandas:\n # https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n \"%s is both the pipe target and a keyword argument\" % target\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n def random_split(self, frac, random_state=None, shuffle=False):\n \"\"\"Pseudorandomly split dataframe into different pieces row-wise\n\n Parameters\n ----------\n frac : list\n List of floats that should sum to one.\n random_state : int or np.random.RandomState\n If int create a new RandomState with this as the seed.\n Otherwise draw from the passed RandomState.\n shuffle : bool, default False\n If set to True, the dataframe is shuffled (within partition)\n before the split.\n\n Examples\n --------\n\n 50/50 split\n\n >>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP\n\n 80/10/10 split, consistent random_state\n\n >>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP\n\n See Also\n --------\n dask.DataFrame.sample\n \"\"\"\n if not np.allclose(sum(frac), 1):\n raise ValueError(\"frac should sum to 1\")\n state_data = random_state_data(self.npartitions, random_state)\n token = tokenize(self, frac, random_state)\n name = \"split-\" + token\n layer = {\n (name, i): 
(pd_split, (self._name, i), frac, state, shuffle)\n for i, state in enumerate(state_data)\n }\n\n out = []\n for i in range(len(frac)):\n name2 = \"split-%d-%s\" % (i, token)\n dsk2 = {\n (name2, j): (getitem, (name, j), i) for j in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(\n name2, merge(dsk2, layer), dependencies=[self]\n )\n out_df = type(self)(graph, name2, self._meta, self.divisions)\n out.append(out_df)\n return out\n\n def head(self, n=5, npartitions=1, compute=True):\n \"\"\"First n rows of the dataset\n\n Parameters\n ----------\n n : int, optional\n The number of rows to return. Default is 5.\n npartitions : int, optional\n Elements are only taken from the first ``npartitions``, with a\n default of 1. If there are fewer than ``n`` rows in the first\n ``npartitions`` a warning will be raised and any found rows\n returned. Pass -1 to use all partitions.\n compute : bool, optional\n Whether to compute the result, default is True.\n \"\"\"\n return self._head(n=n, npartitions=npartitions, compute=compute, safe=True)\n\n def _head(self, n, npartitions, compute, safe):\n if npartitions <= -1:\n npartitions = self.npartitions\n if npartitions > self.npartitions:\n msg = \"only {} partitions, head received {}\"\n raise ValueError(msg.format(self.npartitions, npartitions))\n\n name = \"head-%d-%d-%s\" % (npartitions, n, self._name)\n if safe:\n head = safe_head\n else:\n head = M.head\n\n if npartitions > 1:\n name_p = \"head-partial-%d-%s\" % (n, self._name)\n\n dsk = {}\n for i in range(npartitions):\n dsk[(name_p, i)] = (M.head, (self._name, i), n)\n\n concat = (_concat, [(name_p, i) for i in range(npartitions)])\n dsk[(name, 0)] = (head, concat, n)\n else:\n dsk = {(name, 0): (head, (self._name, 0), n)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n result = new_dd_object(\n graph, name, self._meta, [self.divisions[0], self.divisions[npartitions]]\n )\n\n if compute:\n result = result.compute()\n return result\n\n def tail(self, n=5, compute=True):\n \"\"\"Last n rows of the dataset\n\n Caveat, the only checks the last n rows of the last partition.\n \"\"\"\n name = \"tail-%d-%s\" % (n, self._name)\n dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n result = new_dd_object(graph, name, self._meta, self.divisions[-2:])\n\n if compute:\n result = result.compute()\n return result\n\n @property\n def loc(self):\n \"\"\"Purely label-location based indexer for selection by label.\n\n >>> df.loc[\"b\"] # doctest: +SKIP\n >>> df.loc[\"b\":\"d\"] # doctest: +SKIP\n \"\"\"\n from .indexing import _LocIndexer\n\n return _LocIndexer(self)\n\n def _partitions(self, index):\n if not isinstance(index, tuple):\n index = (index,)\n from ..array.slicing import normalize_index\n\n index = normalize_index(index, (self.npartitions,))\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n name = \"blocks-\" + tokenize(self, index)\n new_keys = np.array(self.__dask_keys__(), dtype=object)[index].tolist()\n\n divisions = [self.divisions[i] for _, i in new_keys] + [\n self.divisions[new_keys[-1][1] + 1]\n ]\n dsk = {(name, i): tuple(key) for i, key in enumerate(new_keys)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self._meta, divisions)\n\n @property\n def partitions(self):\n \"\"\"Slice dataframe by partitions\n\n This allows partitionwise slicing of a Dask 
Dataframe. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along partitions so, for example, ``df.partitions[:5]`` produces a new\n Dask Dataframe of the first five partitions.\n\n Examples\n --------\n >>> df.partitions[0] # doctest: +SKIP\n >>> df.partitions[:3] # doctest: +SKIP\n >>> df.partitions[::10] # doctest: +SKIP\n\n Returns\n -------\n A Dask DataFrame\n \"\"\"\n return IndexCallable(self._partitions)\n\n # Note: iloc is implemented only on DataFrame\n\n def repartition(\n self,\n divisions=None,\n npartitions=None,\n partition_size=None,\n freq=None,\n force=False,\n ):\n \"\"\"Repartition dataframe along new divisions\n\n Parameters\n ----------\n divisions : list, optional\n List of partitions to be used. Only used if npartitions and\n partition_size isn't specified.\n For convenience if given an integer this will defer to npartitions\n and if given a string it will defer to partition_size (see below)\n npartitions : int, optional\n Number of partitions of output. Only used if partition_size\n isn't specified.\n partition_size: int or string, optional\n Max number of bytes of memory for each partition. Use numbers or\n strings like 5MB. If specified npartitions and divisions will be\n ignored.\n\n .. warning::\n\n This keyword argument triggers computation to determine\n the memory size of each partition, which may be expensive.\n\n freq : str, pd.Timedelta\n A period on which to partition timeseries data like ``'7D'`` or\n ``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions' lower and upper bounds must be\n the same as the old divisions'.\n\n Notes\n -----\n Exactly one of `divisions`, `npartitions`, `partition_size`, or `freq`\n should be specified. A ``ValueError`` will be raised when that is\n not the case.\n\n Examples\n --------\n >>> df = df.repartition(npartitions=10) # doctest: +SKIP\n >>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP\n >>> df = df.repartition(freq='7d') # doctest: +SKIP\n \"\"\"\n if isinstance(divisions, int):\n npartitions = divisions\n divisions = None\n if isinstance(divisions, str):\n partition_size = divisions\n divisions = None\n if (\n sum(\n [\n partition_size is not None,\n divisions is not None,\n npartitions is not None,\n freq is not None,\n ]\n )\n != 1\n ):\n raise ValueError(\n \"Please provide exactly one of ``npartitions=``, ``freq=``, \"\n \"``divisions=``, ``partition_size=`` keyword arguments\"\n )\n\n if partition_size is not None:\n return repartition_size(self, partition_size)\n elif npartitions is not None:\n return repartition_npartitions(self, npartitions)\n elif divisions is not None:\n return repartition(self, divisions, force=force)\n elif freq is not None:\n return repartition_freq(self, freq=freq)\n\n def shuffle(\n self,\n on,\n npartitions=None,\n max_branch=None,\n shuffle=None,\n ignore_index=False,\n compute=None,\n ):\n \"\"\"Rearrange DataFrame into new partitions\n\n Uses hashing of `on` to map rows to output partitions. After this\n operation, rows with the same value of `on` will be in the same\n partition.\n\n Parameters\n ----------\n on : str, list of str, or Series, Index, or DataFrame\n Column(s) or index to be used to map rows to output partitions\n npartitions : int, optional\n Number of partitions of output. 
Partition count will not be\n changed by default.\n max_branch: int, optional\n The maximum number of splits per input partition. Used within\n the staged shuffling algorithm.\n shuffle: {'disk', 'tasks'}, optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n ignore_index: bool, default False\n Ignore index during shuffle. If ``True``, performance may improve,\n but index values will not be preserved.\n compute: bool\n Whether or not to trigger an immediate computation. Defaults to False.\n\n Notes\n -----\n This does not preserve a meaningful index/partitioning scheme. This\n is not deterministic if done in parallel.\n\n Examples\n --------\n >>> df = df.shuffle(df.columns[0]) # doctest: +SKIP\n \"\"\"\n from .shuffle import shuffle as dd_shuffle\n\n return dd_shuffle(\n self,\n on,\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n ignore_index=ignore_index,\n compute=compute,\n )\n\n @derived_from(pd.DataFrame)\n def fillna(self, value=None, method=None, limit=None, axis=None):\n axis = self._validate_axis(axis)\n if method is None and limit is not None:\n raise NotImplementedError(\"fillna with set limit and method=None\")\n if isinstance(value, _Frame):\n test_value = value._meta_nonempty.values[0]\n elif isinstance(value, Scalar):\n test_value = value._meta_nonempty\n else:\n test_value = value\n meta = self._meta_nonempty.fillna(\n value=test_value, method=method, limit=limit, axis=axis\n )\n\n if axis == 1 or method is None:\n # Control whether or not dask's partition alignment happens.\n # We don't want for a pandas Series.\n # We do want it for a dask Series\n if is_series_like(value) and not is_dask_collection(value):\n args = ()\n kwargs = {\"value\": value}\n else:\n args = (value,)\n kwargs = {}\n return self.map_partitions(\n M.fillna,\n *args,\n method=method,\n limit=limit,\n axis=axis,\n meta=meta,\n enforce_metadata=False,\n **kwargs,\n )\n\n if method in (\"pad\", \"ffill\"):\n method = \"ffill\"\n skip_check = 0\n before, after = 1 if limit is None else limit, 0\n else:\n method = \"bfill\"\n skip_check = self.npartitions - 1\n before, after = 0, 1 if limit is None else limit\n\n if limit is None:\n name = \"fillna-chunk-\" + tokenize(self, method)\n dsk = {\n (name, i): (\n methods.fillna_check,\n (self._name, i),\n method,\n i != skip_check,\n )\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n parts = new_dd_object(graph, name, meta, self.divisions)\n else:\n parts = self\n\n return parts.map_overlap(\n M.fillna, before, after, method=method, limit=limit, meta=meta\n )\n\n @derived_from(pd.DataFrame)\n def ffill(self, axis=None, limit=None):\n return self.fillna(method=\"ffill\", limit=limit, axis=axis)\n\n @derived_from(pd.DataFrame)\n def bfill(self, axis=None, limit=None):\n return self.fillna(method=\"bfill\", limit=limit, axis=axis)\n\n def sample(self, n=None, frac=None, replace=False, random_state=None):\n \"\"\"Random sample of items\n\n Parameters\n ----------\n n : int, optional\n Number of items to return is not supported by dask. Use frac\n instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : boolean, optional\n Sample with or without replacement. 
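        A minimal usage sketch for the ``shuffle`` method documented above
        (editorial addition; the column name ``'x'`` and the partition counts
        are illustrative assumptions, not part of the original docstring).
        After hashing on ``'x'``, rows with equal ``'x'`` values land in the
        same output partition, at the cost of meaningful divisions:

        >>> import pandas as pd
        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': [1, 2, 1, 2, 1], 'y': range(5)})
        >>> ddf = dd.from_pandas(df, npartitions=3)
        >>> shuffled = ddf.shuffle('x', npartitions=2)  # doctest: +SKIP
        >>> shuffled.known_divisions  # doctest: +SKIP
        False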
Default = False.\n random_state : int or ``np.random.RandomState``\n If int we create a new RandomState with this as the seed\n Otherwise we draw from the passed RandomState\n\n See Also\n --------\n DataFrame.random_split\n pandas.DataFrame.sample\n \"\"\"\n if n is not None:\n msg = (\n \"sample does not support the number of sampled items \"\n \"parameter, 'n'. Please use the 'frac' parameter instead.\"\n )\n if isinstance(n, Number) and 0 <= n <= 1:\n warnings.warn(msg)\n frac = n\n else:\n raise ValueError(msg)\n\n if frac is None:\n raise ValueError(\"frac must not be None\")\n\n if random_state is None:\n random_state = np.random.RandomState()\n\n name = \"sample-\" + tokenize(self, frac, replace, random_state)\n\n state_data = random_state_data(self.npartitions, random_state)\n dsk = {\n (name, i): (methods.sample, (self._name, i), state, frac, replace)\n for i, state in enumerate(state_data)\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self._meta, self.divisions)\n\n @derived_from(pd.DataFrame)\n def replace(self, to_replace=None, value=None, regex=False):\n return self.map_partitions(\n M.replace,\n to_replace=to_replace,\n value=value,\n regex=regex,\n enforce_metadata=False,\n )\n\n def to_dask_array(self, lengths=None, meta=None):\n \"\"\"Convert a dask DataFrame to a dask array.\n\n Parameters\n ----------\n lengths : bool or Sequence of ints, optional\n How to determine the chunks sizes for the output array.\n By default, the output array will have unknown chunk lengths\n along the first axis, which can cause some later operations\n to fail.\n\n * True : immediately compute the length of each partition\n * Sequence : a sequence of integers to use for the chunk sizes\n on the first axis. 
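        A short, hedged sketch of the ``sample`` method described above
        (editorial addition; the frame contents and the ``random_state``
        value are assumptions). Only ``frac`` is supported, and reusing the
        same ``random_state`` reproduces the same draw:

        >>> import pandas as pd
        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': range(100)})
        >>> ddf = dd.from_pandas(df, npartitions=4)
        >>> a = ddf.sample(frac=0.1, random_state=42)
        >>> b = ddf.sample(frac=0.1, random_state=42)
        >>> a.compute().equals(b.compute())  # doctest: +SKIP
        True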
These values are *not* validated for\n correctness, beyond ensuring that the number of items\n matches the number of partitions.\n meta : object, optional\n An optional `meta` parameter can be passed for dask to override the\n default metadata on the underlying dask array.\n\n Returns\n -------\n \"\"\"\n if lengths is True:\n lengths = tuple(self.map_partitions(len, enforce_metadata=False).compute())\n\n arr = self.values\n\n chunks = self._validate_chunks(arr, lengths)\n arr._chunks = chunks\n\n if meta is not None:\n arr._meta = meta\n\n return arr\n\n def to_hdf(self, path_or_buf, key, mode=\"a\", append=False, **kwargs):\n \"\"\"See dd.to_hdf docstring for more information\"\"\"\n from .io import to_hdf\n\n return to_hdf(self, path_or_buf, key, mode, append, **kwargs)\n\n def to_csv(self, filename, **kwargs):\n \"\"\"See dd.to_csv docstring for more information\"\"\"\n from .io import to_csv\n\n return to_csv(self, filename, **kwargs)\n\n def to_sql(\n self,\n name: str,\n uri: str,\n schema=None,\n if_exists: str = \"fail\",\n index: bool = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n compute=True,\n parallel=False,\n ):\n \"\"\"See dd.to_sql docstring for more information\"\"\"\n from .io import to_sql\n\n return to_sql(\n self,\n name=name,\n uri=uri,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n compute=compute,\n parallel=parallel,\n )\n\n def to_json(self, filename, *args, **kwargs):\n \"\"\"See dd.to_json docstring for more information\"\"\"\n from .io import to_json\n\n return to_json(self, filename, *args, **kwargs)\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a list of ``dask.delayed`` objects, one per partition.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n Examples\n --------\n >>> partitions = df.to_delayed() # doctest: +SKIP\n\n See Also\n --------\n dask.dataframe.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n if optimize_graph:\n graph = self.__dask_optimize__(graph, self.__dask_keys__())\n name = \"delayed-\" + self._name\n graph = HighLevelGraph.from_collections(name, graph, dependencies=())\n return [Delayed(k, graph) for k in keys]\n\n @classmethod\n def _get_unary_operator(cls, op):\n return lambda self: elemwise(op, self)\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n if inv:\n return lambda self, other: elemwise(op, other, self)\n else:\n return lambda self, other: elemwise(op, self, other)\n\n def rolling(self, window, min_periods=None, center=False, win_type=None, axis=0):\n \"\"\"Provides rolling transformations.\n\n Parameters\n ----------\n window : int, str, offset\n Size of the moving window. This is the number of observations used\n for calculating the statistic. When not using a ``DatetimeIndex``,\n the window size must not be so large as to span more than one\n adjacent partition. If using an offset or offset alias like '5D',\n the data must have a ``DatetimeIndex``\n\n .. versionchanged:: 0.15.0\n\n Now accepts offsets and string offset aliases\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n center : boolean, default False\n Set the labels at the center of the window.\n win_type : string, default None\n Provide a window type. 
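        An illustrative sketch (editorial addition; the example frame is an
        assumption) tying together ``to_dask_array`` and ``to_delayed`` from
        above. ``lengths=True`` computes each partition's length so the
        resulting array has known chunks along the first axis:

        >>> import pandas as pd
        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': range(6), 'y': range(6)})
        >>> ddf = dd.from_pandas(df, npartitions=3)
        >>> ddf.to_dask_array(lengths=True).chunks  # doctest: +SKIP
        ((2, 2, 2), (2,))
        >>> len(ddf.to_delayed()) == ddf.npartitions
        True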
The recognized window types are identical\n to pandas.\n axis : int, default 0\n\n Returns\n -------\n a Rolling object on which to call a method to compute a statistic\n \"\"\"\n from dask.dataframe.rolling import Rolling\n\n if isinstance(window, Integral):\n if window < 0:\n raise ValueError(\"window must be >= 0\")\n\n if min_periods is not None:\n if not isinstance(min_periods, Integral):\n raise ValueError(\"min_periods must be an integer\")\n if min_periods < 0:\n raise ValueError(\"min_periods must be >= 0\")\n\n return Rolling(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n axis=axis,\n )\n\n @derived_from(pd.DataFrame)\n def diff(self, periods=1, axis=0):\n \"\"\"\n .. note::\n\n Pandas currently uses an ``object``-dtype column to represent\n boolean data with missing values. This can cause issues for\n boolean-specific operations, like ``|``. To enable boolean-\n specific operations, at the cost of metadata that doesn't match\n pandas, use ``.astype(bool)`` after the ``shift``.\n \"\"\"\n axis = self._validate_axis(axis)\n if not isinstance(periods, Integral):\n raise TypeError(\"periods must be an integer\")\n\n if axis == 1:\n return self.map_partitions(\n M.diff, token=\"diff\", periods=periods, axis=1, enforce_metadata=False\n )\n\n before, after = (periods, 0) if periods > 0 else (0, -periods)\n return self.map_overlap(M.diff, before, after, token=\"diff\", periods=periods)\n\n @derived_from(pd.DataFrame)\n def shift(self, periods=1, freq=None, axis=0):\n axis = self._validate_axis(axis)\n if not isinstance(periods, Integral):\n raise TypeError(\"periods must be an integer\")\n\n if axis == 1:\n return self.map_partitions(\n M.shift,\n token=\"shift\",\n periods=periods,\n freq=freq,\n axis=1,\n enforce_metadata=False,\n )\n\n if freq is None:\n before, after = (periods, 0) if periods > 0 else (0, -periods)\n return self.map_overlap(\n M.shift, before, after, token=\"shift\", periods=periods\n )\n\n # Let pandas error on invalid arguments\n meta = self._meta_nonempty.shift(periods, freq=freq)\n out = self.map_partitions(\n M.shift,\n token=\"shift\",\n periods=periods,\n freq=freq,\n meta=meta,\n enforce_metadata=False,\n transform_divisions=False,\n )\n return maybe_shift_divisions(out, periods, freq=freq)\n\n def _reduction_agg(self, name, axis=None, skipna=True, split_every=False, out=None):\n axis = self._validate_axis(axis)\n\n meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)\n token = self._token_prefix + name\n\n method = getattr(M, name)\n if axis == 1:\n result = self.map_partitions(\n method, meta=meta, token=token, skipna=skipna, axis=axis\n )\n return handle_out(out, result)\n else:\n result = self.reduction(\n method,\n meta=meta,\n token=token,\n skipna=skipna,\n axis=axis,\n split_every=split_every,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)\n\n @derived_from(pd.DataFrame)\n def add_prefix(self, prefix):\n res = self.map_partitions(M.add_prefix, prefix)\n if self.known_divisions and is_series_like(self):\n res.divisions = tuple(prefix + str(division) for division in self.divisions)\n return res\n\n @derived_from(pd.DataFrame)\n def add_suffix(self, suffix):\n res = self.map_partitions(M.add_suffix, suffix)\n if self.known_divisions and is_series_like(self):\n res.divisions = tuple(str(division) + suffix for division in self.divisions)\n return res\n\n @derived_from(pd.DataFrame)\n def abs(self):\n 
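        # Editorial sketch, not part of the original source: ``diff`` above is
        # implemented with ``map_overlap`` so the shifted difference can cross
        # partition boundaries, and ``rolling`` likewise shares boundary rows
        # between adjacent partitions. The input frame below is an assumption:
        #
        #   >>> import pandas as pd
        #   >>> import dask.dataframe as dd
        #   >>> df = pd.DataFrame({'x': [1., 2., 4., 7., 11.]})
        #   >>> ddf = dd.from_pandas(df, npartitions=2)
        #   >>> ddf.rolling(2).sum().compute()  # doctest: +SKIP
        #   >>> ddf.diff(periods=1).compute()   # doctest: +SKIP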
_raise_if_object_series(self, \"abs\")\n meta = self._meta_nonempty.abs()\n return self.map_partitions(M.abs, meta=meta, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def all(self, axis=None, skipna=True, split_every=False, out=None):\n return self._reduction_agg(\n \"all\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n\n @derived_from(pd.DataFrame)\n def any(self, axis=None, skipna=True, split_every=False, out=None):\n return self._reduction_agg(\n \"any\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def sum(\n self,\n axis=None,\n skipna=True,\n split_every=False,\n dtype=None,\n out=None,\n min_count=None,\n numeric_only=None,\n ):\n result = self._reduction_agg(\n \"sum\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n if min_count:\n cond = self.notnull().sum(axis=axis) >= min_count\n if is_series_like(cond):\n return result.where(cond, other=np.NaN)\n else:\n return _scalar_binary(\n lambda x, y: result if x is y else np.NaN, cond, True\n )\n else:\n return result\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def prod(\n self,\n axis=None,\n skipna=True,\n split_every=False,\n dtype=None,\n out=None,\n min_count=None,\n numeric_only=None,\n ):\n result = self._reduction_agg(\n \"prod\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n if min_count:\n cond = self.notnull().sum(axis=axis) >= min_count\n if is_series_like(cond):\n return result.where(cond, other=np.NaN)\n else:\n return _scalar_binary(\n lambda x, y: result if x is y else np.NaN, cond, True\n )\n else:\n return result\n\n product = prod # aliased dd.product\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def max(\n self, axis=None, skipna=True, split_every=False, out=None, numeric_only=None\n ):\n return self._reduction_agg(\n \"max\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def min(\n self, axis=None, skipna=True, split_every=False, out=None, numeric_only=None\n ):\n return self._reduction_agg(\n \"min\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n\n @derived_from(pd.DataFrame)\n def idxmax(self, axis=None, skipna=True, split_every=False):\n fn = \"idxmax\"\n axis = self._validate_axis(axis)\n meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)\n if axis == 1:\n return map_partitions(\n M.idxmax,\n self,\n meta=meta,\n token=self._token_prefix + fn,\n skipna=skipna,\n axis=axis,\n enforce_metadata=False,\n )\n else:\n scalar = not is_series_like(meta)\n result = aca(\n [self],\n chunk=idxmaxmin_chunk,\n aggregate=idxmaxmin_agg,\n combine=idxmaxmin_combine,\n meta=meta,\n aggregate_kwargs={\"scalar\": scalar},\n token=self._token_prefix + fn,\n split_every=split_every,\n skipna=skipna,\n fn=fn,\n )\n if isinstance(self, DataFrame):\n result.divisions = (min(self.columns), max(self.columns))\n return result\n\n @derived_from(pd.DataFrame)\n def idxmin(self, axis=None, skipna=True, split_every=False):\n fn = \"idxmin\"\n axis = self._validate_axis(axis)\n meta = self._meta_nonempty.idxmax(axis=axis)\n if axis == 1:\n return map_partitions(\n M.idxmin,\n self,\n meta=meta,\n token=self._token_prefix + fn,\n skipna=skipna,\n axis=axis,\n enforce_metadata=False,\n )\n else:\n scalar = not is_series_like(meta)\n result = aca(\n [self],\n chunk=idxmaxmin_chunk,\n aggregate=idxmaxmin_agg,\n combine=idxmaxmin_combine,\n meta=meta,\n aggregate_kwargs={\"scalar\": scalar},\n 
token=self._token_prefix + fn,\n split_every=split_every,\n skipna=skipna,\n fn=fn,\n )\n if isinstance(self, DataFrame):\n result.divisions = (min(self.columns), max(self.columns))\n return result\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def count(self, axis=None, split_every=False, numeric_only=None):\n axis = self._validate_axis(axis)\n token = self._token_prefix + \"count\"\n if axis == 1:\n meta = self._meta_nonempty.count(axis=axis)\n return self.map_partitions(\n M.count, meta=meta, token=token, axis=axis, enforce_metadata=False\n )\n else:\n meta = self._meta_nonempty.count()\n\n # Need the astype(int) for empty dataframes, which sum to float dtype\n result = self.reduction(\n M.count,\n aggregate=_count_aggregate,\n meta=meta,\n token=token,\n split_every=split_every,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return result\n\n @derived_from(pd.DataFrame)\n def mode(self, dropna=True, split_every=False):\n mode_series = self.reduction(\n chunk=M.value_counts,\n combine=M.sum,\n aggregate=_mode_aggregate,\n split_every=split_every,\n chunk_kwargs={\"dropna\": dropna},\n aggregate_kwargs={\"dropna\": dropna},\n )\n return mode_series\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def mean(\n self,\n axis=None,\n skipna=True,\n split_every=False,\n dtype=None,\n out=None,\n numeric_only=None,\n ):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"mean\")\n meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.mean,\n self,\n meta=meta,\n token=self._token_prefix + \"mean\",\n axis=axis,\n skipna=skipna,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n num = self._get_numeric_data()\n s = num.sum(skipna=skipna, split_every=split_every)\n n = num.count(split_every=split_every)\n name = self._token_prefix + \"mean-%s\" % tokenize(self, axis, skipna)\n result = map_partitions(\n methods.mean_aggregate,\n s,\n n,\n token=name,\n meta=meta,\n enforce_metadata=False,\n parent_meta=self._meta,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def var(\n self,\n axis=None,\n skipna=True,\n ddof=1,\n split_every=False,\n dtype=None,\n out=None,\n numeric_only=None,\n ):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"var\")\n meta = self._meta_nonempty.var(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.var,\n self,\n meta=meta,\n token=self._token_prefix + \"var\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n if self.ndim == 1:\n result = self._var_1d(self, skipna, ddof, split_every)\n return handle_out(out, result)\n\n # pandas 1.0+ does not implement var on timedelta\n result = self._var_numeric(skipna, ddof, split_every)\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)\n\n def _var_numeric(self, skipna=True, ddof=1, split_every=False):\n num = self.select_dtypes(include=[\"number\", \"bool\"], exclude=[np.timedelta64])\n\n values_dtype = num.values.dtype\n array_values = num.values\n\n if not np.issubdtype(values_dtype, np.number):\n array_values = num.values.astype(\"f8\")\n\n var = da.nanvar if skipna or skipna is None else da.var\n array_var = var(array_values, axis=0, ddof=ddof, 
split_every=split_every)\n\n name = self._token_prefix + \"var-numeric\" + tokenize(num, split_every)\n cols = num._meta.columns if is_dataframe_like(num) else None\n\n var_shape = num._meta_nonempty.values.var(axis=0).shape\n array_var_name = (array_var._name,) + (0,) * len(var_shape)\n\n layer = {(name, 0): (methods.wrap_var_reduction, array_var_name, cols)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])\n\n return new_dd_object(\n graph, name, num._meta_nonempty.var(), divisions=[None, None]\n )\n\n def _var_timedeltas(self, skipna=True, ddof=1, split_every=False):\n timedeltas = self.select_dtypes(include=[np.timedelta64])\n\n var_timedeltas = [\n self._var_1d(timedeltas[col_idx], skipna, ddof, split_every)\n for col_idx in timedeltas._meta.columns\n ]\n var_timedelta_names = [(v._name, 0) for v in var_timedeltas]\n\n name = (\n self._token_prefix + \"var-timedeltas-\" + tokenize(timedeltas, split_every)\n )\n\n layer = {\n (name, 0): (\n methods.wrap_var_reduction,\n var_timedelta_names,\n timedeltas._meta.columns,\n )\n }\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=var_timedeltas\n )\n\n return new_dd_object(\n graph, name, timedeltas._meta_nonempty.var(), divisions=[None, None]\n )\n\n def _var_mixed(self, skipna=True, ddof=1, split_every=False):\n data = self.select_dtypes(include=[\"number\", \"bool\", np.timedelta64])\n\n timedelta_vars = self._var_timedeltas(skipna, ddof, split_every)\n numeric_vars = self._var_numeric(skipna, ddof, split_every)\n\n name = self._token_prefix + \"var-mixed-\" + tokenize(data, split_every)\n\n layer = {\n (name, 0): (\n methods.var_mixed_concat,\n (numeric_vars._name, 0),\n (timedelta_vars._name, 0),\n data._meta.columns,\n )\n }\n\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[numeric_vars, timedelta_vars]\n )\n return new_dd_object(\n graph, name, self._meta_nonempty.var(), divisions=[None, None]\n )\n\n def _var_1d(self, column, skipna=True, ddof=1, split_every=False):\n is_timedelta = is_timedelta64_dtype(column._meta)\n\n if is_timedelta:\n if not skipna:\n is_nan = column.isna()\n column = column.astype(\"i8\")\n column = column.mask(is_nan)\n else:\n column = column.dropna().astype(\"i8\")\n\n if pd.Int64Dtype.is_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"var-1d-\" + tokenize(column, split_every)\n\n var = da.nanvar if skipna or skipna is None else da.var\n array_var = var(column.values, axis=0, ddof=ddof, split_every=split_every)\n\n layer = {(name, 0): (methods.wrap_var_reduction, (array_var._name,), None)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])\n\n return new_dd_object(\n graph, name, column._meta_nonempty.var(), divisions=[None, None]\n )\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def std(\n self,\n axis=None,\n skipna=True,\n ddof=1,\n split_every=False,\n dtype=None,\n out=None,\n numeric_only=None,\n ):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"std\")\n meta = self._meta_nonempty.std(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.std,\n self,\n meta=meta,\n token=self._token_prefix + \"std\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n enforce_metadata=False,\n parent_meta=self._meta,\n )\n return handle_out(out, result)\n else:\n v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)\n name = 
self._token_prefix + \"std\"\n result = map_partitions(\n np.sqrt,\n v,\n meta=meta,\n token=name,\n enforce_metadata=False,\n parent_meta=self._meta,\n )\n return handle_out(out, result)\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def skew(\n self, axis=None, bias=True, nan_policy=\"propagate\", out=None, numeric_only=None\n ):\n \"\"\"\n .. note::\n\n This implementation follows the dask.array.stats implementation\n of skewness and calculates skewness without taking into account\n a bias term for finite sample size, which corresponds to the\n default settings of the scipy.stats skewness calculation. However,\n Pandas corrects for this, so the values differ by a factor of\n (n * (n - 1)) ** 0.5 / (n - 2), where n is the number of samples.\n\n Further, this method currently does not support filtering out NaN\n values, which is again a difference to Pandas.\n \"\"\"\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"skew\")\n meta = self._meta_nonempty.skew()\n if axis == 1:\n result = map_partitions(\n M.skew,\n self,\n meta=meta,\n token=self._token_prefix + \"skew\",\n axis=axis,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n if self.ndim == 1:\n result = self._skew_1d(self, bias=bias, nan_policy=nan_policy)\n return handle_out(out, result)\n else:\n result = self._skew_numeric(bias=bias, nan_policy=nan_policy)\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n\n return handle_out(out, result)\n\n def _skew_1d(self, column, bias=True, nan_policy=\"propagate\"):\n \"\"\"1D version of the skew calculation.\n\n Uses the array version from da.stats in case we are passing in a single series\n \"\"\"\n # import depends on scipy, not installed by default\n from ..array import stats as da_stats\n\n if pd.Int64Dtype.is_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"skew-1d-\" + tokenize(column)\n\n array_skew = da_stats.skew(\n column.values, axis=0, bias=bias, nan_policy=nan_policy\n )\n\n layer = {(name, 0): (methods.wrap_skew_reduction, (array_skew._name,), None)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew])\n\n return new_dd_object(\n graph, name, column._meta_nonempty.skew(), divisions=[None, None]\n )\n\n def _skew_numeric(self, bias=True, nan_policy=\"propagate\"):\n \"\"\"Method for dataframes with numeric columns.\n\n Maps the array version from da.stats onto the numeric array of columns.\n \"\"\"\n # import depends on scipy, not installed by default\n from ..array import stats as da_stats\n\n num = self.select_dtypes(include=[\"number\", \"bool\"], exclude=[np.timedelta64])\n\n values_dtype = num.values.dtype\n array_values = num.values\n\n if not np.issubdtype(values_dtype, np.number):\n array_values = num.values.astype(\"f8\")\n\n array_skew = da_stats.skew(\n array_values, axis=0, bias=bias, nan_policy=nan_policy\n )\n\n name = self._token_prefix + \"var-numeric\" + tokenize(num)\n cols = num._meta.columns if is_dataframe_like(num) else None\n\n skew_shape = num._meta_nonempty.values.var(axis=0).shape\n array_skew_name = (array_skew._name,) + (0,) * len(skew_shape)\n\n layer = {(name, 0): (methods.wrap_skew_reduction, array_skew_name, cols)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew])\n\n return new_dd_object(\n graph, name, num._meta_nonempty.skew(), divisions=[None, None]\n 
)\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def kurtosis(\n self,\n axis=None,\n fisher=True,\n bias=True,\n nan_policy=\"propagate\",\n out=None,\n numeric_only=None,\n ):\n \"\"\"\n .. note::\n\n This implementation follows the dask.array.stats implementation\n of kurtosis and calculates kurtosis without taking into account\n a bias term for finite sample size, which corresponds to the\n default settings of the scipy.stats kurtosis calculation. This differs\n from pandas.\n\n Further, this method currently does not support filtering out NaN\n values, which is again a difference to Pandas.\n \"\"\"\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"kurtosis\")\n meta = self._meta_nonempty.kurtosis()\n if axis == 1:\n result = map_partitions(\n M.kurtosis,\n self,\n meta=meta,\n token=self._token_prefix + \"kurtosis\",\n axis=axis,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n if self.ndim == 1:\n result = self._kurtosis_1d(\n self, fisher=fisher, bias=bias, nan_policy=nan_policy\n )\n return handle_out(out, result)\n else:\n result = self._kurtosis_numeric(\n fisher=fisher, bias=bias, nan_policy=nan_policy\n )\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n\n return handle_out(out, result)\n\n def _kurtosis_1d(self, column, fisher=True, bias=True, nan_policy=\"propagate\"):\n \"\"\"1D version of the kurtosis calculation.\n\n Uses the array version from da.stats in case we are passing in a single series\n \"\"\"\n # import depends on scipy, not installed by default\n from ..array import stats as da_stats\n\n if pd.api.types.is_integer_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"kurtosis-1d-\" + tokenize(column)\n\n array_kurtosis = da_stats.kurtosis(\n column.values, axis=0, fisher=fisher, bias=bias, nan_policy=nan_policy\n )\n\n layer = {\n (name, 0): (methods.wrap_kurtosis_reduction, (array_kurtosis._name,), None)\n }\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[array_kurtosis]\n )\n\n return new_dd_object(\n graph, name, column._meta_nonempty.kurtosis(), divisions=[None, None]\n )\n\n def _kurtosis_numeric(self, fisher=True, bias=True, nan_policy=\"propagate\"):\n \"\"\"Method for dataframes with numeric columns.\n\n Maps the array version from da.stats onto the numeric array of columns.\n \"\"\"\n # import depends on scipy, not installed by default\n from ..array import stats as da_stats\n\n num = self.select_dtypes(include=[\"number\", \"bool\"], exclude=[np.timedelta64])\n\n values_dtype = num.values.dtype\n array_values = num.values\n\n if not np.issubdtype(values_dtype, np.number):\n array_values = num.values.astype(\"f8\")\n\n array_kurtosis = da_stats.kurtosis(\n array_values, axis=0, fisher=fisher, bias=bias, nan_policy=nan_policy\n )\n\n name = self._token_prefix + \"kurtosis-numeric\" + tokenize(num)\n cols = num._meta.columns if is_dataframe_like(num) else None\n\n kurtosis_shape = num._meta_nonempty.values.var(axis=0).shape\n array_kurtosis_name = (array_kurtosis._name,) + (0,) * len(kurtosis_shape)\n\n layer = {\n (name, 0): (methods.wrap_kurtosis_reduction, array_kurtosis_name, cols)\n }\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[array_kurtosis]\n )\n\n return new_dd_object(\n graph, name, num._meta_nonempty.kurtosis(), divisions=[None, None]\n )\n\n @_numeric_only\n 
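    # Editorial sketch (assumed inputs, not original source): as the notes on
    # ``skew`` and ``kurtosis`` above state, dask follows the scipy.stats
    # defaults (no finite-sample bias correction), so results generally differ
    # from pandas by a bias-correction factor. Requires scipy, which backs
    # ``dask.array.stats``:
    #
    #   >>> import pandas as pd
    #   >>> import dask.dataframe as dd
    #   >>> s = pd.Series([1., 2., 3., 5., 8.])
    #   >>> ds = dd.from_pandas(s, npartitions=1)
    #   >>> float(ds.skew().compute())  # scipy-style, biased     # doctest: +SKIP
    #   >>> float(s.skew())             # pandas, bias-corrected  # doctest: +SKIP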
@derived_from(pd.DataFrame)\n def sem(self, axis=None, skipna=None, ddof=1, split_every=False, numeric_only=None):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"sem\")\n meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)\n if axis == 1:\n return map_partitions(\n M.sem,\n self,\n meta=meta,\n token=self._token_prefix + \"sem\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n parent_meta=self._meta,\n )\n else:\n num = self._get_numeric_data()\n v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)\n n = num.count(split_every=split_every)\n name = self._token_prefix + \"sem\"\n result = map_partitions(\n np.sqrt,\n v / n,\n meta=meta,\n token=name,\n enforce_metadata=False,\n parent_meta=self._meta,\n )\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return result\n\n def quantile(self, q=0.5, axis=0, method=\"default\"):\n \"\"\"Approximate row-wise and precise column-wise quantiles of DataFrame\n\n Parameters\n ----------\n q : list/array of floats, default 0.5 (50%)\n Iterable of numbers ranging from 0 to 1 for the desired quantiles\n axis : {0, 1, 'index', 'columns'} (default 0)\n 0 or 'index' for row-wise, 1 or 'columns' for column-wise\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest\n for floats and ints and fallback to the ``'dask'`` otherwise.\n \"\"\"\n axis = self._validate_axis(axis)\n keyname = \"quantiles-concat--\" + tokenize(self, q, axis)\n\n if axis == 1:\n if isinstance(q, list):\n # Not supported, the result will have current index as columns\n raise ValueError(\"'q' must be scalar when axis=1 is specified\")\n return map_partitions(\n M.quantile,\n self,\n q,\n axis,\n token=keyname,\n enforce_metadata=False,\n meta=(q, \"f8\"),\n parent_meta=self._meta,\n )\n else:\n _raise_if_object_series(self, \"quantile\")\n meta = self._meta.quantile(q, axis=axis)\n num = self._get_numeric_data()\n quantiles = tuple(quantile(self[c], q, method) for c in num.columns)\n\n qnames = [(_q._name, 0) for _q in quantiles]\n\n if isinstance(quantiles[0], Scalar):\n layer = {\n (keyname, 0): (type(meta), qnames, num.columns, None, meta.name)\n }\n graph = HighLevelGraph.from_collections(\n keyname, layer, dependencies=quantiles\n )\n divisions = (min(num.columns), max(num.columns))\n return Series(graph, keyname, meta, divisions)\n else:\n layer = {(keyname, 0): (methods.concat, qnames, 1)}\n graph = HighLevelGraph.from_collections(\n keyname, layer, dependencies=quantiles\n )\n return DataFrame(graph, keyname, meta, quantiles[0].divisions)\n\n @derived_from(pd.DataFrame)\n def describe(\n self,\n split_every=False,\n percentiles=None,\n percentiles_method=\"default\",\n include=None,\n exclude=None,\n ):\n\n if self._meta.ndim == 1:\n return self._describe_1d(self, split_every, percentiles, percentiles_method)\n elif (include is None) and (exclude is None):\n data = self._meta.select_dtypes(include=[np.number, np.timedelta64])\n\n # when some numerics/timedeltas are found, by default keep them\n if len(data.columns) == 0:\n chosen_columns = self._meta.columns\n else:\n # check if there are timedelta or boolean columns\n bools_and_timedeltas = self._meta.select_dtypes(\n include=[np.timedelta64, \"bool\"]\n )\n if len(bools_and_timedeltas.columns) == 0:\n return self._describe_numeric(\n self, split_every, percentiles, percentiles_method\n )\n else:\n 
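        # Editorial sketch (assumed inputs, not original source) for the
        # ``quantile`` method documented above: with ``axis=0`` a scalar ``q``
        # yields a Series and a list of ``q`` yields a DataFrame, while
        # ``axis=1`` only accepts a scalar ``q``.
        #
        #   >>> import pandas as pd
        #   >>> import dask.dataframe as dd
        #   >>> df = pd.DataFrame({'x': range(20), 'y': range(20, 40)})
        #   >>> ddf = dd.from_pandas(df, npartitions=4)
        #   >>> ddf.quantile(0.5).compute()           # doctest: +SKIP
        #   >>> ddf.quantile([0.25, 0.75]).compute()  # doctest: +SKIP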
chosen_columns = data.columns\n elif include == \"all\":\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n chosen_columns = self._meta.columns\n else:\n chosen_columns = self._meta.select_dtypes(include=include, exclude=exclude)\n\n stats = [\n self._describe_1d(\n self[col_idx], split_every, percentiles, percentiles_method\n )\n for col_idx in chosen_columns\n ]\n stats_names = [(s._name, 0) for s in stats]\n\n name = \"describe--\" + tokenize(self, split_every)\n layer = {(name, 0): (methods.describe_aggregate, stats_names)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n meta = self._meta_nonempty.describe(include=include, exclude=exclude)\n return new_dd_object(graph, name, meta, divisions=[None, None])\n\n def _describe_1d(\n self, data, split_every=False, percentiles=None, percentiles_method=\"default\"\n ):\n if is_bool_dtype(data._meta):\n return self._describe_nonnumeric_1d(data, split_every=split_every)\n elif is_numeric_dtype(data._meta):\n return self._describe_numeric(\n data,\n split_every=split_every,\n percentiles=percentiles,\n percentiles_method=percentiles_method,\n )\n elif is_timedelta64_dtype(data._meta):\n return self._describe_numeric(\n data.dropna().astype(\"i8\"),\n split_every=split_every,\n percentiles=percentiles,\n percentiles_method=percentiles_method,\n is_timedelta_column=True,\n )\n else:\n return self._describe_nonnumeric_1d(data, split_every=split_every)\n\n def _describe_numeric(\n self,\n data,\n split_every=False,\n percentiles=None,\n percentiles_method=\"default\",\n is_timedelta_column=False,\n ):\n\n num = data._get_numeric_data()\n\n if data.ndim == 2 and len(num.columns) == 0:\n raise ValueError(\"DataFrame contains only non-numeric data.\")\n elif data.ndim == 1 and data.dtype == \"object\":\n raise ValueError(\"Cannot compute ``describe`` on object dtype.\")\n if percentiles is None:\n percentiles = [0.25, 0.5, 0.75]\n else:\n # always include the the 50%tle to calculate the median\n # unique removes duplicates and sorts quantiles\n percentiles = np.array(percentiles)\n percentiles = np.append(percentiles, 0.5)\n percentiles = np.unique(percentiles)\n percentiles = list(percentiles)\n stats = [\n num.count(split_every=split_every),\n num.mean(split_every=split_every),\n num.std(split_every=split_every),\n num.min(split_every=split_every),\n num.quantile(percentiles, method=percentiles_method),\n num.max(split_every=split_every),\n ]\n stats_names = [(s._name, 0) for s in stats]\n\n colname = data._meta.name if is_series_like(data._meta) else None\n\n name = \"describe-numeric--\" + tokenize(num, split_every)\n layer = {\n (name, 0): (\n methods.describe_numeric_aggregate,\n stats_names,\n colname,\n is_timedelta_column,\n )\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n meta = num._meta_nonempty.describe()\n return new_dd_object(graph, name, meta, divisions=[None, None])\n\n def _describe_nonnumeric_1d(self, data, split_every=False):\n vcounts = data.value_counts(split_every=split_every)\n count_nonzero = vcounts[vcounts != 0]\n count_unique = count_nonzero.size\n\n stats = [\n # nunique\n count_unique,\n # count\n data.count(split_every=split_every),\n # most common value\n vcounts._head(1, npartitions=1, compute=False, safe=False),\n ]\n\n if is_datetime64_any_dtype(data._meta):\n min_ts = data.dropna().astype(\"i8\").min(split_every=split_every)\n max_ts = data.dropna().astype(\"i8\").max(split_every=split_every)\n 
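        # Editorial sketch (assumed inputs, not original source) for the
        # ``describe`` method above: by default only numeric columns are
        # summarized (count/mean/std/min/quantiles/max); ``include='all'``
        # adds non-numeric columns (count/unique/top):
        #
        #   >>> import pandas as pd
        #   >>> import dask.dataframe as dd
        #   >>> df = pd.DataFrame({'a': range(10), 'b': list('aabbccddee')})
        #   >>> ddf = dd.from_pandas(df, npartitions=2)
        #   >>> ddf.describe().compute()               # doctest: +SKIP
        #   >>> ddf.describe(include='all').compute()  # doctest: +SKIP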
stats.extend([min_ts, max_ts])\n\n stats_names = [(s._name, 0) for s in stats]\n colname = data._meta.name\n\n name = \"describe-nonnumeric-1d--\" + tokenize(data, split_every)\n layer = {\n (name, 0): (methods.describe_nonnumeric_aggregate, stats_names, colname)\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n meta = data._meta_nonempty.describe()\n return new_dd_object(graph, name, meta, divisions=[None, None])\n\n def _cum_agg(\n self, op_name, chunk, aggregate, axis, skipna=True, chunk_kwargs=None, out=None\n ):\n \"\"\"Wrapper for cumulative operation\"\"\"\n\n axis = self._validate_axis(axis)\n\n if axis == 1:\n name = \"{0}{1}(axis=1)\".format(self._token_prefix, op_name)\n result = self.map_partitions(chunk, token=name, **chunk_kwargs)\n return handle_out(out, result)\n else:\n # cumulate each partitions\n name1 = \"{0}{1}-map\".format(self._token_prefix, op_name)\n cumpart = map_partitions(\n chunk, self, token=name1, meta=self, **chunk_kwargs\n )\n\n name2 = \"{0}{1}-take-last\".format(self._token_prefix, op_name)\n cumlast = map_partitions(\n _take_last,\n cumpart,\n skipna,\n meta=pd.Series([], dtype=\"float\"),\n token=name2,\n )\n\n suffix = tokenize(self)\n name = \"{0}{1}-{2}\".format(self._token_prefix, op_name, suffix)\n cname = \"{0}{1}-cum-last-{2}\".format(self._token_prefix, op_name, suffix)\n\n # aggregate cumulated partisions and its previous last element\n layer = {}\n layer[(name, 0)] = (cumpart._name, 0)\n\n for i in range(1, self.npartitions):\n # store each cumulative step to graph to reduce computation\n if i == 1:\n layer[(cname, i)] = (cumlast._name, i - 1)\n else:\n # aggregate with previous cumulation results\n layer[(cname, i)] = (\n methods._cum_aggregate_apply,\n aggregate,\n (cname, i - 1),\n (cumlast._name, i - 1),\n )\n layer[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[cumpart, cumlast]\n )\n result = new_dd_object(graph, name, chunk(self._meta), self.divisions)\n return handle_out(out, result)\n\n @derived_from(pd.DataFrame)\n def cumsum(self, axis=None, skipna=True, dtype=None, out=None):\n return self._cum_agg(\n \"cumsum\",\n chunk=M.cumsum,\n aggregate=methods.cumsum_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cumprod(self, axis=None, skipna=True, dtype=None, out=None):\n return self._cum_agg(\n \"cumprod\",\n chunk=M.cumprod,\n aggregate=methods.cumprod_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cummax(self, axis=None, skipna=True, out=None):\n return self._cum_agg(\n \"cummax\",\n chunk=M.cummax,\n aggregate=methods.cummax_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cummin(self, axis=None, skipna=True, out=None):\n return self._cum_agg(\n \"cummin\",\n chunk=M.cummin,\n aggregate=methods.cummin_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def where(self, cond, other=np.nan):\n # cond and other may be dask instance,\n # passing map_partitions via keyword will not be aligned\n return map_partitions(M.where, self, cond, other, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def mask(self, cond, other=np.nan):\n return map_partitions(M.mask, 
self, cond, other, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def notnull(self):\n return self.map_partitions(M.notnull, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def isnull(self):\n return self.map_partitions(M.isnull, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def isna(self):\n if hasattr(pd, \"isna\"):\n return self.map_partitions(M.isna, enforce_metadata=False)\n else:\n raise NotImplementedError(\n \"Need more recent version of Pandas \"\n \"to support isna. \"\n \"Please use isnull instead.\"\n )\n\n @derived_from(pd.DataFrame)\n def isin(self, values):\n if is_dataframe_like(self._meta):\n # DataFrame.isin does weird alignment stuff\n bad_types = (_Frame, pd.Series, pd.DataFrame)\n else:\n bad_types = (_Frame,)\n if isinstance(values, bad_types):\n raise NotImplementedError(\"Passing a %r to `isin`\" % typename(type(values)))\n meta = self._meta_nonempty.isin(values)\n # We wrap values in a delayed for two reasons:\n # - avoid serializing data in every task\n # - avoid cost of traversal of large list in optimizations\n return self.map_partitions(\n M.isin, delayed(values), meta=meta, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def astype(self, dtype):\n # XXX: Pandas will segfault for empty dataframes when setting\n # categorical dtypes. This operation isn't allowed currently anyway. We\n # get the metadata with a non-empty frame to throw the error instead of\n # segfaulting.\n if is_dataframe_like(self._meta) and is_categorical_dtype(dtype):\n meta = self._meta_nonempty.astype(dtype)\n else:\n meta = self._meta.astype(dtype)\n if hasattr(dtype, \"items\"):\n set_unknown = [\n k\n for k, v in dtype.items()\n if is_categorical_dtype(v) and getattr(v, \"categories\", None) is None\n ]\n meta = clear_known_categories(meta, cols=set_unknown)\n elif is_categorical_dtype(dtype) and getattr(dtype, \"categories\", None) is None:\n meta = clear_known_categories(meta)\n return self.map_partitions(\n M.astype, dtype=dtype, meta=meta, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def append(self, other, interleave_partitions=False):\n # because DataFrame.append will override the method,\n # wrap by pd.Series.append docstring\n from .multi import concat\n\n if isinstance(other, (list, dict)):\n msg = \"append doesn't support list or dict input\"\n raise NotImplementedError(msg)\n\n return concat(\n [self, other], join=\"outer\", interleave_partitions=interleave_partitions\n )\n\n @derived_from(pd.Series)\n def dot(self, other, meta=no_default):\n if not isinstance(other, _Frame):\n raise TypeError(\"The second operand must be a dask array or dask dataframe\")\n\n if isinstance(other, DataFrame):\n s = self.map_partitions(M.dot, other, token=\"dot\", meta=meta)\n return s.groupby(by=s.index).apply(\n lambda x: x.sum(skipna=False), meta=s._meta_nonempty\n )\n\n def _dot_series(*args, **kwargs):\n # .sum() is invoked on each partition before being applied to all\n # partitions. 
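        # Editorial sketch (assumed column names, not original source) for the
        # ``isin`` and ``astype`` methods above: ``isin`` wraps ``values`` in a
        # delayed object so the lookup collection is not serialized into every
        # task, and casting to a categorical dtype without explicit categories
        # leaves the categories unknown until computed:
        #
        #   >>> import pandas as pd
        #   >>> import dask.dataframe as dd
        #   >>> df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': list('abab')})
        #   >>> ddf = dd.from_pandas(df, npartitions=2)
        #   >>> ddf['x'].isin([1, 3]).compute()        # doctest: +SKIP
        #   >>> ddf['y'].astype('category').cat.known  # doctest: +SKIP
        #   False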
The return type is expected to be a series, not a numpy object\n return pd.Series(M.dot(*args, **kwargs))\n\n return self.map_partitions(_dot_series, other, token=\"dot\", meta=meta).sum(\n skipna=False\n )\n\n @derived_from(pd.DataFrame)\n def align(self, other, join=\"outer\", axis=None, fill_value=None):\n meta1, meta2 = _emulate(\n M.align, self, other, join, axis=axis, fill_value=fill_value\n )\n aligned = self.map_partitions(\n M.align,\n other,\n join=join,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n token = tokenize(self, other, join, axis, fill_value)\n\n name1 = \"align1-\" + token\n dsk1 = {\n (name1, i): (getitem, key, 0)\n for i, key in enumerate(aligned.__dask_keys__())\n }\n dsk1.update(aligned.dask)\n result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)\n\n name2 = \"align2-\" + token\n dsk2 = {\n (name2, i): (getitem, key, 1)\n for i, key in enumerate(aligned.__dask_keys__())\n }\n dsk2.update(aligned.dask)\n result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)\n\n return result1, result2\n\n @derived_from(pd.DataFrame)\n def combine(self, other, func, fill_value=None, overwrite=True):\n return self.map_partitions(\n M.combine, other, func, fill_value=fill_value, overwrite=overwrite\n )\n\n @derived_from(pd.DataFrame)\n def combine_first(self, other):\n return self.map_partitions(M.combine_first, other)\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.DataFrame):\n \"\"\"bind operator method like DataFrame.add to this class\"\"\"\n raise NotImplementedError\n\n @derived_from(pd.DataFrame)\n def resample(self, rule, closed=None, label=None):\n from .tseries.resample import Resampler\n\n return Resampler(self, rule, closed=closed, label=label)\n\n @derived_from(pd.DataFrame)\n def first(self, offset):\n # Let pandas error on bad args\n self._meta_nonempty.first(offset)\n\n if not self.known_divisions:\n raise ValueError(\"`first` is not implemented for unknown divisions\")\n\n offset = pd.tseries.frequencies.to_offset(offset)\n date = self.divisions[0] + offset\n end = self.loc._get_partitions(date)\n\n is_anchored = offset.is_anchored()\n\n include_right = is_anchored or not hasattr(offset, \"delta\")\n\n if end == self.npartitions - 1:\n divs = self.divisions\n else:\n divs = self.divisions[: end + 1] + (date,)\n\n name = \"first-\" + tokenize(self, offset)\n dsk = {(name, i): (self._name, i) for i in range(end)}\n dsk[(name, end)] = (\n methods.boundary_slice,\n (self._name, end),\n None,\n date,\n include_right,\n True,\n \"loc\",\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self, divs)\n\n @derived_from(pd.DataFrame)\n def last(self, offset):\n # Let pandas error on bad args\n self._meta_nonempty.first(offset)\n\n if not self.known_divisions:\n raise ValueError(\"`last` is not implemented for unknown divisions\")\n\n offset = pd.tseries.frequencies.to_offset(offset)\n date = self.divisions[-1] - offset\n start = self.loc._get_partitions(date)\n\n if start == 0:\n divs = self.divisions\n else:\n divs = (date,) + self.divisions[start + 1 :]\n\n name = \"last-\" + tokenize(self, offset)\n dsk = {\n (name, i + 1): (self._name, j + 1)\n for i, j in enumerate(range(start, self.npartitions))\n }\n dsk[(name, 0)] = (\n methods.boundary_slice,\n (self._name, start),\n date,\n None,\n True,\n False,\n \"loc\",\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self, divs)\n\n def 
nunique_approx(self, split_every=None):\n \"\"\"Approximate number of unique rows.\n\n This method uses the HyperLogLog algorithm for cardinality\n estimation to compute the approximate number of unique rows.\n The approximate error is 0.406%.\n\n Parameters\n ----------\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used.\n Default is 8.\n\n Returns\n -------\n a float representing the approximate number of elements\n \"\"\"\n from . import hyperloglog # here to avoid circular import issues\n\n return aca(\n [self],\n chunk=hyperloglog.compute_hll_array,\n combine=hyperloglog.reduce_state,\n aggregate=hyperloglog.estimate_count,\n split_every=split_every,\n b=16,\n meta=float,\n )\n\n @property\n def values(self):\n \"\"\"Return a dask.array of the values of this dataframe\n\n Warning: This creates a dask.array without precise shape information.\n Operations that depend on shape information, like slicing or reshaping,\n will not work.\n \"\"\"\n return self.map_partitions(methods.values)\n\n def _validate_chunks(self, arr, lengths):\n from dask.array.core import normalize_chunks\n\n if isinstance(lengths, Sequence):\n lengths = tuple(lengths)\n\n if len(lengths) != self.npartitions:\n raise ValueError(\n \"The number of items in 'lengths' does not match \"\n \"the number of partitions. \"\n \"{} != {}\".format(len(lengths), self.npartitions)\n )\n\n if self.ndim == 1:\n chunks = normalize_chunks((lengths,))\n else:\n chunks = normalize_chunks((lengths, (len(self.columns),)))\n\n return chunks\n elif lengths is not None:\n raise ValueError(\"Unexpected value for 'lengths': '{}'\".format(lengths))\n\n return arr._chunks\n\n def _is_index_level_reference(self, key):\n \"\"\"\n Test whether a key is an index level reference\n\n To be considered an index level reference, `key` must match the index name\n and must NOT match the name of any column (if a dataframe).\n \"\"\"\n return (\n self.index.name is not None\n and not is_dask_collection(key)\n and (np.isscalar(key) or isinstance(key, tuple))\n and key == self.index.name\n and key not in getattr(self, \"columns\", ())\n )\n\n def _contains_index_name(self, columns_or_index):\n \"\"\"\n Test whether the input contains a reference to the index of the DataFrame/Series\n \"\"\"\n if isinstance(columns_or_index, list):\n return any(self._is_index_level_reference(n) for n in columns_or_index)\n else:\n return self._is_index_level_reference(columns_or_index)\n\n\ndef _raise_if_object_series(x, funcname):\n \"\"\"\n Utility function to raise an error if an object column does not support\n a certain operation like `mean`.\n \"\"\"\n if isinstance(x, Series) and hasattr(x, \"dtype\") and x.dtype == object:\n raise ValueError(\"`%s` not supported with object series\" % funcname)\n\n\nclass Series(_Frame):\n \"\"\"Parallel Pandas Series\n\n Do not use this class directly. 
Instead use functions like\n ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.\n\n Parameters\n ----------\n\n dsk: dict\n The dask graph to compute this Series\n _name: str\n The key prefix that specifies which keys in the dask comprise this\n particular Series\n meta: pandas.Series\n An empty ``pandas.Series`` with names, dtypes, and index matching the\n expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n\n See Also\n --------\n dask.dataframe.DataFrame\n \"\"\"\n\n _partition_type = pd.Series\n _is_partition_type = staticmethod(is_series_like)\n _token_prefix = \"series-\"\n _accessors = set()\n\n def __array_wrap__(self, array, context=None):\n if isinstance(context, tuple) and len(context) > 0:\n if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():\n index = None\n else:\n index = context[1][0].index\n\n return pd.Series(array, index=index, name=self.name)\n\n @property\n def name(self):\n return self._meta.name\n\n @name.setter\n def name(self, name):\n self._meta.name = name\n renamed = _rename_dask(self, name)\n # update myself\n self.dask = renamed.dask\n self._name = renamed._name\n\n @property\n def ndim(self):\n \"\"\"Return dimensionality\"\"\"\n return 1\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of a Series.\n\n The single element of the tuple is a Delayed result.\n\n Examples\n --------\n >>> series.shape # doctest: +SKIP\n (dd.Scalar<size-ag..., dtype=int64>,)\n \"\"\"\n return (self.size,)\n\n @property\n def dtype(self):\n \"\"\"Return data type\"\"\"\n return self._meta.dtype\n\n @cache_readonly\n def dt(self):\n \"\"\"Namespace of datetime methods\"\"\"\n return DatetimeAccessor(self)\n\n @cache_readonly\n def cat(self):\n return CategoricalAccessor(self)\n\n @cache_readonly\n def str(self):\n \"\"\"Namespace for string methods\"\"\"\n return StringAccessor(self)\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n # Remove the `cat` and `str` accessors if not available. We can't\n # decide this statically for the `dt` accessor, as it works on\n # datetime-like things as well.\n for accessor in [\"cat\", \"str\"]:\n if not hasattr(self._meta, accessor):\n o.remove(accessor)\n return list(o)\n\n @property\n def nbytes(self):\n \"\"\"Number of bytes\"\"\"\n return self.reduction(\n methods.nbytes, np.sum, token=\"nbytes\", meta=int, split_every=False\n )\n\n def _repr_data(self):\n return _repr_data_series(self._meta, self._repr_divisions)\n\n def __repr__(self):\n \"\"\"have to overwrite footer\"\"\"\n if self.name is not None:\n footer = \"Name: {name}, dtype: {dtype}\".format(\n name=self.name, dtype=self.dtype\n )\n else:\n footer = \"dtype: {dtype}\".format(dtype=self.dtype)\n\n return \"\"\"Dask {klass} Structure:\n{data}\n{footer}\nDask Name: {name}, {task} tasks\"\"\".format(\n klass=self.__class__.__name__,\n data=self.to_string(),\n footer=footer,\n name=key_split(self._name),\n task=len(self.dask),\n )\n\n def rename(self, index=None, inplace=False, sorted_index=False):\n \"\"\"Alter Series index labels or name\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n Alternatively, change ``Series.name`` with a scalar value.\n\n Parameters\n ----------\n index : scalar, hashable sequence, dict-like or callable, optional\n If dict-like or callable, the transformation is applied to the\n index. 
Scalar or hashable sequence-like will alter the\n ``Series.name`` attribute.\n inplace : boolean, default False\n Whether to return a new Series or modify this one inplace.\n sorted_index : bool, default False\n If true, the output ``Series`` will have known divisions inferred\n from the input series and the transformation. Ignored for\n non-callable/dict-like ``index`` or when the input series has\n unknown divisions. Note that this may only be set to ``True`` if\n you know that the transformed index is monotonically increasing. Dask\n will check that transformed divisions are monotonic, but cannot\n check all the values between divisions, so incorrectly setting this\n can result in bugs.\n\n Returns\n -------\n renamed : Series\n\n See Also\n --------\n pandas.Series.rename\n \"\"\"\n from pandas.api.types import is_dict_like, is_list_like, is_scalar\n\n import dask.dataframe as dd\n\n if is_scalar(index) or (\n is_list_like(index)\n and not is_dict_like(index)\n and not isinstance(index, dd.Series)\n ):\n res = self if inplace else self.copy()\n res.name = index\n else:\n res = self.map_partitions(M.rename, index, enforce_metadata=False)\n if self.known_divisions:\n if sorted_index and (callable(index) or is_dict_like(index)):\n old = pd.Series(range(self.npartitions + 1), index=self.divisions)\n new = old.rename(index).index\n if not new.is_monotonic_increasing:\n msg = (\n \"sorted_index=True, but the transformed index \"\n \"isn't monotonic_increasing\"\n )\n raise ValueError(msg)\n res.divisions = tuple(methods.tolist(new))\n else:\n res = res.clear_divisions()\n if inplace:\n self.dask = res.dask\n self._name = res._name\n self.divisions = res.divisions\n self._meta = res._meta\n res = self\n return res\n\n @derived_from(pd.Series)\n def round(self, decimals=0):\n return elemwise(M.round, self, decimals)\n\n @derived_from(pd.DataFrame)\n def to_timestamp(self, freq=None, how=\"start\", axis=0):\n df = elemwise(M.to_timestamp, self, freq, how, axis)\n df.divisions = tuple(pd.Index(self.divisions).to_timestamp())\n return df\n\n def quantile(self, q=0.5, method=\"default\"):\n \"\"\"Approximate quantiles of Series\n\n Parameters\n ----------\n q : list/array of floats, default 0.5 (50%)\n Iterable of numbers ranging from 0 to 1 for the desired quantiles\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). 
If set to ``'tdigest'`` will use tdigest\n for floats and ints and fallback to the ``'dask'`` otherwise.\n \"\"\"\n return quantile(self, q, method=method)\n\n def _repartition_quantiles(self, npartitions, upsample=1.0):\n \"\"\"Approximate quantiles of Series used for repartitioning\"\"\"\n from .partitionquantiles import partition_quantiles\n\n return partition_quantiles(self, npartitions, upsample=upsample)\n\n def __getitem__(self, key):\n if isinstance(key, Series) and self.divisions == key.divisions:\n name = \"index-%s\" % tokenize(self, key)\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])\n return Series(graph, name, self._meta, self.divisions)\n raise NotImplementedError(\n \"Series getitem is only supported for other series objects \"\n \"with matching partition structure\"\n )\n\n @derived_from(pd.DataFrame)\n def _get_numeric_data(self, how=\"any\", subset=None):\n return self\n\n @derived_from(pd.Series)\n def iteritems(self):\n for i in range(self.npartitions):\n s = self.get_partition(i).compute()\n for item in s.iteritems():\n yield item\n\n @derived_from(pd.Series)\n def __iter__(self):\n for i in range(self.npartitions):\n s = self.get_partition(i).compute()\n for row in s:\n yield row\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, \"index\", None):\n raise ValueError(\"No axis named {0}\".format(axis))\n # convert to numeric axis\n return {None: 0, \"index\": 0}.get(axis, axis)\n\n @derived_from(pd.Series)\n def groupby(\n self, by=None, group_keys=True, sort=None, observed=None, dropna=None, **kwargs\n ):\n from dask.dataframe.groupby import SeriesGroupBy\n\n return SeriesGroupBy(\n self,\n by=by,\n group_keys=group_keys,\n sort=sort,\n observed=observed,\n dropna=dropna,\n **kwargs,\n )\n\n @derived_from(pd.Series)\n def count(self, split_every=False):\n return super().count(split_every=split_every)\n\n @derived_from(pd.Series)\n def mode(self, dropna=True, split_every=False):\n return super().mode(dropna=dropna, split_every=split_every)\n\n @derived_from(pd.Series)\n def explode(self):\n meta = self._meta.explode()\n return self.map_partitions(M.explode, meta=meta, enforce_metadata=False)\n\n def unique(self, split_every=None, split_out=1):\n \"\"\"\n Return Series of unique values in the object. Includes NA values.\n\n Returns\n -------\n uniques : Series\n \"\"\"\n return aca(\n self,\n chunk=methods.unique,\n aggregate=methods.unique,\n meta=self._meta,\n token=\"unique\",\n split_every=split_every,\n series_name=self.name,\n split_out=split_out,\n )\n\n @derived_from(pd.Series)\n def nunique(self, split_every=None):\n return self.drop_duplicates(split_every=split_every).count()\n\n @derived_from(pd.Series)\n def value_counts(\n self,\n sort=None,\n ascending=False,\n dropna=None,\n normalize=False,\n split_every=None,\n split_out=1,\n ):\n \"\"\"\n Note: dropna is only supported in pandas >= 1.1.0, in which case it defaults to\n True.\n \"\"\"\n kwargs = {\"sort\": sort, \"ascending\": ascending}\n\n if dropna is not None:\n if not PANDAS_GT_110:\n raise NotImplementedError(\n \"dropna is not a valid argument for dask.dataframe.value_counts \"\n f\"if pandas < 1.1.0. 
Pandas version is {pd.__version__}\"\n )\n kwargs[\"dropna\"] = dropna\n\n aggregate_kwargs = {\"normalize\": normalize}\n if split_out > 1:\n aggregate_kwargs[\"total_length\"] = (\n len(self) if dropna is False else len(self.dropna())\n )\n\n return aca(\n self,\n chunk=M.value_counts,\n aggregate=methods.value_counts_aggregate,\n combine=methods.value_counts_combine,\n meta=self._meta.value_counts(normalize=normalize),\n token=\"value-counts\",\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n aggregate_kwargs=aggregate_kwargs,\n **kwargs,\n )\n\n @derived_from(pd.Series)\n def nlargest(self, n=5, split_every=None):\n return aca(\n self,\n chunk=M.nlargest,\n aggregate=M.nlargest,\n meta=self._meta,\n token=\"series-nlargest\",\n split_every=split_every,\n n=n,\n )\n\n @derived_from(pd.Series)\n def nsmallest(self, n=5, split_every=None):\n return aca(\n self,\n chunk=M.nsmallest,\n aggregate=M.nsmallest,\n meta=self._meta,\n token=\"series-nsmallest\",\n split_every=split_every,\n n=n,\n )\n\n @derived_from(pd.Series)\n def isin(self, values):\n # Added just to get the different docstring for Series\n return super().isin(values)\n\n @insert_meta_param_description(pad=12)\n @derived_from(pd.Series)\n def map(self, arg, na_action=None, meta=no_default):\n if is_series_like(arg) and is_dask_collection(arg):\n return series_map(self, arg)\n if not (\n isinstance(arg, dict)\n or callable(arg)\n or is_series_like(arg)\n and not is_dask_collection(arg)\n ):\n raise TypeError(\n \"arg must be pandas.Series, dict or callable.\"\n \" Got {0}\".format(type(arg))\n )\n name = \"map-\" + tokenize(self, arg, na_action)\n dsk = {\n (name, i): (M.map, k, arg, na_action)\n for i, k in enumerate(self.__dask_keys__())\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n if meta is no_default:\n meta = _emulate(M.map, self, arg, na_action=na_action, udf=True)\n else:\n meta = make_meta(\n meta,\n index=getattr(make_meta(self), \"index\", None),\n parent_meta=self._meta,\n )\n\n return type(self)(graph, name, meta, self.divisions)\n\n @derived_from(pd.Series)\n def dropna(self):\n return self.map_partitions(M.dropna, enforce_metadata=False)\n\n @derived_from(pd.Series)\n def between(self, left, right, inclusive=\"both\"):\n return self.map_partitions(\n M.between, left=left, right=right, inclusive=inclusive\n )\n\n @derived_from(pd.Series)\n def clip(self, lower=None, upper=None, out=None):\n if out is not None:\n raise ValueError(\"'out' must be None\")\n # np.clip may pass out\n return self.map_partitions(\n M.clip, lower=lower, upper=upper, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def clip_lower(self, threshold):\n return self.map_partitions(\n M.clip_lower, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def clip_upper(self, threshold):\n return self.map_partitions(\n M.clip_upper, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def align(self, other, join=\"outer\", axis=None, fill_value=None):\n return super().align(other, join=join, axis=axis, fill_value=fill_value)\n\n @derived_from(pd.Series)\n def combine(self, other, func, fill_value=None):\n return self.map_partitions(M.combine, other, func, fill_value=fill_value)\n\n @derived_from(pd.Series)\n def squeeze(self):\n return self\n\n @derived_from(pd.Series)\n def combine_first(self, other):\n return self.map_partitions(M.combine_first, other)\n\n def to_bag(self, index=False, format=\"tuple\"):\n 
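The Series reductions defined above (``value_counts``, ``nlargest``, ``nsmallest``, ``map``) all funnel through ``apply_concat_apply`` or ``map_partitions``. A minimal usage sketch follows, assuming only that dask and pandas are importable; the data and variable names are the editor's illustration, not part of the source:

# Illustrative sketch (assumes dask + pandas are installed); names and data are hypothetical.
import pandas as pd
import dask.dataframe as dd

s = dd.from_pandas(pd.Series([3, 1, 3, 2, 3, 1], name="x"), npartitions=2)

counts = s.value_counts(split_every=2)                 # tree-reduced via apply_concat_apply
top2 = s.nlargest(n=2)                                 # chunk-wise nlargest, then aggregated nlargest
doubled = s.map(lambda v: v * 2, meta=("x", "int64"))  # explicit meta skips metadata inference

print(counts.compute(), top2.compute(), doubled.compute(), sep="\n")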
\"\"\"Create a Dask Bag from a Series\"\"\"\n from .io import to_bag\n\n return to_bag(self, index, format=format)\n\n @derived_from(pd.Series)\n def to_frame(self, name=None):\n return self.map_partitions(M.to_frame, name, meta=self._meta.to_frame(name))\n\n @derived_from(pd.Series)\n def to_string(self, max_rows=5):\n # option_context doesn't affect\n return self._repr_data().to_string(max_rows=max_rows)\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.Series):\n \"\"\"bind operator method like Series.add to this class\"\"\"\n\n def meth(self, other, level=None, fill_value=None, axis=0):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = self._validate_axis(axis)\n meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)\n return map_partitions(\n op, self, other, meta=meta, axis=axis, fill_value=fill_value\n )\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))\n\n @classmethod\n def _bind_comparison_method(cls, name, comparison, original=pd.Series):\n \"\"\"bind comparison method like Series.eq to this class\"\"\"\n\n def meth(self, other, level=None, fill_value=None, axis=0):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = self._validate_axis(axis)\n if fill_value is None:\n return elemwise(comparison, self, other, axis=axis)\n else:\n op = partial(comparison, fill_value=fill_value)\n return elemwise(op, self, other, axis=axis)\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))\n\n @insert_meta_param_description(pad=12)\n def apply(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):\n \"\"\"Parallel version of pandas.Series.apply\n\n Parameters\n ----------\n func : function\n Function to apply\n convert_dtype : boolean, default True\n Try to find better dtype for elementwise function results.\n If False, leave as dtype=object.\n $META\n args : tuple\n Positional arguments to pass to function in addition to the value.\n\n Additional keyword arguments will be passed as keywords to the function.\n\n Returns\n -------\n applied : Series or DataFrame if func returns a Series.\n\n Examples\n --------\n >>> import dask.dataframe as dd\n >>> s = pd.Series(range(5), name='x')\n >>> ds = dd.from_pandas(s, npartitions=2)\n\n Apply a function elementwise across the Series, passing in extra\n arguments in ``args`` and ``kwargs``:\n\n >>> def myadd(x, a, b=1):\n ... return x + a + b\n >>> res = ds.apply(myadd, args=(2,), b=1.5) # doctest: +SKIP\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. 
This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with name ``'x'``, and dtype\n ``float64``:\n\n >>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ds.apply(lambda x: x + 1, meta=ds)\n\n See Also\n --------\n dask.Series.map_partitions\n \"\"\"\n if meta is no_default:\n meta = _emulate(\n M.apply,\n self._meta_nonempty,\n func,\n convert_dtype=convert_dtype,\n args=args,\n udf=True,\n **kwds,\n )\n warnings.warn(meta_warning(meta))\n\n return map_partitions(\n M.apply, self, func, convert_dtype, args, meta=meta, **kwds\n )\n\n @derived_from(pd.Series)\n def cov(self, other, min_periods=None, split_every=False):\n from .multi import concat\n\n if not isinstance(other, Series):\n raise TypeError(\"other must be a dask.dataframe.Series\")\n df = concat([self, other], axis=1)\n return cov_corr(df, min_periods, scalar=True, split_every=split_every)\n\n @derived_from(pd.Series)\n def corr(self, other, method=\"pearson\", min_periods=None, split_every=False):\n from .multi import concat\n\n if not isinstance(other, Series):\n raise TypeError(\"other must be a dask.dataframe.Series\")\n if method != \"pearson\":\n raise NotImplementedError(\"Only Pearson correlation has been implemented\")\n df = concat([self, other], axis=1)\n return cov_corr(\n df, min_periods, corr=True, scalar=True, split_every=split_every\n )\n\n @derived_from(pd.Series)\n def autocorr(self, lag=1, split_every=False):\n if not isinstance(lag, Integral):\n raise TypeError(\"lag must be an integer\")\n return self.corr(self if lag == 0 else self.shift(lag), split_every=split_every)\n\n @derived_from(pd.Series)\n def memory_usage(self, index=True, deep=False):\n result = self.map_partitions(\n M.memory_usage, index=index, deep=deep, enforce_metadata=False\n )\n return delayed(sum)(result.to_delayed())\n\n def __divmod__(self, other):\n res1 = self // other\n res2 = self % other\n return res1, res2\n\n def __rdivmod__(self, other):\n res1 = other // self\n res2 = other % self\n return res1, res2\n\n\nclass Index(Series):\n\n _partition_type = pd.Index\n _is_partition_type = staticmethod(is_index_like)\n _token_prefix = \"index-\"\n _accessors = set()\n\n _dt_attributes = {\n \"nanosecond\",\n \"microsecond\",\n \"millisecond\",\n \"dayofyear\",\n \"minute\",\n \"hour\",\n \"day\",\n \"dayofweek\",\n \"second\",\n \"week\",\n \"weekday\",\n \"weekofyear\",\n \"month\",\n \"quarter\",\n \"year\",\n }\n\n _cat_attributes = {\n \"known\",\n \"as_known\",\n \"as_unknown\",\n \"add_categories\",\n \"categories\",\n \"remove_categories\",\n \"reorder_categories\",\n \"as_ordered\",\n \"codes\",\n \"remove_unused_categories\",\n \"set_categories\",\n \"as_unordered\",\n \"ordered\",\n \"rename_categories\",\n }\n\n def __getattr__(self, key):\n if is_categorical_dtype(self.dtype) and key in self._cat_attributes:\n return getattr(self.cat, key)\n elif key in self._dt_attributes:\n return getattr(self.dt, key)\n raise AttributeError(\"'Index' object has no attribute %r\" % key)\n\n def __dir__(self):\n out = super().__dir__()\n out.extend(self._dt_attributes)\n if is_categorical_dtype(self.dtype):\n out.extend(self._cat_attributes)\n return out\n\n @property\n def index(self):\n msg = \"'{0}' object has no attribute 'index'\"\n raise AttributeError(msg.format(self.__class__.__name__))\n\n def __array_wrap__(self, array, 
context=None):\n return pd.Index(array, name=self.name)\n\n def head(self, n=5, compute=True):\n \"\"\"First n items of the Index.\n\n Caveat, this only checks the first partition.\n \"\"\"\n name = \"head-%d-%s\" % (n, self._name)\n dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n\n result = new_dd_object(graph, name, self._meta, self.divisions[:2])\n\n if compute:\n result = result.compute()\n return result\n\n @derived_from(pd.Index)\n def max(self, split_every=False):\n return self.reduction(\n M.max,\n meta=self._meta_nonempty.max(),\n token=self._token_prefix + \"max\",\n split_every=split_every,\n )\n\n @derived_from(pd.Index)\n def min(self, split_every=False):\n return self.reduction(\n M.min,\n meta=self._meta_nonempty.min(),\n token=self._token_prefix + \"min\",\n split_every=split_every,\n )\n\n def count(self, split_every=False):\n return self.reduction(\n methods.index_count,\n np.sum,\n token=\"index-count\",\n meta=int,\n split_every=split_every,\n )\n\n @derived_from(pd.Index)\n def shift(self, periods=1, freq=None):\n if isinstance(self._meta, pd.PeriodIndex):\n if freq is not None:\n raise ValueError(\"PeriodIndex doesn't accept `freq` argument\")\n meta = self._meta_nonempty.shift(periods)\n out = self.map_partitions(\n M.shift, periods, meta=meta, token=\"shift\", transform_divisions=False\n )\n else:\n # Pandas will raise for other index types that don't implement shift\n meta = self._meta_nonempty.shift(periods, freq=freq)\n out = self.map_partitions(\n M.shift,\n periods,\n token=\"shift\",\n meta=meta,\n freq=freq,\n transform_divisions=False,\n )\n if freq is None:\n freq = meta.freq\n return maybe_shift_divisions(out, periods, freq=freq)\n\n @derived_from(pd.Index)\n def to_series(self):\n return self.map_partitions(M.to_series, meta=self._meta.to_series())\n\n @derived_from(pd.Index, ua_args=[\"index\"])\n def to_frame(self, index=True, name=None):\n if not index:\n raise NotImplementedError()\n\n return self.map_partitions(\n M.to_frame,\n index,\n name,\n meta=self._meta.to_frame(index, name),\n transform_divisions=False,\n )\n\n @insert_meta_param_description(pad=12)\n @derived_from(pd.Index)\n def map(self, arg, na_action=None, meta=no_default, is_monotonic=False):\n \"\"\"\n Note that this method clears any known divisions.\n\n If your mapping function is monotonically increasing then use `is_monotonic`\n to apply the maping function to the old divisions and assign the new\n divisions to the output.\n\n \"\"\"\n applied = super().map(arg, na_action=na_action, meta=meta)\n if is_monotonic and self.known_divisions:\n applied.divisions = tuple(\n pd.Series(self.divisions).map(arg, na_action=na_action)\n )\n else:\n applied = applied.clear_divisions()\n return applied\n\n\nclass DataFrame(_Frame):\n \"\"\"\n Parallel Pandas DataFrame\n\n Do not use this class directly. 
Instead use functions like\n ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.\n\n Parameters\n ----------\n dsk: dict\n The dask graph to compute this DataFrame\n name: str\n The key prefix that specifies which keys in the dask comprise this\n particular DataFrame\n meta: pandas.DataFrame\n An empty ``pandas.DataFrame`` with names, dtypes, and index matching\n the expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n \"\"\"\n\n _partition_type = pd.DataFrame\n _is_partition_type = staticmethod(is_dataframe_like)\n _token_prefix = \"dataframe-\"\n _accessors = set()\n\n def __init__(self, dsk, name, meta, divisions):\n super().__init__(dsk, name, meta, divisions)\n if self.dask.layers[name].collection_annotations is None:\n self.dask.layers[name].collection_annotations = {\n \"npartitions\": self.npartitions,\n \"columns\": [col for col in self.columns],\n \"type\": typename(type(self)),\n \"dataframe_type\": typename(type(self._meta)),\n \"series_dtypes\": {\n col: self._meta[col].dtype\n if hasattr(self._meta[col], \"dtype\")\n else None\n for col in self._meta.columns\n },\n }\n else:\n self.dask.layers[name].collection_annotations.update(\n {\n \"npartitions\": self.npartitions,\n \"columns\": [col for col in self.columns],\n \"type\": typename(type(self)),\n \"dataframe_type\": typename(type(self._meta)),\n \"series_dtypes\": {\n col: self._meta[col].dtype\n if hasattr(self._meta[col], \"dtype\")\n else None\n for col in self._meta.columns\n },\n }\n )\n\n def __array_wrap__(self, array, context=None):\n if isinstance(context, tuple) and len(context) > 0:\n if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():\n index = None\n else:\n index = context[1][0].index\n\n return pd.DataFrame(array, index=index, columns=self.columns)\n\n @property\n def columns(self):\n return self._meta.columns\n\n @columns.setter\n def columns(self, columns):\n renamed = _rename_dask(self, columns)\n self._meta = renamed._meta\n self._name = renamed._name\n self.dask = renamed.dask\n\n @property\n def iloc(self):\n \"\"\"Purely integer-location based indexing for selection by position.\n\n Only indexing the column positions is supported. Trying to select\n row positions will raise a ValueError.\n\n See :ref:`dataframe.indexing` for more.\n\n Examples\n --------\n >>> df.iloc[:, [2, 0, 1]] # doctest: +SKIP\n \"\"\"\n from .indexing import _iLocIndexer\n\n # For dataframes with unique column names, this will be transformed into a __getitem__ call\n return _iLocIndexer(self)\n\n def __len__(self):\n try:\n s = self.iloc[:, 0]\n except IndexError:\n return super().__len__()\n else:\n return len(s)\n\n def __contains__(self, key):\n return key in self._meta\n\n @property\n def empty(self):\n raise NotImplementedError(\n \"Checking whether a Dask DataFrame has any rows may be expensive. \"\n \"However, checking the number of columns is fast. \"\n \"Depending on which of these results you need, use either \"\n \"`len(df.index) == 0` or `len(df.columns) == 0`\"\n )\n\n def __getitem__(self, key):\n name = \"getitem-%s\" % tokenize(self, key)\n if np.isscalar(key) or isinstance(key, (tuple, str)):\n\n if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):\n if key not in self._meta.columns:\n if PANDAS_GT_120:\n warnings.warn(\n \"Indexing a DataFrame with a datetimelike index using a single \"\n \"string to slice the rows, like `frame[string]`, is deprecated \"\n \"and will be removed in a future version. 
Use `frame.loc[string]` \"\n \"instead.\",\n FutureWarning,\n )\n return self.loc[key]\n\n # error is raised from pandas\n meta = self._meta[_extract_meta(key)]\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, meta, self.divisions)\n elif isinstance(key, slice):\n from pandas.api.types import is_float_dtype\n\n is_integer_slice = any(\n isinstance(i, Integral) for i in (key.start, key.step, key.stop)\n )\n # Slicing with integer labels is always iloc based except for a\n # float indexer for some reason\n if is_integer_slice and not is_float_dtype(self.index.dtype):\n # NOTE: this always fails currently, as iloc is mostly\n # unsupported, but we call it anyway here for future-proofing\n # and error-attribution purposes\n return self.iloc[key]\n else:\n return self.loc[key]\n\n if isinstance(key, (np.ndarray, list)) or (\n not is_dask_collection(key) and (is_series_like(key) or is_index_like(key))\n ):\n # error is raised from pandas\n meta = self._meta[_extract_meta(key)]\n\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, meta, self.divisions)\n if isinstance(key, Series):\n # do not perform dummy calculation, as columns will not be changed.\n if self.divisions != key.divisions:\n from .multi import _maybe_align_partitions\n\n self, key = _maybe_align_partitions([self, key])\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])\n return new_dd_object(graph, name, self, self.divisions)\n if isinstance(key, DataFrame):\n return self.where(key, np.nan)\n\n raise NotImplementedError(key)\n\n def __setitem__(self, key, value):\n if isinstance(key, (tuple, list)) and isinstance(value, DataFrame):\n df = self.assign(**{k: value[c] for k, c in zip(key, value.columns)})\n\n elif isinstance(key, pd.Index) and not isinstance(value, DataFrame):\n key = list(key)\n df = self.assign(**{k: value for k in key})\n elif is_dataframe_like(key) or isinstance(key, DataFrame):\n df = self.where(~key, value)\n elif not isinstance(key, str):\n raise NotImplementedError(f\"Item assignment with {type(key)} not supported\")\n else:\n df = self.assign(**{key: value})\n\n self.dask = df.dask\n self._name = df._name\n self._meta = df._meta\n self.divisions = df.divisions\n\n def __delitem__(self, key):\n result = self.drop([key], axis=1)\n self.dask = result.dask\n self._name = result._name\n self._meta = result._meta\n\n def __setattr__(self, key, value):\n try:\n columns = object.__getattribute__(self, \"_meta\").columns\n except AttributeError:\n columns = ()\n\n # exclude protected attributes from setitem\n if key in columns and key not in [\"divisions\", \"dask\", \"_name\", \"_meta\"]:\n self[key] = value\n else:\n object.__setattr__(self, key, value)\n\n def __getattr__(self, key):\n if key in self.columns:\n return self[key]\n else:\n raise AttributeError(\"'DataFrame' object has no attribute %r\" % key)\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n o.update(c for c in self.columns if (isinstance(c, str) and c.isidentifier()))\n return list(o)\n\n def __iter__(self):\n return iter(self._meta)\n\n def _ipython_key_completions_(self):\n return methods.tolist(self.columns)\n\n @property\n def ndim(self):\n \"\"\"Return dimensionality\"\"\"\n return 2\n\n 
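The ``__getitem__``/``__setitem__`` paths above turn column selection into a partitionwise graph and item assignment into ``assign``. A short sketch of the resulting user-facing behaviour, assuming dask and pandas are installed; the frame and column names are the editor's own example:

# Illustrative sketch (assumes dask + pandas are installed); names and data are hypothetical.
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10.0, 20.0, 30.0, 40.0]})
ddf = dd.from_pandas(pdf, npartitions=2)

col = ddf["x"]             # single label: partitionwise getitem, returns a dask Series
rows = ddf[ddf.x > 2]      # boolean Series key: partitions are aligned, then rows filtered
ddf["z"] = ddf.x + ddf.y   # __setitem__ is rewritten as DataFrame.assign

print(rows.compute())
print(ddf.compute())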
@property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n The number of rows is a Delayed result. The number of columns\n is a concrete integer.\n\n Examples\n --------\n >>> df.size # doctest: +SKIP\n (Delayed('int-07f06075-5ecc-4d77-817e-63c69a9188a8'), 2)\n \"\"\"\n col_size = len(self.columns)\n if col_size == 0:\n return (self.index.shape[0], 0)\n row_size = delayed(int)(self.size / col_size)\n return (row_size, col_size)\n\n @property\n def dtypes(self):\n \"\"\"Return data types\"\"\"\n return self._meta.dtypes\n\n @derived_from(pd.DataFrame)\n def get_dtype_counts(self):\n return self._meta.get_dtype_counts()\n\n @derived_from(pd.DataFrame)\n def get_ftype_counts(self):\n return self._meta.get_ftype_counts()\n\n @derived_from(pd.DataFrame)\n def select_dtypes(self, include=None, exclude=None):\n cs = self._meta.select_dtypes(include=include, exclude=exclude).columns\n return self[list(cs)]\n\n def sort_values(self, by, npartitions=None, ascending=True, **kwargs):\n \"\"\"Sort the dataset by a single column.\n\n Sorting a parallel dataset requires expensive shuffles and is generally\n not recommended. See ``set_index`` for implementation details.\n\n Parameters\n ----------\n by: string\n npartitions: int, None, or 'auto'\n The ideal number of output partitions. If None, use the same as\n the input. If 'auto' then decide by memory use.\n ascending: bool, optional\n Non ascending sort is not supported by Dask.\n Defaults to True.\n\n Examples\n --------\n >>> df2 = df.sort_values('x') # doctest: +SKIP\n \"\"\"\n from .shuffle import sort_values\n\n return sort_values(\n self,\n by,\n ascending=ascending,\n npartitions=npartitions,\n **kwargs,\n )\n\n def set_index(\n self,\n other,\n drop=True,\n sorted=False,\n npartitions=None,\n divisions=None,\n inplace=False,\n **kwargs,\n ):\n \"\"\"Set the DataFrame index (row labels) using an existing column.\n\n This realigns the dataset to be sorted by a new column. This can have a\n significant impact on performance, because joins, groupbys, lookups, etc.\n are all much faster on that column. However, this performance increase\n comes with a cost, sorting a parallel dataset requires expensive shuffles.\n Often we ``set_index`` once directly after data ingest and filtering and\n then perform many cheap computations off of the sorted dataset.\n\n This function operates exactly like ``pandas.set_index`` except with\n different performance costs (dask dataframe ``set_index`` is much more expensive).\n Under normal operation this function does an initial pass over the index column\n to compute approximate quantiles to serve as future divisions. It then passes\n over the data a second time, splitting up each input partition into several\n pieces and sharing those pieces to all of the output partitions now in\n sorted order.\n\n In some cases we can alleviate those costs, for example if your dataset is\n sorted already then we can avoid making many small pieces or if you know\n good values to split the new index column then we can avoid the initial\n pass over the data. For example if your new index is a datetime index and\n your data is already sorted by day then this entire operation can be done\n for free. 
You can control these options with the following parameters.\n\n Parameters\n ----------\n other: string or Dask Series\n drop: boolean, default True\n Delete column to be used as the new index.\n sorted: bool, optional\n If the index column is already sorted in increasing order.\n Defaults to False\n npartitions: int, None, or 'auto'\n The ideal number of output partitions. If None, use the same as\n the input. If 'auto' then decide by memory use.\n divisions: list, optional\n Known values on which to separate index values of the partitions.\n See https://docs.dask.org/en/latest/dataframe-design.html#partitions\n Defaults to computing this with a single pass over the data. Note\n that if ``sorted=True``, specified divisions are assumed to match\n the existing partitions in the data. If ``sorted=False``, you should\n leave divisions empty and call ``repartition`` after ``set_index``.\n inplace: bool, optional\n Modifying the DataFrame in place is not supported by Dask.\n Defaults to False.\n shuffle: string, 'disk' or 'tasks', optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n compute: bool, default False\n Whether or not to trigger an immediate computation. Defaults to False.\n Note, that even if you set ``compute=False``, an immediate computation\n will still be triggered if ``divisions`` is ``None``.\n partition_size: int, optional\n Desired size of each partitions in bytes.\n Only used when ``npartition='auto'``\n\n Examples\n --------\n >>> df2 = df.set_index('x') # doctest: +SKIP\n >>> df2 = df.set_index(d.x) # doctest: +SKIP\n >>> df2 = df.set_index(d.timestamp, sorted=True) # doctest: +SKIP\n\n A common case is when we have a datetime column that we know to be\n sorted and is cleanly divided by day. 
We can set this index for free\n by specifying both that the column is pre-sorted and the particular\n divisions along which is is separated\n\n >>> import pandas as pd\n >>> divisions = pd.date_range('2000', '2010', freq='1D')\n >>> df2 = df.set_index('timestamp', sorted=True, divisions=divisions) # doctest: +SKIP\n \"\"\"\n if inplace:\n raise NotImplementedError(\"The inplace= keyword is not supported\")\n pre_sorted = sorted\n del sorted\n\n if divisions is not None:\n check_divisions(divisions)\n\n if pre_sorted:\n from .shuffle import set_sorted_index\n\n return set_sorted_index(\n self, other, drop=drop, divisions=divisions, **kwargs\n )\n else:\n from .shuffle import set_index\n\n return set_index(\n self,\n other,\n drop=drop,\n npartitions=npartitions,\n divisions=divisions,\n **kwargs,\n )\n\n @derived_from(pd.DataFrame)\n def pop(self, item):\n out = self[item]\n del self[item]\n return out\n\n @derived_from(pd.DataFrame)\n def nlargest(self, n=5, columns=None, split_every=None):\n token = \"dataframe-nlargest\"\n return aca(\n self,\n chunk=M.nlargest,\n aggregate=M.nlargest,\n meta=self._meta,\n token=token,\n split_every=split_every,\n n=n,\n columns=columns,\n )\n\n @derived_from(pd.DataFrame)\n def nsmallest(self, n=5, columns=None, split_every=None):\n token = \"dataframe-nsmallest\"\n return aca(\n self,\n chunk=M.nsmallest,\n aggregate=M.nsmallest,\n meta=self._meta,\n token=token,\n split_every=split_every,\n n=n,\n columns=columns,\n )\n\n @derived_from(pd.DataFrame)\n def groupby(\n self, by=None, group_keys=True, sort=None, observed=None, dropna=None, **kwargs\n ):\n from dask.dataframe.groupby import DataFrameGroupBy\n\n return DataFrameGroupBy(\n self,\n by=by,\n group_keys=group_keys,\n sort=sort,\n observed=observed,\n dropna=dropna,\n **kwargs,\n )\n\n @wraps(categorize)\n def categorize(self, columns=None, index=None, split_every=None, **kwargs):\n return categorize(\n self, columns=columns, index=index, split_every=split_every, **kwargs\n )\n\n @derived_from(pd.DataFrame)\n def assign(self, **kwargs):\n for k, v in kwargs.items():\n if not (\n isinstance(v, Scalar)\n or is_series_like(v)\n or callable(v)\n or pd.api.types.is_scalar(v)\n or is_index_like(v)\n or isinstance(v, Array)\n ):\n raise TypeError(\n \"Column assignment doesn't support type \"\n \"{0}\".format(typename(type(v)))\n )\n if callable(v):\n kwargs[k] = v(self)\n\n if isinstance(v, Array):\n from .io import from_dask_array\n\n if len(v.shape) > 1:\n raise ValueError(\"Array assignment only supports 1-D arrays\")\n if v.npartitions != self.npartitions:\n raise ValueError(\n \"Number of partitions do not match ({0} != {1})\".format(\n v.npartitions, self.npartitions\n )\n )\n kwargs[k] = from_dask_array(v, index=self.index, meta=self._meta)\n\n pairs = list(sum(kwargs.items(), ()))\n\n # Figure out columns of the output\n df2 = self._meta_nonempty.assign(**_extract_meta(kwargs, nonempty=True))\n return elemwise(methods.assign, self, *pairs, meta=df2)\n\n @derived_from(pd.DataFrame, ua_args=[\"index\"])\n def rename(self, index=None, columns=None):\n if index is not None:\n raise ValueError(\"Cannot rename index.\")\n\n # *args here is index, columns but columns arg is already used\n return self.map_partitions(M.rename, None, columns=columns)\n\n def query(self, expr, **kwargs):\n \"\"\"Filter dataframe with complex expression\n\n Blocked version of pd.DataFrame.query\n\n This is like the sequential version except that this will also happen\n in many threads. 
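``set_index`` above has two code paths: a shuffle-based one that first estimates divisions from approximate quantiles, and a cheap one for data already sorted on the new index. A minimal sketch of both call patterns, assuming dask and pandas are available; the data is made up for illustration:

# Illustrative sketch (assumes dask + pandas are installed); data is hypothetical.
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"t": pd.date_range("2000-01-01", periods=6, freq="D"), "v": range(6)})
ddf = dd.from_pandas(pdf, npartitions=3)

by_t = ddf.set_index("t")                      # general path: quantile pass, then shuffle
by_t_cheap = ddf.set_index("t", sorted=True)   # pre-sorted path: divisions read off existing partitions

print(by_t.divisions)
print(by_t_cheap.divisions)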
This may conflict with ``numexpr`` which will use\n multiple threads itself. We recommend that you set numexpr to use a\n single thread\n\n import numexpr\n numexpr.set_num_threads(1)\n\n See also\n --------\n pandas.DataFrame.query\n \"\"\"\n return self.map_partitions(M.query, expr, **kwargs)\n\n @derived_from(pd.DataFrame)\n def eval(self, expr, inplace=None, **kwargs):\n if inplace is None:\n inplace = False\n if \"=\" in expr and inplace in (True, None):\n raise NotImplementedError(\n \"Inplace eval not supported. Please use inplace=False\"\n )\n meta = self._meta.eval(expr, inplace=inplace, **kwargs)\n return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)\n\n @derived_from(pd.DataFrame)\n def dropna(self, how=\"any\", subset=None, thresh=None):\n return self.map_partitions(\n M.dropna, how=how, subset=subset, thresh=thresh, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip(self, lower=None, upper=None, out=None):\n if out is not None:\n raise ValueError(\"'out' must be None\")\n return self.map_partitions(\n M.clip, lower=lower, upper=upper, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip_lower(self, threshold):\n return self.map_partitions(\n M.clip_lower, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip_upper(self, threshold):\n return self.map_partitions(\n M.clip_upper, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def squeeze(self, axis=None):\n if axis in [None, 1]:\n if len(self.columns) == 1:\n return self[self.columns[0]]\n else:\n return self\n\n elif axis == 0:\n raise NotImplementedError(\n \"{0} does not support squeeze along axis 0\".format(type(self))\n )\n\n elif axis not in [0, 1, None]:\n raise ValueError(\"No axis {0} for object type {1}\".format(axis, type(self)))\n\n @derived_from(pd.DataFrame)\n def to_timestamp(self, freq=None, how=\"start\", axis=0):\n df = elemwise(M.to_timestamp, self, freq, how, axis)\n df.divisions = tuple(pd.Index(self.divisions).to_timestamp())\n return df\n\n @derived_from(pd.DataFrame)\n def explode(self, column):\n meta = self._meta.explode(column)\n return self.map_partitions(M.explode, column, meta=meta, enforce_metadata=False)\n\n def to_bag(self, index=False, format=\"tuple\"):\n \"\"\"Convert to a dask Bag of tuples of each row.\n\n Parameters\n ----------\n index : bool, optional\n If True, the index is included as the first element of each tuple.\n Default is False.\n format : {\"tuple\", \"dict\"},optional\n Whether to return a bag of tuples or dictionaries.\n \"\"\"\n from .io import to_bag\n\n return to_bag(self, index, format)\n\n def to_parquet(self, path, *args, **kwargs):\n \"\"\"See dd.to_parquet docstring for more information\"\"\"\n from .io import to_parquet\n\n return to_parquet(self, path, *args, **kwargs)\n\n def to_orc(self, path, *args, **kwargs):\n \"\"\"See dd.to_orc docstring for more information\"\"\"\n from .io import to_orc\n\n return to_orc(self, path, *args, **kwargs)\n\n @derived_from(pd.DataFrame)\n def to_string(self, max_rows=5):\n # option_context doesn't affect\n return self._repr_data().to_string(max_rows=max_rows, show_dimensions=False)\n\n def _get_numeric_data(self, how=\"any\", subset=None):\n # calculate columns to avoid unnecessary calculation\n numerics = self._meta._get_numeric_data()\n\n if len(numerics.columns) < len(self.columns):\n name = self._token_prefix + \"-get_numeric_data\"\n return self.map_partitions(M._get_numeric_data, 
meta=numerics, token=name)\n else:\n # use myself if all numerics\n return self\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, 1, \"index\", \"columns\", None):\n raise ValueError(\"No axis named {0}\".format(axis))\n # convert to numeric axis\n return {None: 0, \"index\": 0, \"columns\": 1}.get(axis, axis)\n\n @derived_from(pd.DataFrame)\n def drop(self, labels=None, axis=0, columns=None, errors=\"raise\"):\n axis = self._validate_axis(axis)\n if axis == 0 and columns is not None:\n # Columns must be specified if axis==0\n return self.map_partitions(drop_by_shallow_copy, columns, errors=errors)\n elif axis == 1:\n return self.map_partitions(drop_by_shallow_copy, labels, errors=errors)\n raise NotImplementedError(\n \"Drop currently only works for axis=1 or when columns is not None\"\n )\n\n def merge(\n self,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n suffixes=(\"_x\", \"_y\"),\n indicator=False,\n npartitions=None,\n shuffle=None,\n broadcast=None,\n ):\n \"\"\"Merge the DataFrame with another DataFrame\n\n This will merge the two datasets, either on the indices, a certain column\n in each dataset or the index in one dataset and the column in another.\n\n Parameters\n ----------\n right: dask.dataframe.DataFrame\n how : {'left', 'right', 'outer', 'inner'}, default: 'inner'\n How to handle the operation of the two objects:\n\n - left: use calling frame's index (or column if on is specified)\n - right: use other frame's index\n - outer: form union of calling frame's index (or column if on is\n specified) with other frame's index, and sort it\n lexicographically\n - inner: form intersection of calling frame's index (or column if\n on is specified) with other frame's index, preserving the order\n of the calling's one\n\n on : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. If on is None and not merging on indexes then this\n defaults to the intersection of the columns in both DataFrames.\n left_on : label or list, or array-like\n Column to join on in the left DataFrame. Other than in pandas\n arrays and lists are only support if their length is 1.\n right_on : label or list, or array-like\n Column to join on in the right DataFrame. Other than in pandas\n arrays and lists are only support if their length is 1.\n left_index : boolean, default False\n Use the index from the left DataFrame as the join key.\n right_index : boolean, default False\n Use the index from the right DataFrame as the join key.\n suffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and\n right side, respectively\n indicator : boolean or string, default False\n If True, adds a column to output DataFrame called \"_merge\" with\n information on the source of each row. If string, column with\n information on source of each row will be added to output DataFrame,\n and column will be named value of string. Information column is\n Categorical-type and takes on a value of \"left_only\" for observations\n whose merge key only appears in `left` DataFrame, \"right_only\" for\n observations whose merge key only appears in `right` DataFrame,\n and \"both\" if the observation’s merge key is found in both.\n npartitions: int or None, optional\n The ideal number of output partitions. This is only utilised when\n performing a hash_join (merging on columns only). 
If ``None`` then\n ``npartitions = max(lhs.npartitions, rhs.npartitions)``.\n Default is ``None``.\n shuffle: {'disk', 'tasks'}, optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n broadcast: boolean or float, optional\n Whether to use a broadcast-based join in lieu of a shuffle-based\n join for supported cases. By default, a simple heuristic will be\n used to select the underlying algorithm. If a floating-point value\n is specified, that number will be used as the ``broadcast_bias``\n within the simple heuristic (a large number makes Dask more likely\n to choose the ``broacast_join`` code path). See ``broadcast_join``\n for more information.\n\n Notes\n -----\n\n There are three ways to join dataframes:\n\n 1. Joining on indices. In this case the divisions are\n aligned using the function ``dask.dataframe.multi.align_partitions``.\n Afterwards, each partition is merged with the pandas merge function.\n\n 2. Joining one on index and one on column. In this case the divisions of\n dataframe merged by index (:math:`d_i`) are used to divide the column\n merged dataframe (:math:`d_c`) one using\n ``dask.dataframe.multi.rearrange_by_divisions``. In this case the\n merged dataframe (:math:`d_m`) has the exact same divisions\n as (:math:`d_i`). This can lead to issues if you merge multiple rows from\n (:math:`d_c`) to one row in (:math:`d_i`).\n\n 3. Joining both on columns. In this case a hash join is performed using\n ``dask.dataframe.multi.hash_join``.\n\n \"\"\"\n\n if not is_dataframe_like(right):\n raise ValueError(\"right must be DataFrame\")\n\n from .multi import merge\n\n return merge(\n self,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n npartitions=npartitions,\n indicator=indicator,\n shuffle=shuffle,\n broadcast=broadcast,\n )\n\n @derived_from(pd.DataFrame)\n def join(\n self,\n other,\n on=None,\n how=\"left\",\n lsuffix=\"\",\n rsuffix=\"\",\n npartitions=None,\n shuffle=None,\n ):\n if is_series_like(other) and hasattr(other, \"name\"):\n other = other.to_frame()\n\n if not is_dataframe_like(other):\n if not isinstance(other, list) or not all(\n [is_dataframe_like(o) for o in other]\n ):\n raise ValueError(\"other must be DataFrame or list of DataFrames\")\n if how not in [\"outer\", \"left\"]:\n raise ValueError(\"merge_multi only supports left or outer joins\")\n\n from .multi import _recursive_pairwise_outer_join\n\n other = _recursive_pairwise_outer_join(\n other,\n on=on,\n lsuffix=lsuffix,\n rsuffix=rsuffix,\n npartitions=npartitions,\n shuffle=shuffle,\n )\n\n from .multi import merge\n\n return merge(\n self,\n other,\n how=how,\n left_index=on is None,\n right_index=True,\n left_on=on,\n suffixes=(lsuffix, rsuffix),\n npartitions=npartitions,\n shuffle=shuffle,\n )\n\n @derived_from(pd.DataFrame)\n def append(self, other, interleave_partitions=False):\n if isinstance(other, Series):\n msg = (\n \"Unable to appending dd.Series to dd.DataFrame.\"\n \"Use pd.Series to append as row.\"\n )\n raise ValueError(msg)\n elif is_series_like(other):\n other = other.to_frame().T\n return super().append(other, interleave_partitions=interleave_partitions)\n\n @derived_from(pd.DataFrame)\n def iterrows(self):\n for i in range(self.npartitions):\n df = self.get_partition(i).compute()\n for row in df.iterrows():\n yield row\n\n @derived_from(pd.DataFrame)\n def itertuples(self, index=True, 
name=\"Pandas\"):\n for i in range(self.npartitions):\n df = self.get_partition(i).compute()\n for row in df.itertuples(index=index, name=name):\n yield row\n\n @derived_from(pd.DataFrame)\n def items(self):\n for col_idx, label in enumerate(self.columns):\n yield label, self.iloc[:, col_idx]\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.DataFrame):\n \"\"\"bind operator method like DataFrame.add to this class\"\"\"\n\n # name must be explicitly passed for div method whose name is truediv\n\n def meth(self, other, axis=\"columns\", level=None, fill_value=None):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n\n axis = self._validate_axis(axis)\n\n if axis in (1, \"columns\"):\n # When axis=1 and other is a series, `other` is transposed\n # and the operator is applied broadcast across rows. This\n # isn't supported with dd.Series.\n if isinstance(other, Series):\n msg = \"Unable to {0} dd.Series with axis=1\".format(name)\n raise ValueError(msg)\n elif is_series_like(other):\n # Special case for pd.Series to avoid unwanted partitioning\n # of other. We pass it in as a kwarg to prevent this.\n meta = _emulate(\n op, self, other=other, axis=axis, fill_value=fill_value\n )\n return map_partitions(\n op,\n self,\n other=other,\n meta=meta,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)\n return map_partitions(\n op,\n self,\n other,\n meta=meta,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))\n\n @classmethod\n def _bind_comparison_method(cls, name, comparison, original=pd.DataFrame):\n \"\"\"bind comparison method like DataFrame.eq to this class\"\"\"\n\n def meth(self, other, axis=\"columns\", level=None):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = self._validate_axis(axis)\n return elemwise(comparison, self, other, axis=axis)\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))\n\n @insert_meta_param_description(pad=12)\n def apply(\n self,\n func,\n axis=0,\n broadcast=None,\n raw=False,\n reduce=None,\n args=(),\n meta=no_default,\n result_type=None,\n **kwds,\n ):\n \"\"\"Parallel version of pandas.DataFrame.apply\n\n This mimics the pandas version except for the following:\n\n 1. Only ``axis=1`` is supported (and must be specified explicitly).\n 2. The user should provide output metadata via the `meta` keyword.\n\n Parameters\n ----------\n func : function\n Function to apply to each column/row\n axis : {0 or 'index', 1 or 'columns'}, default 0\n - 0 or 'index': apply function to each column (NOT SUPPORTED)\n - 1 or 'columns': apply function to each row\n $META\n args : tuple\n Positional arguments to pass to function in addition to the array/series\n\n Additional keyword arguments will be passed as keywords to the function\n\n Returns\n -------\n applied : Series or DataFrame\n\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],\n ... 'y': [1., 2., 3., 4., 5.]})\n >>> ddf = dd.from_pandas(df, npartitions=2)\n\n Apply a function to row-wise passing in extra arguments in ``args`` and\n ``kwargs``:\n\n >>> def myadd(row, a, b=1):\n ... 
return row.sum() + a + b\n >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with name ``'x'``, and dtype\n ``float64``:\n\n >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)\n\n See Also\n --------\n dask.DataFrame.map_partitions\n \"\"\"\n\n if broadcast is not None:\n warnings.warn(\n \"The `broadcast` argument is no longer used/supported. \"\n \"It will be dropped in a future release.\",\n category=FutureWarning,\n )\n\n axis = self._validate_axis(axis)\n pandas_kwargs = {\"axis\": axis, \"raw\": raw, \"result_type\": result_type}\n\n kwds.update(pandas_kwargs)\n\n if axis == 0:\n msg = (\n \"dd.DataFrame.apply only supports axis=1\\n\"\n \" Try: df.apply(func, axis=1)\"\n )\n raise NotImplementedError(msg)\n\n if meta is no_default:\n meta = _emulate(\n M.apply, self._meta_nonempty, func, args=args, udf=True, **kwds\n )\n warnings.warn(meta_warning(meta))\n kwds.update({\"parent_meta\": self._meta})\n return map_partitions(M.apply, self, func, args=args, meta=meta, **kwds)\n\n @derived_from(pd.DataFrame)\n def applymap(self, func, meta=\"__no_default__\"):\n return elemwise(M.applymap, self, func, meta=meta)\n\n @derived_from(pd.DataFrame)\n def round(self, decimals=0):\n return elemwise(M.round, self, decimals)\n\n @derived_from(pd.DataFrame)\n def mode(self, dropna=True, split_every=False):\n mode_series_list = []\n for col_index in range(len(self.columns)):\n col_series = self.iloc[:, col_index]\n mode_series = Series.mode(\n col_series, dropna=dropna, split_every=split_every\n )\n mode_series.name = col_series.name\n mode_series_list.append(mode_series)\n\n name = \"concat-\" + tokenize(*mode_series_list)\n\n dsk = {\n (name, 0): (\n apply,\n methods.concat,\n [[(df._name, 0) for df in mode_series_list]],\n {\"axis\": 1},\n )\n }\n\n meta = methods.concat([df._meta for df in mode_series_list], axis=1)\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=mode_series_list\n )\n ddf = new_dd_object(graph, name, meta, divisions=(None, None))\n\n return ddf\n\n @derived_from(pd.DataFrame)\n def cov(self, min_periods=None, split_every=False):\n return cov_corr(self, min_periods, split_every=split_every)\n\n @derived_from(pd.DataFrame)\n def corr(self, method=\"pearson\", min_periods=None, split_every=False):\n if method != \"pearson\":\n raise NotImplementedError(\"Only Pearson correlation has been implemented\")\n return cov_corr(self, min_periods, True, split_every=split_every)\n\n def info(self, buf=None, verbose=False, memory_usage=False):\n \"\"\"\n Concise summary of a Dask DataFrame.\n \"\"\"\n\n if buf is None:\n import sys\n\n buf = sys.stdout\n\n lines = [str(type(self))]\n\n if len(self.columns) == 0:\n lines.append(\"Index: 0 entries\")\n lines.append(\"Empty %s\" % type(self).__name__)\n put_lines(buf, lines)\n return\n\n # Group and execute the required computations\n computations = {}\n if verbose:\n computations.update({\"index\": 
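# ---- [editor's aside] Illustrative sketch of the mode/cov/corr reductions above. ----
# `mode` is built column-by-column from Series.mode and concatenated; `corr`
# supports Pearson only (see the explicit check above). Made-up data:
import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": [1, 1, 2, 3], "y": [1.0, 2.0, 2.0, 4.0]}), npartitions=2)

print(ddf.mode().compute())     # column-wise modes
print(ddf.cov().compute())      # pairwise covariance matrix
print(ddf.corr().compute())     # Pearson correlation matrix
# ---- [end editor's aside] ----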
self.index, \"count\": self.count()})\n if memory_usage:\n computations.update(\n {\"memory_usage\": self.map_partitions(M.memory_usage, index=True)}\n )\n computations = dict(\n zip(computations.keys(), da.compute(*computations.values()))\n )\n\n if verbose:\n import textwrap\n\n index = computations[\"index\"]\n counts = computations[\"count\"]\n lines.append(index_summary(index))\n lines.append(\"Data columns (total {} columns):\".format(len(self.columns)))\n\n from pandas.io.formats.printing import pprint_thing\n\n space = max([len(pprint_thing(k)) for k in self.columns]) + 1\n column_width = max(space, 7)\n\n header = (\n textwrap.dedent(\n \"\"\"\\\n # {{column:<{column_width}}} Non-Null Count Dtype\n --- {{underl:<{column_width}}} -------------- -----\"\"\"\n )\n .format(column_width=column_width)\n .format(column=\"Column\", underl=\"------\")\n )\n column_template = textwrap.dedent(\n \"\"\"\\\n {{i:^3}} {{name:<{column_width}}} {{count}} non-null {{dtype}}\"\"\".format(\n column_width=column_width\n )\n )\n column_info = [\n column_template.format(\n i=pprint_thing(i),\n name=pprint_thing(name),\n count=pprint_thing(count),\n dtype=pprint_thing(dtype),\n )\n for i, (name, count, dtype) in enumerate(\n zip(self.columns, counts, self.dtypes)\n )\n ]\n lines.extend(header.split(\"\\n\"))\n else:\n column_info = [index_summary(self.columns, name=\"Columns\")]\n\n lines.extend(column_info)\n dtype_counts = [\n \"%s(%d)\" % k\n for k in sorted(self.dtypes.value_counts().iteritems(), key=str)\n ]\n lines.append(\"dtypes: {}\".format(\", \".join(dtype_counts)))\n\n if memory_usage:\n memory_int = computations[\"memory_usage\"].sum()\n lines.append(\"memory usage: {}\\n\".format(memory_repr(memory_int)))\n\n put_lines(buf, lines)\n\n @derived_from(pd.DataFrame)\n def memory_usage(self, index=True, deep=False):\n result = self.map_partitions(M.memory_usage, index=index, deep=deep)\n result = result.groupby(result.index).sum()\n return result\n\n def pivot_table(self, index=None, columns=None, values=None, aggfunc=\"mean\"):\n \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.\n\n Parameters\n ----------\n values : scalar\n column to aggregate\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n aggfunc : {'mean', 'sum', 'count'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n \"\"\"\n from .reshape import pivot_table\n\n return pivot_table(\n self, index=index, columns=columns, values=values, aggfunc=aggfunc\n )\n\n def melt(\n self,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level=None,\n ):\n \"\"\"\n Unpivots a DataFrame from wide format to long format,\n optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where\n one or more columns are identifier variables (``id_vars``), while\n all other columns, considered measured variables (``value_vars``),\n are \"unpivoted\" to the row axis, leaving just two non-identifier\n columns, 'variable' and 'value'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar\n Name to use for the 'variable' column. 
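# ---- [editor's aside] Illustrative sketch of pivot_table above. ----
# The `columns` argument must be categorical with *known* categories so the
# output columns can be determined without computing. Made-up data:
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"shop": ["a", "b", "a", "b"], "day": [1, 1, 2, 2], "sales": [3.0, 4.0, 5.0, 6.0]})
ddf = dd.from_pandas(pdf, npartitions=2).categorize(columns=["shop"])

table = ddf.pivot_table(index="day", columns="shop", values="sales", aggfunc="sum")
print(table.compute())
# ---- [end editor's aside] ----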
If None it uses\n ``frame.columns.name`` or 'variable'.\n value_name : scalar, default 'value'\n Name to use for the 'value' column.\n col_level : int or string, optional\n If columns are a MultiIndex then use this level to melt.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n See Also\n --------\n pandas.DataFrame.melt\n \"\"\"\n from .reshape import melt\n\n return melt(\n self,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n )\n\n def to_records(self, index=False, lengths=None):\n from .io import to_records\n\n if lengths is True:\n lengths = tuple(self.map_partitions(len).compute())\n\n records = to_records(self)\n\n chunks = self._validate_chunks(records, lengths)\n records._chunks = (chunks[0],)\n\n return records\n\n @derived_from(pd.DataFrame)\n def to_html(self, max_rows=5):\n # pd.Series doesn't have html repr\n data = self._repr_data().to_html(max_rows=max_rows, show_dimensions=False)\n return get_template(\"dataframe.html.j2\").render(\n data=data, name=self._name, task=self.dask\n )\n\n def _repr_data(self):\n meta = self._meta\n index = self._repr_divisions\n cols = meta.columns\n if len(cols) == 0:\n series_df = pd.DataFrame([[]] * len(index), columns=cols, index=index)\n else:\n series_df = pd.concat(\n [_repr_data_series(s, index=index) for _, s in meta.iteritems()], axis=1\n )\n return series_df\n\n def _repr_html_(self):\n data = self._repr_data().to_html(\n max_rows=5, show_dimensions=False, notebook=True\n )\n return get_template(\"dataframe.html.j2\").render(\n data=data, name=self._name, task=self.dask\n )\n\n def _select_columns_or_index(self, columns_or_index):\n \"\"\"\n Parameters\n ----------\n columns_or_index\n Column or index name, or a list of these\n\n Returns\n -------\n dd.DataFrame\n Dask DataFrame with columns corresponding to each column or\n index level in columns_or_index. 
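# ---- [editor's aside] Illustrative sketch of melt and to_records above. ----
import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"id": [1, 2], "x": [10, 20], "y": [0.1, 0.2]}), npartitions=1)

long_form = ddf.melt(id_vars="id", value_vars=["x", "y"], var_name="metric")
print(long_form.compute())

records = ddf.to_records(lengths=True)   # dask array of records with known chunk sizes
# ---- [end editor's aside] ----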
If included, the column\n corresponding to the index level is named _index\n \"\"\"\n\n # Ensure columns_or_index is a list\n columns_or_index = (\n columns_or_index\n if isinstance(columns_or_index, list)\n else [columns_or_index]\n )\n\n column_names = [\n n for n in columns_or_index if self._is_column_label_reference(n)\n ]\n\n selected_df = self[column_names]\n if self._contains_index_name(columns_or_index):\n # Index name was included\n selected_df = selected_df.assign(_index=self.index)\n\n return selected_df\n\n def _is_column_label_reference(self, key):\n \"\"\"\n Test whether a key is a column label reference\n\n To be considered a column label reference, `key` must match the name of at\n least one column.\n \"\"\"\n return (\n not is_dask_collection(key)\n and (np.isscalar(key) or isinstance(key, tuple))\n and key in self.columns\n )\n\n\n# bind operators\nfor op in [\n operator.abs,\n operator.add,\n operator.and_,\n operator.eq,\n operator.gt,\n operator.ge,\n operator.inv,\n operator.lt,\n operator.le,\n operator.mod,\n operator.mul,\n operator.ne,\n operator.neg,\n operator.or_,\n operator.pow,\n operator.sub,\n operator.truediv,\n operator.floordiv,\n operator.xor,\n]:\n _Frame._bind_operator(op)\n Scalar._bind_operator(op)\n\nfor name in [\n \"add\",\n \"sub\",\n \"mul\",\n \"div\",\n \"divide\",\n \"truediv\",\n \"floordiv\",\n \"mod\",\n \"pow\",\n \"radd\",\n \"rsub\",\n \"rmul\",\n \"rdiv\",\n \"rtruediv\",\n \"rfloordiv\",\n \"rmod\",\n \"rpow\",\n]:\n meth = getattr(pd.DataFrame, name)\n DataFrame._bind_operator_method(name, meth)\n\n meth = getattr(pd.Series, name)\n Series._bind_operator_method(name, meth)\n\nfor name in [\"lt\", \"gt\", \"le\", \"ge\", \"ne\", \"eq\"]:\n meth = getattr(pd.DataFrame, name)\n DataFrame._bind_comparison_method(name, meth)\n\n meth = getattr(pd.Series, name)\n Series._bind_comparison_method(name, meth)\n\n\ndef is_broadcastable(dfs, s):\n \"\"\"\n This Series is broadcastable against another dataframe in the sequence\n \"\"\"\n return (\n isinstance(s, Series)\n and s.npartitions == 1\n and s.known_divisions\n and any(\n s.divisions == (df.columns.min(), df.columns.max())\n for df in dfs\n if isinstance(df, DataFrame)\n )\n )\n\n\ndef elemwise(op, *args, **kwargs):\n \"\"\"Elementwise operation for Dask dataframes\n\n Parameters\n ----------\n op: callable\n Function to apply across input dataframes\n *args: DataFrames, Series, Scalars, Arrays,\n The arguments of the operation\n **kwrags: scalars\n meta: pd.DataFrame, pd.Series (optional)\n Valid metadata for the operation. Will evaluate on a small piece of\n data if not provided.\n transform_divisions: boolean\n If the input is a ``dask.dataframe.Index`` we normally will also apply\n the function onto the divisions and apply those transformed divisions\n to the output. 
You can pass ``transform_divisions=False`` to override\n this behavior\n\n Examples\n --------\n >>> elemwise(operator.add, df.x, df.y) # doctest: +SKIP\n \"\"\"\n meta = kwargs.pop(\"meta\", no_default)\n out = kwargs.pop(\"out\", None)\n transform_divisions = kwargs.pop(\"transform_divisions\", True)\n\n _name = funcname(op) + \"-\" + tokenize(op, *args, **kwargs)\n\n args = _maybe_from_pandas(args)\n\n from .multi import _maybe_align_partitions\n\n args = _maybe_align_partitions(args)\n dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar, Array))]\n dfs = [df for df in dasks if isinstance(df, _Frame)]\n\n # Clean up dask arrays if present\n deps = dasks.copy()\n for i, a in enumerate(dasks):\n if not isinstance(a, Array):\n continue\n # Ensure that they have similar-ish chunk structure\n if not all(not a.chunks or len(a.chunks[0]) == df.npartitions for df in dfs):\n msg = (\n \"When combining dask arrays with dataframes they must \"\n \"match chunking exactly. Operation: %s\" % funcname(op)\n )\n raise ValueError(msg)\n # Rechunk to have a single chunk along all other axes\n if a.ndim > 1:\n a = a.rechunk({i + 1: d for i, d in enumerate(a.shape[1:])})\n dasks[i] = a\n\n divisions = dfs[0].divisions\n if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:\n try:\n divisions = op(\n *[pd.Index(arg.divisions) if arg is dfs[0] else arg for arg in args],\n **kwargs,\n )\n if isinstance(divisions, pd.Index):\n divisions = methods.tolist(divisions)\n except Exception:\n pass\n else:\n if not valid_divisions(divisions):\n divisions = [None] * (dfs[0].npartitions + 1)\n\n _is_broadcastable = partial(is_broadcastable, dfs)\n dfs = list(remove(_is_broadcastable, dfs))\n\n other = [\n (i, arg)\n for i, arg in enumerate(args)\n if not isinstance(arg, (_Frame, Scalar, Array))\n ]\n\n # adjust the key length of Scalar\n dsk = partitionwise_graph(op, _name, *args, **kwargs)\n\n graph = HighLevelGraph.from_collections(_name, dsk, dependencies=deps)\n\n if meta is no_default:\n if len(dfs) >= 2 and not all(hasattr(d, \"npartitions\") for d in dasks):\n # should not occur in current funcs\n msg = \"elemwise with 2 or more DataFrames and Scalar is not supported\"\n raise NotImplementedError(msg)\n # For broadcastable series, use no rows.\n parts = [\n d._meta\n if _is_broadcastable(d)\n else np.empty((), dtype=d.dtype)\n if isinstance(d, Array)\n else d._meta_nonempty\n for d in dasks\n ]\n with raise_on_meta_error(funcname(op)):\n meta = partial_by_order(*parts, function=op, other=other)\n\n result = new_dd_object(graph, _name, meta, divisions)\n return handle_out(out, result)\n\n\ndef handle_out(out, result):\n \"\"\"Handle out parameters\n\n If out is a dask.DataFrame, dask.Series or dask.Scalar then\n this overwrites the contents of it with the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n\n # Notice, we use .__class__ as opposed to type() in order to support\n # object proxies see <https://github.com/dask/dask/pull/6981>\n if out is not None and out.__class__ != result.__class__:\n raise TypeError(\n \"Mismatched types between result and out parameter. \"\n \"out=%s, result=%s\" % (str(type(out)), str(type(result)))\n )\n\n if isinstance(out, DataFrame):\n if len(out.columns) != len(result.columns):\n raise ValueError(\n \"Mismatched columns count between result and out parameter. 
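# ---- [editor's aside] Illustrative sketch of `elemwise` above. ----
# Most users reach this through operators (`ddf.x + ddf.y`); calling it directly
# from this module produces the same partitionwise graph. Made-up data:
import operator
import pandas as pd
import dask.dataframe as dd
from dask.dataframe.core import elemwise   # defined in this module

ddf = dd.from_pandas(pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]}), npartitions=2)

total = elemwise(operator.add, ddf.x, ddf.y)   # same graph as `ddf.x + ddf.y`
print(total.compute())
# ---- [end editor's aside] ----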
\"\n \"out=%s, result=%s\" % (str(len(out.columns)), str(len(result.columns)))\n )\n\n if isinstance(out, (Series, DataFrame, Scalar)):\n out._meta = result._meta\n out._name = result._name\n out.dask = result.dask\n\n if not isinstance(out, Scalar):\n out.divisions = result.divisions\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected %s \"\n % (typename(type(out)), typename(type(result)))\n )\n raise NotImplementedError(msg)\n else:\n return result\n\n\ndef _maybe_from_pandas(dfs):\n from .io import from_pandas\n\n dfs = [\n from_pandas(df, 1)\n if (is_series_like(df) or is_dataframe_like(df)) and not is_dask_collection(df)\n else df\n for df in dfs\n ]\n return dfs\n\n\ndef hash_shard(\n df, nparts, split_out_setup=None, split_out_setup_kwargs=None, ignore_index=False\n):\n if split_out_setup:\n h = split_out_setup(df, **(split_out_setup_kwargs or {}))\n else:\n h = df\n\n h = hash_object_dispatch(h, index=False)\n if is_series_like(h):\n h = h.values\n np.mod(h, nparts, out=h)\n return group_split_dispatch(df, h, nparts, ignore_index=ignore_index)\n\n\ndef split_evenly(df, k):\n \"\"\"Split dataframe into k roughly equal parts\"\"\"\n divisions = np.linspace(0, len(df), k + 1).astype(int)\n return {i: df.iloc[divisions[i] : divisions[i + 1]] for i in range(k)}\n\n\ndef split_out_on_index(df):\n h = df.index\n if isinstance(h, pd.MultiIndex):\n h = pd.DataFrame([], index=h).reset_index()\n return h\n\n\ndef split_out_on_cols(df, cols=None):\n return df[cols]\n\n\n@insert_meta_param_description\ndef apply_concat_apply(\n args,\n chunk=None,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n split_every=None,\n split_out=None,\n split_out_setup=None,\n split_out_setup_kwargs=None,\n sort=None,\n ignore_index=False,\n **kwargs,\n):\n \"\"\"Apply a function to blocks, then concat, then apply again\n\n Parameters\n ----------\n args :\n Positional arguments for the `chunk` function. All `dask.dataframe`\n objects should be partitioned and indexed equivalently.\n chunk : function [block-per-arg] -> block\n Function to operate on each block of data\n aggregate : function concatenated-block -> block\n Function to operate on the concatenated result of chunk\n combine : function concatenated-block -> block, optional\n Function to operate on intermediate concatenated results of chunk\n in a tree-reduction. If not provided, defaults to aggregate.\n $META\n token : str, optional\n The name to use for the output keys.\n chunk_kwargs : dict, optional\n Keywords for the chunk function only.\n aggregate_kwargs : dict, optional\n Keywords for the aggregate function only.\n combine_kwargs : dict, optional\n Keywords for the combine function only.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used,\n and all intermediates will be concatenated and passed to ``aggregate``.\n Default is 8.\n split_out : int, optional\n Number of output partitions. Split occurs after first chunk reduction.\n split_out_setup : callable, optional\n If provided, this function is called on each chunk before performing\n the hash-split. It should return a pandas object, where each row\n (excluding the index) is hashed. 
If not provided, the chunk is hashed\n as is.\n split_out_setup_kwargs : dict, optional\n Keywords for the `split_out_setup` function only.\n sort : bool, default None\n If allowed, sort the keys of the output aggregation.\n ignore_index : bool, default False\n If True, do not preserve index values throughout ACA operations.\n kwargs :\n All remaining keywords will be passed to ``chunk``, ``aggregate``, and\n ``combine``.\n\n Examples\n --------\n >>> def chunk(a_block, b_block):\n ... pass\n\n >>> def agg(df):\n ... pass\n\n >>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP\n \"\"\"\n if chunk_kwargs is None:\n chunk_kwargs = dict()\n if aggregate_kwargs is None:\n aggregate_kwargs = dict()\n chunk_kwargs.update(kwargs)\n aggregate_kwargs.update(kwargs)\n\n if combine is None:\n if combine_kwargs:\n raise ValueError(\"`combine_kwargs` provided with no `combine`\")\n combine = aggregate\n combine_kwargs = aggregate_kwargs\n else:\n if combine_kwargs is None:\n combine_kwargs = dict()\n combine_kwargs.update(kwargs)\n\n if not isinstance(args, (tuple, list)):\n args = [args]\n\n dfs = [arg for arg in args if isinstance(arg, _Frame)]\n\n npartitions = set(arg.npartitions for arg in dfs)\n if len(npartitions) > 1:\n raise ValueError(\"All arguments must have same number of partitions\")\n npartitions = npartitions.pop()\n\n if split_every is None:\n split_every = 8\n elif split_every is False:\n split_every = npartitions\n elif split_every < 2 or not isinstance(split_every, Integral):\n raise ValueError(\"split_every must be an integer >= 2\")\n\n token_key = tokenize(\n token or (chunk, aggregate),\n meta,\n args,\n chunk_kwargs,\n aggregate_kwargs,\n combine_kwargs,\n split_every,\n split_out,\n split_out_setup,\n split_out_setup_kwargs,\n )\n\n # Chunk\n a = \"{0}-chunk-{1}\".format(token or funcname(chunk), token_key)\n if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:\n dsk = {\n (a, 0, i, 0): (chunk, key) for i, key in enumerate(args[0].__dask_keys__())\n }\n else:\n dsk = {\n (a, 0, i, 0): (\n apply,\n chunk,\n [(x._name, i) if isinstance(x, _Frame) else x for x in args],\n chunk_kwargs,\n )\n for i in range(npartitions)\n }\n\n # Split\n if split_out and split_out > 1:\n split_prefix = \"split-%s\" % token_key\n shard_prefix = \"shard-%s\" % token_key\n for i in range(npartitions):\n dsk[(split_prefix, i)] = (\n hash_shard,\n (a, 0, i, 0),\n split_out,\n split_out_setup,\n split_out_setup_kwargs,\n ignore_index,\n )\n for j in range(split_out):\n dsk[(shard_prefix, 0, i, j)] = (getitem, (split_prefix, i), j)\n a = shard_prefix\n else:\n split_out = 1\n\n # Combine\n b = \"{0}-combine-{1}\".format(token or funcname(combine), token_key)\n k = npartitions\n depth = 0\n while k > split_every:\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n for j in range(split_out):\n conc = (_concat, [(a, depth, i, j) for i in inds], ignore_index)\n if combine_kwargs:\n dsk[(b, depth + 1, part_i, j)] = (\n apply,\n combine,\n [conc],\n combine_kwargs,\n )\n else:\n dsk[(b, depth + 1, part_i, j)] = (combine, conc)\n k = part_i + 1\n a = b\n depth += 1\n\n if sort is not None:\n if sort and split_out > 1:\n raise NotImplementedError(\n \"Cannot guarantee sorted keys for `split_out>1`.\"\n \" Try using split_out=1, or grouping with sort=False.\"\n )\n aggregate_kwargs = aggregate_kwargs or {}\n aggregate_kwargs[\"sort\"] = sort\n\n # Aggregate\n for j in range(split_out):\n b = \"{0}-agg-{1}\".format(token or funcname(aggregate), 
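# ---- [editor's aside] Illustrative sketch of apply_concat_apply (aca) above. ----
# A toy chunk/aggregate pair mirroring how value_counts-style reductions are
# built on top of this helper. Made-up data; a sketch, not the canonical API:
import pandas as pd
import dask.dataframe as dd
from dask.dataframe.core import apply_concat_apply   # defined in this module

ddf = dd.from_pandas(pd.DataFrame({"x": ["a", "b", "a", "c", "a", "b"]}), npartitions=3)

def chunk(df):
    return df["x"].value_counts()        # per-partition counts

def aggregate(s):
    return s.groupby(level=0).sum()      # combine the concatenated per-partition counts

counts = apply_concat_apply([ddf], chunk=chunk, aggregate=aggregate)
print(counts.compute())
# Passing split_out=2 (optionally with split_out_setup=split_out_on_index) would
# hash-shard the chunk results into two output partitions instead of one.
# ---- [end editor's aside] ----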
token_key)\n conc = (_concat, [(a, depth, i, j) for i in range(k)], ignore_index)\n if aggregate_kwargs:\n dsk[(b, j)] = (apply, aggregate, [conc], aggregate_kwargs)\n else:\n dsk[(b, j)] = (aggregate, conc)\n\n if meta is no_default:\n meta_chunk = _emulate(chunk, *args, udf=True, **chunk_kwargs)\n meta = _emulate(\n aggregate, _concat([meta_chunk], ignore_index), udf=True, **aggregate_kwargs\n )\n meta = make_meta(\n meta,\n index=(getattr(make_meta(dfs[0]), \"index\", None) if dfs else None),\n parent_meta=dfs[0]._meta,\n )\n\n graph = HighLevelGraph.from_collections(b, dsk, dependencies=dfs)\n\n divisions = [None] * (split_out + 1)\n\n return new_dd_object(graph, b, meta, divisions, parent_meta=dfs[0]._meta)\n\n\naca = apply_concat_apply\n\n\ndef _extract_meta(x, nonempty=False):\n \"\"\"\n Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series\n \"\"\"\n if isinstance(x, (Scalar, _Frame)):\n return x._meta_nonempty if nonempty else x._meta\n elif isinstance(x, list):\n return [_extract_meta(_x, nonempty) for _x in x]\n elif isinstance(x, tuple):\n return tuple([_extract_meta(_x, nonempty) for _x in x])\n elif isinstance(x, dict):\n res = {}\n for k in x:\n res[k] = _extract_meta(x[k], nonempty)\n return res\n elif isinstance(x, Delayed):\n raise ValueError(\n \"Cannot infer dataframe metadata with a `dask.delayed` argument\"\n )\n else:\n return x\n\n\ndef _emulate(func, *args, **kwargs):\n \"\"\"\n Apply a function using args / kwargs. If arguments contain dd.DataFrame /\n dd.Series, using internal cache (``_meta``) for calculation\n \"\"\"\n with raise_on_meta_error(funcname(func), udf=kwargs.pop(\"udf\", False)):\n return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))\n\n\n@insert_meta_param_description\ndef map_partitions(\n func,\n *args,\n meta=no_default,\n enforce_metadata=True,\n transform_divisions=True,\n **kwargs,\n):\n \"\"\"Apply Python function on each DataFrame partition.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n args, kwargs :\n Arguments and keywords to pass to the function. At least one of the\n args should be a Dask.dataframe. Arguments and keywords may contain\n ``Scalar``, ``Delayed`` or regular python objects. 
DataFrame-like args\n (both dask and pandas) will be repartitioned to align (if necessary)\n before applying the function.\n enforce_metadata : bool\n Whether or not to enforce the structure of the metadata at runtime.\n This will rename and reorder columns for each partition,\n and will raise an error if this doesn't work or types don't match.\n $META\n \"\"\"\n name = kwargs.pop(\"token\", None)\n parent_meta = kwargs.pop(\"parent_meta\", None)\n\n if has_keyword(func, \"partition_info\"):\n kwargs[\"partition_info\"] = {\"number\": -1, \"divisions\": None}\n\n assert callable(func)\n if name is not None:\n token = tokenize(meta, *args, **kwargs)\n else:\n name = funcname(func)\n token = tokenize(func, meta, *args, **kwargs)\n name = \"{0}-{1}\".format(name, token)\n\n from .multi import _maybe_align_partitions\n\n args = _maybe_from_pandas(args)\n args = _maybe_align_partitions(args)\n dfs = [df for df in args if isinstance(df, _Frame)]\n meta_index = getattr(make_meta(dfs[0]), \"index\", None) if dfs else None\n if parent_meta is None and dfs:\n parent_meta = dfs[0]._meta\n\n if meta is no_default:\n # Use non-normalized kwargs here, as we want the real values (not\n # delayed values)\n meta = _emulate(func, *args, udf=True, **kwargs)\n else:\n meta = make_meta(meta, index=meta_index, parent_meta=parent_meta)\n\n if has_keyword(func, \"partition_info\"):\n kwargs[\"partition_info\"] = \"__dummy__\"\n\n if all(isinstance(arg, Scalar) for arg in args):\n layer = {\n (name, 0): (apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=args)\n return Scalar(graph, name, meta)\n elif not (has_parallel_type(meta) or is_arraylike(meta) and meta.shape):\n # If `meta` is not a pandas object, the concatenated results will be a\n # different type\n meta = make_meta(_concat([meta]), index=meta_index)\n\n # Ensure meta is empty series\n meta = make_meta(meta, parent_meta=parent_meta)\n\n args2 = []\n dependencies = []\n for arg in args:\n if isinstance(arg, _Frame):\n args2.append(arg)\n dependencies.append(arg)\n continue\n arg = normalize_arg(arg)\n arg2, collections = unpack_collections(arg)\n if collections:\n args2.append(arg2)\n dependencies.extend(collections)\n else:\n args2.append(arg)\n\n kwargs3 = {}\n simple = True\n for k, v in kwargs.items():\n v = normalize_arg(v)\n v, collections = unpack_collections(v)\n dependencies.extend(collections)\n kwargs3[k] = v\n if collections:\n simple = False\n\n if enforce_metadata:\n dsk = partitionwise_graph(\n apply_and_enforce,\n name,\n *args2,\n dependencies=dependencies,\n _func=func,\n _meta=meta,\n **kwargs3,\n )\n else:\n kwargs4 = kwargs if simple else kwargs3\n dsk = partitionwise_graph(\n func, name, *args2, **kwargs4, dependencies=dependencies\n )\n\n divisions = dfs[0].divisions\n if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:\n try:\n divisions = func(\n *[pd.Index(a.divisions) if a is dfs[0] else a for a in args], **kwargs\n )\n if isinstance(divisions, pd.Index):\n divisions = methods.tolist(divisions)\n except Exception:\n pass\n else:\n if not valid_divisions(divisions):\n divisions = [None] * (dfs[0].npartitions + 1)\n\n if has_keyword(func, \"partition_info\"):\n dsk = dict(dsk)\n\n for k, v in dsk.items():\n subgraph = v[0]\n number = k[-1]\n assert isinstance(number, int)\n info = {\"number\": number, \"division\": divisions[number]}\n # Replace the __dummy__ keyword argument with `info`\n subgraph_dsk = 
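# ---- [editor's aside] Illustrative sketch of map_partitions with partition_info. ----
# When the mapped function accepts a `partition_info` keyword, each task receives
# its partition number and division (see the __dummy__ handling in the
# surrounding code). Made-up data and column names:
import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(6)}), npartitions=3)

def tag(df, partition_info=None):
    df = df.copy()
    df["part"] = partition_info["number"] if partition_info else -1
    return df

tagged = ddf.map_partitions(tag, meta={"x": "i8", "part": "i8"})
print(tagged.compute())
# ---- [end editor's aside] ----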
copy.copy(subgraph.dsk)\n [(key, task)] = subgraph_dsk.items()\n subgraph_dsk[key] = subs(task, {\"__dummy__\": info})\n dsk[k] = (\n SubgraphCallable(\n dsk=subgraph_dsk,\n outkey=subgraph.outkey,\n inkeys=subgraph.inkeys,\n name=f\"{subgraph.name}-info-{tokenize(info)}\",\n ),\n ) + v[1:]\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n return new_dd_object(graph, name, meta, divisions)\n\n\ndef apply_and_enforce(*args, **kwargs):\n \"\"\"Apply a function, and enforce the output to match meta\n\n Ensures the output has the same columns, even if empty.\"\"\"\n func = kwargs.pop(\"_func\")\n meta = kwargs.pop(\"_meta\")\n df = func(*args, **kwargs)\n if is_dataframe_like(df) or is_series_like(df) or is_index_like(df):\n if not len(df):\n return meta\n if is_dataframe_like(df):\n check_matching_columns(meta, df)\n c = meta.columns\n else:\n c = meta.name\n return _rename(c, df)\n return df\n\n\ndef _rename(columns, df):\n \"\"\"\n Rename columns of pd.DataFrame or name of pd.Series.\n Not for dd.DataFrame or dd.Series.\n\n Parameters\n ----------\n columns : tuple, string, pd.DataFrame or pd.Series\n Column names, Series name or pandas instance which has the\n target column names / name.\n df : pd.DataFrame or pd.Series\n target DataFrame / Series to be renamed\n \"\"\"\n assert not isinstance(df, _Frame)\n\n if columns is no_default:\n return df\n\n if isinstance(columns, Iterator):\n columns = list(columns)\n\n if is_dataframe_like(df):\n if is_dataframe_like(columns):\n columns = columns.columns\n if not isinstance(columns, pd.Index):\n columns = pd.Index(columns)\n if (\n len(columns) == len(df.columns)\n and type(columns) is type(df.columns)\n and columns.equals(df.columns)\n ):\n # if target is identical, rename is not necessary\n return df\n # deep=False doesn't doesn't copy any data/indices, so this is cheap\n df = df.copy(deep=False)\n df.columns = columns\n return df\n elif is_series_like(df) or is_index_like(df):\n if is_series_like(columns) or is_index_like(columns):\n columns = columns.name\n if df.name == columns:\n return df\n return df.rename(columns)\n # map_partition may pass other types\n return df\n\n\ndef _rename_dask(df, names):\n \"\"\"\n Destructively rename columns of dd.DataFrame or name of dd.Series.\n Not for pd.DataFrame or pd.Series.\n\n Internally used to overwrite dd.DataFrame.columns and dd.Series.name\n We can't use map_partition because it applies function then rename\n\n Parameters\n ----------\n df : dd.DataFrame or dd.Series\n target DataFrame / Series to be renamed\n names : tuple, string\n Column names/Series name\n \"\"\"\n\n assert isinstance(df, _Frame)\n metadata = _rename(names, df._meta)\n name = \"rename-{0}\".format(tokenize(df, metadata))\n\n dsk = partitionwise_graph(_rename, name, metadata, df)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])\n return new_dd_object(graph, name, metadata, df.divisions)\n\n\ndef quantile(df, q, method=\"default\"):\n \"\"\"Approximate quantiles of Series.\n\n Parameters\n ----------\n q : list/array of floats\n Iterable of numbers ranging from 0 to 100 for the desired quantiles\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). 
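# ---- [editor's aside] Illustrative sketch of the _rename/_rename_dask helpers above. ----
# Assigning `.columns` on a dask DataFrame goes through this machinery: only the
# metadata plus a cheap per-partition rename task are touched; no data is computed.
import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"a": [1, 2], "b": [3, 4]}), npartitions=1)
ddf.columns = ["x", "y"]
print(ddf.compute())
# ---- [end editor's aside] ----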
If set to ``'tdigest'`` will use tdigest for\n floats and ints and fallback to the ``'dask'`` otherwise.\n \"\"\"\n # current implementation needs q to be sorted so\n # sort if array-like, otherwise leave it alone\n q_ndarray = np.array(q)\n if q_ndarray.ndim > 0:\n q_ndarray.sort(kind=\"mergesort\")\n q = q_ndarray\n\n assert isinstance(df, Series)\n\n allowed_methods = [\"default\", \"dask\", \"tdigest\"]\n if method not in allowed_methods:\n raise ValueError(\"method can only be 'default', 'dask' or 'tdigest'\")\n\n if method == \"default\":\n internal_method = \"dask\"\n else:\n internal_method = method\n\n # currently, only Series has quantile method\n if isinstance(df, Index):\n series_typ = df._meta.to_series()._constructor\n meta = df._meta_nonempty.to_series().quantile(q)\n else:\n if is_series_like(df._meta):\n series_typ = df._meta._constructor\n else:\n series_typ = df._meta._constructor_sliced\n meta = df._meta_nonempty.quantile(q)\n\n if is_series_like(meta):\n # Index.quantile(list-like) must be pd.Series, not pd.Index\n df_name = df.name\n finalize_tsk = lambda tsk: (series_typ, tsk, q, None, df_name)\n return_type = Series\n else:\n finalize_tsk = lambda tsk: (getitem, tsk, 0)\n return_type = Scalar\n q = [q]\n\n # pandas uses quantile in [0, 1]\n # numpy / everyone else uses [0, 100]\n qs = np.asarray(q) * 100\n token = tokenize(df, qs)\n\n if len(qs) == 0:\n name = \"quantiles-\" + token\n empty_index = pd.Index([], dtype=float)\n\n return Series(\n {(name, 0): series_typ([], name=df.name, index=empty_index, dtype=\"float\")},\n name,\n df._meta,\n [None, None],\n )\n else:\n new_divisions = [np.min(q), np.max(q)]\n\n df = df.dropna()\n\n if internal_method == \"tdigest\" and (\n np.issubdtype(df.dtype, np.floating) or np.issubdtype(df.dtype, np.integer)\n ):\n\n from dask.utils import import_required\n\n import_required(\n \"crick\", \"crick is a required dependency for using the t-digest method.\"\n )\n\n from dask.array.percentile import _percentiles_from_tdigest, _tdigest_chunk\n\n name = \"quantiles_tdigest-1-\" + token\n val_dsk = {\n (name, i): (_tdigest_chunk, (getattr, key, \"values\"))\n for i, key in enumerate(df.__dask_keys__())\n }\n\n name2 = \"quantiles_tdigest-2-\" + token\n merge_dsk = {\n (name2, 0): finalize_tsk((_percentiles_from_tdigest, qs, sorted(val_dsk)))\n }\n else:\n\n from dask.array.dispatch import percentile_lookup as _percentile\n from dask.array.percentile import merge_percentiles\n\n # Add 0 and 100 during calculation for more robust behavior (hopefully)\n calc_qs = np.pad(qs, 1, mode=\"constant\")\n calc_qs[-1] = 100\n name = \"quantiles-1-\" + token\n val_dsk = {\n (name, i): (_percentile, key, calc_qs)\n for i, key in enumerate(df.__dask_keys__())\n }\n\n name2 = \"quantiles-2-\" + token\n merge_dsk = {\n (name2, 0): finalize_tsk(\n (merge_percentiles, qs, [calc_qs] * df.npartitions, sorted(val_dsk))\n )\n }\n dsk = merge(val_dsk, merge_dsk)\n graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[df])\n return return_type(graph, name2, meta, new_divisions)\n\n\ndef cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):\n \"\"\"DataFrame covariance and pearson correlation.\n\n Computes pairwise covariance or correlation of columns, excluding NA/null\n values.\n\n Parameters\n ----------\n df : DataFrame\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n corr : bool, optional\n If True, compute the Pearson correlation. 
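# ---- [editor's aside] Illustrative sketch of the approximate quantile machinery above. ----
import pandas as pd
import dask.dataframe as dd

s = dd.from_pandas(pd.Series(range(1000), name="x"), npartitions=4)

print(s.quantile(0.5).compute())               # single quantile -> scalar
print(s.quantile([0.1, 0.5, 0.9]).compute())   # list of quantiles -> Series
# s.quantile(0.5, method="tdigest")            # requires the optional `crick` dependency
# ---- [end editor's aside] ----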
If False [default], compute\n the covariance.\n scalar : bool, optional\n If True, compute covariance between two variables as a scalar. Only\n valid if `df` has 2 columns. If False [default], compute the entire\n covariance/correlation matrix.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used.\n Default is False.\n \"\"\"\n if min_periods is None:\n min_periods = 2\n elif min_periods < 2:\n raise ValueError(\"min_periods must be >= 2\")\n\n if split_every is False:\n split_every = df.npartitions\n elif split_every < 2 or not isinstance(split_every, Integral):\n raise ValueError(\"split_every must be an integer >= 2\")\n\n df = df._get_numeric_data()\n\n if scalar and len(df.columns) != 2:\n raise ValueError(\"scalar only valid for 2 column dataframe\")\n\n token = tokenize(df, min_periods, scalar, split_every)\n\n funcname = \"corr\" if corr else \"cov\"\n a = \"{0}-chunk-{1}\".format(funcname, df._name)\n dsk = {\n (a, i): (cov_corr_chunk, f, corr) for (i, f) in enumerate(df.__dask_keys__())\n }\n\n prefix = \"{0}-combine-{1}-\".format(funcname, df._name)\n k = df.npartitions\n b = a\n depth = 0\n while k > split_every:\n b = prefix + str(depth)\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)\n k = part_i + 1\n a = b\n depth += 1\n\n name = \"{0}-{1}\".format(funcname, token)\n dsk[(name, 0)] = (\n cov_corr_agg,\n [(a, i) for i in range(k)],\n df.columns,\n min_periods,\n corr,\n scalar,\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])\n if scalar:\n return Scalar(graph, name, \"f8\")\n meta = make_meta(\n [(c, \"f8\") for c in df.columns], index=df.columns, parent_meta=df._meta\n )\n return DataFrame(graph, name, meta, (df.columns[0], df.columns[-1]))\n\n\ndef cov_corr_chunk(df, corr=False):\n \"\"\"Chunk part of a covariance or correlation computation\"\"\"\n shape = (df.shape[1], df.shape[1])\n df = df.astype(\"float64\", copy=False)\n sums = np.zeros_like(df.values, shape=shape)\n counts = np.zeros_like(df.values, shape=shape)\n for idx, col in enumerate(df):\n mask = df.iloc[:, idx].notnull()\n sums[idx] = df[mask].sum().values\n counts[idx] = df[mask].count().values\n cov = df.cov().values\n dtype = [(\"sum\", sums.dtype), (\"count\", counts.dtype), (\"cov\", cov.dtype)]\n if corr:\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n mu = (sums / counts).T\n m = np.zeros_like(df.values, shape=shape)\n mask = df.isnull().values\n for idx, x in enumerate(df):\n # Avoid using ufunc.outer (not supported by cupy)\n mu_discrepancy = (\n np.subtract(df.iloc[:, idx].values[:, None], mu[idx][None, :]) ** 2\n )\n mu_discrepancy[mask] = np.nan\n m[idx] = np.nansum(mu_discrepancy, axis=0)\n m = m.T\n dtype.append((\"m\", m.dtype))\n\n out = {\"sum\": sums, \"count\": counts, \"cov\": cov * (counts - 1)}\n if corr:\n out[\"m\"] = m\n return out\n\n\ndef cov_corr_combine(data_in, corr=False):\n\n data = {\"sum\": None, \"count\": None, \"cov\": None}\n if corr:\n data[\"m\"] = None\n\n for k in data.keys():\n data[k] = [d[k] for d in data_in]\n data[k] = np.concatenate(data[k]).reshape((len(data[k]),) + data[k][0].shape)\n\n sums = np.nan_to_num(data[\"sum\"])\n counts = data[\"count\"]\n\n cum_sums = np.cumsum(sums, 0)\n cum_counts = np.cumsum(counts, 0)\n\n s1 = cum_sums[:-1]\n s2 = sums[1:]\n n1 = cum_counts[:-1]\n n2 = counts[1:]\n with 
np.errstate(invalid=\"ignore\"):\n d = (s2 / n2) - (s1 / n1)\n C = np.nansum(\n (n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0\n ) + np.nansum(data[\"cov\"], 0)\n\n out = {\"sum\": cum_sums[-1], \"count\": cum_counts[-1], \"cov\": C}\n\n if corr:\n nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)\n mu = cum_sums[-1] / nobs\n counts_na = np.where(counts, counts, np.nan)\n m = np.nansum(data[\"m\"] + counts * (sums / counts_na - mu) ** 2, axis=0)\n out[\"m\"] = m\n return out\n\n\ndef cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):\n out = cov_corr_combine(data, corr)\n counts = out[\"count\"]\n C = out[\"cov\"]\n C[counts < min_periods] = np.nan\n if corr:\n m2 = out[\"m\"]\n den = np.sqrt(m2 * m2.T)\n else:\n den = np.where(counts, counts, np.nan) - 1\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n mat = C / den\n if scalar:\n return float(mat[0, 1])\n return pd.DataFrame(mat, columns=cols, index=cols)\n\n\ndef pd_split(df, p, random_state=None, shuffle=False):\n \"\"\"Split DataFrame into multiple pieces pseudorandomly\n\n >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [2, 3, 4, 5, 6, 7]})\n\n >>> a, b = pd_split(\n ... df, [0.5, 0.5], random_state=123, shuffle=True\n ... ) # roughly 50/50 split\n >>> a\n a b\n 3 4 5\n 0 1 2\n 5 6 7\n >>> b\n a b\n 1 2 3\n 4 5 6\n 2 3 4\n \"\"\"\n p = list(p)\n if shuffle:\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n df = df.sample(frac=1.0, random_state=random_state)\n index = pseudorandom(len(df), p, random_state)\n return [df.iloc[index == i] for i in range(len(p))]\n\n\ndef _take_last(a, skipna=True):\n \"\"\"\n take last row (Series) of DataFrame / last value of Series\n considering NaN.\n\n Parameters\n ----------\n a : pd.DataFrame or pd.Series\n skipna : bool, default True\n Whether to exclude NaN\n\n \"\"\"\n\n def _last_valid(s):\n for i in range(1, min(10, len(s) + 1)):\n val = s.iloc[-i]\n if not pd.isnull(val):\n return val\n else:\n nonnull = s[s.notna()]\n if not nonnull.empty:\n return nonnull.iloc[-1]\n return None\n\n if skipna is False:\n return a.iloc[-1]\n else:\n # take last valid value excluding NaN, NaN location may be different\n # in each column\n if is_dataframe_like(a):\n # create Series from appropriate backend dataframe library\n series_typ = type(a.iloc[0:1, 0])\n if a.empty:\n return series_typ([], dtype=\"float\")\n return series_typ(\n {col: _last_valid(a[col]) for col in a.columns}, index=a.columns\n )\n else:\n return _last_valid(a)\n\n\ndef check_divisions(divisions):\n if not isinstance(divisions, (list, tuple)):\n raise ValueError(\"New division must be list or tuple\")\n divisions = list(divisions)\n if divisions != sorted(divisions):\n raise ValueError(\"New division must be sorted\")\n if len(divisions[:-1]) != len(list(unique(divisions[:-1]))):\n msg = \"New division must be unique, except for the last element\"\n raise ValueError(msg)\n\n\ndef repartition_divisions(a, b, name, out1, out2, force=False):\n \"\"\"dask graph to repartition dataframe by new divisions\n\n Parameters\n ----------\n a : tuple\n old divisions\n b : tuple, list\n new divisions\n name : str\n name of old dataframe\n out1 : str\n name of temporary splits\n out2 : str\n name of new dataframe\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions lower and upper bounds must be\n the same as the old divisions.\n\n Examples\n --------\n >>> from pprint import 
pprint\n >>> pprint(repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')) # doctest: +ELLIPSIS\n {('b', 0): (<function boundary_slice at ...>, ('a', 0), 1, 3, False),\n ('b', 1): (<function boundary_slice at ...>, ('a', 1), 3, 4, False),\n ('b', 2): (<function boundary_slice at ...>, ('a', 1), 4, 6, False),\n ('b', 3): (<function boundary_slice at ...>, ('a', 1), 6, 7, True),\n ('c', 0): (<function concat at ...>, [('b', 0), ('b', 1)]),\n ('c', 1): ('b', 2),\n ('c', 2): ('b', 3)}\n \"\"\"\n check_divisions(b)\n\n if len(b) < 2:\n # minimum division is 2 elements, like [0, 0]\n raise ValueError(\"New division must be longer than 2 elements\")\n\n if force:\n if a[0] < b[0]:\n msg = (\n \"left side of the new division must be equal or smaller \"\n \"than old division\"\n )\n raise ValueError(msg)\n if a[-1] > b[-1]:\n msg = (\n \"right side of the new division must be equal or larger \"\n \"than old division\"\n )\n raise ValueError(msg)\n else:\n if a[0] != b[0]:\n msg = \"left side of old and new divisions are different\"\n raise ValueError(msg)\n if a[-1] != b[-1]:\n msg = \"right side of old and new divisions are different\"\n raise ValueError(msg)\n\n def _is_single_last_div(x):\n \"\"\"Whether last division only contains single label\"\"\"\n return len(x) >= 2 and x[-1] == x[-2]\n\n c = [a[0]]\n d = dict()\n low = a[0]\n\n i, j = 1, 1 # indices for old/new divisions\n k = 0 # index for temp divisions\n\n last_elem = _is_single_last_div(a)\n\n # process through old division\n # left part of new division can be processed in this loop\n while i < len(a) and j < len(b):\n if a[i] < b[j]:\n # tuple is something like:\n # (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)\n low = a[i]\n i += 1\n elif a[i] > b[j]:\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)\n low = b[j]\n j += 1\n else:\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)\n low = b[j]\n if len(a) == i + 1 or a[i] < a[i + 1]:\n j += 1\n i += 1\n c.append(low)\n k += 1\n\n # right part of new division can remain\n if a[-1] < b[-1] or b[-1] == b[-2]:\n for _j in range(j, len(b)):\n # always use right-most of old division\n # because it may contain last element\n m = len(a) - 2\n d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)\n low = b[_j]\n c.append(low)\n k += 1\n else:\n # even if new division is processed through,\n # right-most element of old division can remain\n if last_elem and i < len(a):\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)\n k += 1\n c.append(a[-1])\n\n # replace last element of tuple with True\n d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)\n\n i, j = 0, 1\n\n last_elem = _is_single_last_div(c)\n\n while j < len(b):\n tmp = []\n while c[i] < b[j]:\n tmp.append((out1, i))\n i += 1\n while (\n last_elem\n and c[i] == b[-1]\n and (b[-1] != b[-2] or j == len(b) - 1)\n and i < k\n ):\n # append if last split is not included\n tmp.append((out1, i))\n i += 1\n if len(tmp) == 0:\n # dummy slice to return empty DataFrame or Series,\n # which retain original data attributes (columns / name)\n d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)\n elif len(tmp) == 1:\n d[(out2, j - 1)] = tmp[0]\n else:\n if not tmp:\n raise ValueError(\n \"check for duplicate partitions\\nold:\\n%s\\n\\n\"\n \"new:\\n%s\\n\\ncombined:\\n%s\" % (pformat(a), pformat(b), pformat(c))\n )\n d[(out2, j - 1)] = 
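# ---- [editor's aside] Illustrative sketch of repartitioning by explicit divisions. ----
# New divisions must be sorted and keep the old outer bounds unless force=True.
import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(8)}), npartitions=2)   # divisions (0, 4, 7)

ddf3 = ddf.repartition(divisions=[0, 2, 5, 7])
print(ddf3.divisions, ddf3.npartitions)
# ---- [end editor's aside] ----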
(methods.concat, tmp)\n j += 1\n return d\n\n\ndef repartition_freq(df, freq=None):\n \"\"\"Repartition a timeseries dataframe by a new frequency\"\"\"\n if not isinstance(df.divisions[0], pd.Timestamp):\n raise TypeError(\"Can only repartition on frequency for timeseries\")\n\n freq = _map_freq_to_period_start(freq)\n\n try:\n start = df.divisions[0].ceil(freq)\n except ValueError:\n start = df.divisions[0]\n divisions = methods.tolist(\n pd.date_range(start=start, end=df.divisions[-1], freq=freq)\n )\n if not len(divisions):\n divisions = [df.divisions[0], df.divisions[-1]]\n else:\n divisions.append(df.divisions[-1])\n if divisions[0] != df.divisions[0]:\n divisions = [df.divisions[0]] + divisions\n\n return df.repartition(divisions=divisions)\n\n\ndef _map_freq_to_period_start(freq):\n \"\"\"Ensure that the frequency pertains to the **start** of a period.\n\n If e.g. `freq='M'`, then the divisions are:\n - 2021-31-1 00:00:00 (start of February partition)\n - 2021-2-28 00:00:00 (start of March partition)\n - ...\n\n but this **should** be:\n - 2021-2-1 00:00:00 (start of February partition)\n - 2021-3-1 00:00:00 (start of March partition)\n - ...\n\n Therefore, we map `freq='M'` to `freq='MS'` (same for quarter and year).\n \"\"\"\n\n if not isinstance(freq, str):\n return freq\n\n offset = pd.tseries.frequencies.to_offset(freq)\n offset_type_name = type(offset).__name__\n\n if not offset_type_name.endswith(\"End\"):\n return freq\n\n new_offset = offset_type_name[: -len(\"End\")] + \"Begin\"\n try:\n new_offset_type = getattr(pd.tseries.offsets, new_offset)\n if \"-\" in freq:\n _, anchor = freq.split(\"-\")\n anchor = \"-\" + anchor\n else:\n anchor = \"\"\n n = str(offset.n) if offset.n != 1 else \"\"\n return f\"{n}{new_offset_type._prefix}{anchor}\"\n except AttributeError:\n return freq\n\n\ndef repartition_size(df, size):\n \"\"\"\n Repartition dataframe so that new partitions have approximately `size` memory usage each\n \"\"\"\n if isinstance(size, str):\n size = parse_bytes(size)\n size = int(size)\n\n mem_usages = df.map_partitions(total_mem_usage, deep=True).compute()\n\n # 1. split each partition that is larger than partition_size\n nsplits = 1 + mem_usages // size\n if np.any(nsplits > 1):\n split_name = \"repartition-split-{}-{}\".format(size, tokenize(df))\n df = _split_partitions(df, nsplits, split_name)\n # update mem_usages to account for the split partitions\n split_mem_usages = []\n for n, usage in zip(nsplits, mem_usages):\n split_mem_usages.extend([usage / n] * n)\n mem_usages = pd.Series(split_mem_usages)\n\n # 2. 
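# ---- [editor's aside] Illustrative sketch of frequency- and size-based repartitioning. ----
# Month-end aliases such as 'M' are mapped to month-start boundaries ('MS') so
# each partition begins at the start of its period. Made-up timeseries:
import pandas as pd
import dask.dataframe as dd

idx = pd.date_range("2021-01-01", "2021-03-31", freq="D")
ts = dd.from_pandas(pd.DataFrame({"x": range(len(idx))}, index=idx), npartitions=3)

monthly = ts.repartition(freq="M")              # one partition per calendar month
print(monthly.divisions)

resized = ts.repartition(partition_size="1MB")  # target approximate bytes per partition
print(resized.npartitions)
# ---- [end editor's aside] ----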
now that all partitions are less than size, concat them up to size\n assert np.all(mem_usages <= size)\n new_npartitions = list(map(len, iter_chunks(mem_usages, size)))\n new_partitions_boundaries = np.cumsum(new_npartitions)\n new_name = \"repartition-{}-{}\".format(size, tokenize(df))\n return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)\n\n\ndef total_mem_usage(df, index=True, deep=False):\n mem_usage = df.memory_usage(index=index, deep=deep)\n if is_series_like(mem_usage):\n mem_usage = mem_usage.sum()\n return mem_usage\n\n\ndef repartition_npartitions(df, npartitions):\n \"\"\"Repartition dataframe to a smaller number of partitions\"\"\"\n new_name = \"repartition-%d-%s\" % (npartitions, tokenize(df))\n if df.npartitions == npartitions:\n return df\n elif df.npartitions > npartitions:\n npartitions_ratio = df.npartitions / npartitions\n new_partitions_boundaries = [\n int(new_partition_index * npartitions_ratio)\n for new_partition_index in range(npartitions + 1)\n ]\n return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)\n else:\n original_divisions = divisions = pd.Series(df.divisions)\n if df.known_divisions and (\n np.issubdtype(divisions.dtype, np.datetime64)\n or np.issubdtype(divisions.dtype, np.number)\n ):\n if np.issubdtype(divisions.dtype, np.datetime64):\n divisions = divisions.values.astype(\"float64\")\n\n if is_series_like(divisions):\n divisions = divisions.values\n\n n = len(divisions)\n divisions = np.interp(\n x=np.linspace(0, n, npartitions + 1),\n xp=np.linspace(0, n, n),\n fp=divisions,\n )\n if np.issubdtype(original_divisions.dtype, np.datetime64):\n divisions = methods.tolist(\n pd.Series(divisions).astype(original_divisions.dtype)\n )\n elif np.issubdtype(original_divisions.dtype, np.integer):\n divisions = divisions.astype(original_divisions.dtype)\n\n if isinstance(divisions, np.ndarray):\n divisions = divisions.tolist()\n\n divisions = list(divisions)\n divisions[0] = df.divisions[0]\n divisions[-1] = df.divisions[-1]\n\n return df.repartition(divisions=divisions)\n else:\n div, mod = divmod(npartitions, df.npartitions)\n nsplits = [div] * df.npartitions\n nsplits[-1] += mod\n return _split_partitions(df, nsplits, new_name)\n\n\ndef _repartition_from_boundaries(df, new_partitions_boundaries, new_name):\n if not isinstance(new_partitions_boundaries, list):\n new_partitions_boundaries = list(new_partitions_boundaries)\n if new_partitions_boundaries[0] > 0:\n new_partitions_boundaries.insert(0, 0)\n if new_partitions_boundaries[-1] < df.npartitions:\n new_partitions_boundaries.append(df.npartitions)\n dsk = {}\n for i, (start, end) in enumerate(\n zip(new_partitions_boundaries, new_partitions_boundaries[1:])\n ):\n dsk[new_name, i] = (methods.concat, [(df._name, j) for j in range(start, end)])\n divisions = [df.divisions[i] for i in new_partitions_boundaries]\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])\n return new_dd_object(graph, new_name, df._meta, divisions)\n\n\ndef _split_partitions(df, nsplits, new_name):\n \"\"\"Split a Dask dataframe into new partitions\n\n Parameters\n ----------\n df: DataFrame or Series\n nsplits: List[int]\n Number of target dataframes for each partition\n The length of nsplits should be the same as df.npartitions\n new_name: str\n\n See Also\n --------\n repartition_npartitions\n repartition_size\n \"\"\"\n if len(nsplits) != df.npartitions:\n raise ValueError(\"nsplits should have len={}\".format(df.npartitions))\n\n dsk = {}\n split_name = 
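# ---- [editor's aside] Illustrative sketch of changing only the partition count. ----
# Reducing npartitions concatenates neighbouring partitions; increasing it
# interpolates new divisions when they are known and numeric/datetime, and
# otherwise splits partitions evenly (see repartition_npartitions above).
import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(100)}), npartitions=10)

fewer = ddf.repartition(npartitions=4)
more = ddf.repartition(npartitions=20)
print(fewer.npartitions, more.npartitions)
# ---- [end editor's aside] ----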
\"split-{}\".format(tokenize(df, nsplits))\n j = 0\n for i, k in enumerate(nsplits):\n if k == 1:\n dsk[new_name, j] = (df._name, i)\n j += 1\n else:\n dsk[split_name, i] = (split_evenly, (df._name, i), k)\n for jj in range(k):\n dsk[new_name, j] = (getitem, (split_name, i), jj)\n j += 1\n\n divisions = [None] * (1 + sum(nsplits))\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])\n return new_dd_object(graph, new_name, df._meta, divisions)\n\n\ndef repartition(df, divisions=None, force=False):\n \"\"\"Repartition dataframe along new divisions\n\n Dask.DataFrame objects are partitioned along their index. Often when\n multiple dataframes interact we need to align these partitionings. The\n ``repartition`` function constructs a new DataFrame object holding the same\n data but partitioned on different values. It does this by performing a\n sequence of ``loc`` and ``concat`` calls to split and merge the previous\n generation of partitions.\n\n Parameters\n ----------\n\n divisions : list\n List of partitions to be used\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions lower and upper bounds must be\n the same as the old divisions.\n\n Examples\n --------\n\n >>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP\n\n Also works on Pandas objects\n\n >>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP\n \"\"\"\n\n token = tokenize(df, divisions)\n if isinstance(df, _Frame):\n tmp = \"repartition-split-\" + token\n out = \"repartition-merge-\" + token\n dsk = repartition_divisions(\n df.divisions, divisions, df._name, tmp, out, force=force\n )\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[df])\n return new_dd_object(graph, out, df._meta, divisions)\n elif is_dataframe_like(df) or is_series_like(df):\n name = \"repartition-dataframe-\" + token\n from .utils import shard_df_on_index\n\n dfs = shard_df_on_index(df, divisions[1:-1])\n dsk = dict(((name, i), df) for i, df in enumerate(dfs))\n return new_dd_object(dsk, name, df, divisions)\n raise ValueError(\"Data must be DataFrame or Series\")\n\n\ndef _reduction_chunk(x, aca_chunk=None, **kwargs):\n o = aca_chunk(x, **kwargs)\n # Return a dataframe so that the concatenated version is also a dataframe\n return o.to_frame().T if is_series_like(o) else o\n\n\ndef _reduction_combine(x, aca_combine=None, **kwargs):\n if isinstance(x, list):\n x = pd.Series(x)\n o = aca_combine(x, **kwargs)\n # Return a dataframe so that the concatenated version is also a dataframe\n return o.to_frame().T if is_series_like(o) else o\n\n\ndef _reduction_aggregate(x, aca_aggregate=None, **kwargs):\n if isinstance(x, list):\n x = pd.Series(x)\n return aca_aggregate(x, **kwargs)\n\n\ndef idxmaxmin_chunk(x, fn=None, skipna=True):\n minmax = \"max\" if fn == \"idxmax\" else \"min\"\n if len(x) > 0:\n idx = getattr(x, fn)(skipna=skipna)\n value = getattr(x, minmax)(skipna=skipna)\n else:\n idx = value = pd.Series([], dtype=\"i8\")\n if is_series_like(idx):\n return pd.DataFrame({\"idx\": idx, \"value\": value})\n return pd.DataFrame({\"idx\": [idx], \"value\": [value]})\n\n\ndef idxmaxmin_row(x, fn=None, skipna=True):\n minmax = \"max\" if fn == \"idxmax\" else \"min\"\n if len(x) > 0:\n x = x.set_index(\"idx\")\n idx = [getattr(x.value, fn)(skipna=skipna)]\n value = [getattr(x.value, minmax)(skipna=skipna)]\n else:\n idx = value = pd.Series([], dtype=\"i8\")\n return pd.DataFrame({\"idx\": idx, \"value\": value})\n\n\ndef idxmaxmin_combine(x, fn=None, 
skipna=True):\n if len(x) <= 1:\n return x\n return (\n x.groupby(level=0)\n .apply(idxmaxmin_row, fn=fn, skipna=skipna)\n .reset_index(level=1, drop=True)\n )\n\n\ndef idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):\n res = idxmaxmin_combine(x, fn, skipna=skipna)[\"idx\"]\n if len(res) == 0:\n raise ValueError(\"attempt to get argmax of an empty sequence\")\n if scalar:\n return res[0]\n res.name = None\n return res\n\n\ndef _mode_aggregate(df, dropna):\n value_count_series = df.sum()\n max_val = value_count_series.max(skipna=dropna)\n mode_series = (\n value_count_series[value_count_series == max_val]\n .index.to_series()\n .sort_values()\n .reset_index(drop=True)\n )\n return mode_series\n\n\ndef _count_aggregate(x):\n return x.sum().astype(\"int64\")\n\n\ndef safe_head(df, n):\n r = M.head(df, n)\n if len(r) != n:\n msg = (\n \"Insufficient elements for `head`. {0} elements \"\n \"requested, only {1} elements available. Try passing larger \"\n \"`npartitions` to `head`.\"\n )\n warnings.warn(msg.format(n, len(r)))\n return r\n\n\ndef maybe_shift_divisions(df, periods, freq):\n \"\"\"Maybe shift divisions by periods of size freq\n\n Used to shift the divisions for the `shift` method. If freq isn't a fixed\n size (not anchored or relative), then the divisions are shifted\n appropriately. Otherwise the divisions are cleared.\n\n Parameters\n ----------\n df : dd.DataFrame, dd.Series, or dd.Index\n periods : int\n The number of periods to shift.\n freq : DateOffset, timedelta, or time rule string\n The frequency to shift by.\n \"\"\"\n if isinstance(freq, str):\n freq = pd.tseries.frequencies.to_offset(freq)\n\n is_offset = isinstance(freq, pd.DateOffset)\n if is_offset:\n if freq.is_anchored() or not hasattr(freq, \"delta\"):\n # Can't infer divisions on relative or anchored offsets, as\n # divisions may now split identical index value.\n # (e.g. 
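# ---- [editor's aside] Illustrative sketch of the head/shift/idxmax helpers above. ----
# `safe_head` warns when the first partition(s) cannot supply `n` rows, so pass a
# larger `npartitions` to `head`. Per `maybe_shift_divisions`, a fixed-size
# offset keeps (shifts) the divisions, while offsets without a fixed delta clear them.
import pandas as pd
import dask.dataframe as dd

idx = pd.date_range("2021-01-01", periods=10, freq="D")
ddf = dd.from_pandas(pd.DataFrame({"x": [3, 9, 1, 7, 5, 2, 8, 6, 0, 4]}, index=idx), npartitions=5)

print(ddf.head(4, npartitions=2))               # first partition alone has only 2 rows
print(ddf.shift(1, freq="D").known_divisions)   # True: divisions were shifted, not cleared
print(ddf.x.idxmax().compute())                 # uses the idxmaxmin chunk/combine/agg path above
# ---- [end editor's aside] ----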
index_partitions = [[1, 2, 3], [3, 4, 5]])\n return df.clear_divisions()\n if df.known_divisions:\n divs = pd.Series(range(len(df.divisions)), index=df.divisions)\n divisions = divs.shift(periods, freq=freq).index\n return df.__class__(df.dask, df._name, df._meta, divisions)\n return df\n\n\n@wraps(pd.to_datetime)\ndef to_datetime(arg, meta=None, **kwargs):\n tz_kwarg = {\"tz\": \"utc\"} if kwargs.get(\"utc\") else {}\n if meta is None:\n if isinstance(arg, Index):\n meta = pd.DatetimeIndex([], **tz_kwarg)\n meta.name = arg.name\n elif not (is_dataframe_like(arg) or is_series_like(arg)):\n raise NotImplementedError(\n \"dask.dataframe.to_datetime does not support \"\n \"non-index-able arguments (like scalars)\"\n )\n else:\n meta = pd.Series([pd.Timestamp(\"2000\", **tz_kwarg)])\n meta.index = meta.index.astype(arg.index.dtype)\n meta.index.name = arg.index.name\n return map_partitions(pd.to_datetime, arg, meta=meta, **kwargs)\n\n\n@wraps(pd.to_timedelta)\ndef to_timedelta(arg, unit=\"ns\", errors=\"raise\"):\n meta = pd.Series([pd.Timedelta(1, unit=unit)])\n return map_partitions(pd.to_timedelta, arg, unit=unit, errors=errors, meta=meta)\n\n\nif hasattr(pd, \"isna\"):\n\n @wraps(pd.isna)\n def isna(arg):\n return map_partitions(pd.isna, arg)\n\n\ndef _repr_data_series(s, index):\n \"\"\"A helper for creating the ``_repr_data`` property\"\"\"\n npartitions = len(index) - 1\n if is_categorical_dtype(s):\n if has_known_categories(s):\n dtype = \"category[known]\"\n else:\n dtype = \"category[unknown]\"\n else:\n dtype = str(s.dtype)\n return pd.Series([dtype] + [\"...\"] * npartitions, index=index, name=s.name)\n\n\ndef has_parallel_type(x):\n \"\"\"Does this object have a dask dataframe equivalent?\"\"\"\n return get_parallel_type(x) is not Scalar\n\n\ndef new_dd_object(dsk, name, meta, divisions, parent_meta=None):\n \"\"\"Generic constructor for dask.dataframe objects.\n\n Decides the appropriate output class based on the type of `meta` provided.\n \"\"\"\n if has_parallel_type(meta):\n return get_parallel_type(meta)(dsk, name, meta, divisions)\n elif is_arraylike(meta) and meta.shape:\n import dask.array as da\n\n chunks = ((np.nan,) * (len(divisions) - 1),) + tuple(\n (d,) for d in meta.shape[1:]\n )\n if len(chunks) > 1:\n layer = dsk.layers[name]\n if isinstance(layer, Blockwise):\n layer.new_axes[\"j\"] = chunks[1][0]\n layer.output_indices = layer.output_indices + (\"j\",)\n else:\n suffix = (0,) * (len(chunks) - 1)\n for i in range(len(chunks[0])):\n layer[(name, i) + suffix] = layer.pop((name, i))\n return da.Array(dsk, name=name, chunks=chunks, dtype=meta.dtype)\n else:\n return get_parallel_type(meta)(dsk, name, meta, divisions)\n\n\ndef partitionwise_graph(func, name, *args, **kwargs):\n \"\"\"\n Apply a function partition-wise across arguments to create layer of a graph\n\n This applies a function, ``func``, in an embarrassingly parallel fashion\n across partitions/chunks in the provided arguments. 
It handles Dataframes,\n Arrays, and scalars smoothly, and relies on the ``blockwise`` machinery\n to provide a nicely symbolic graph.\n\n It is most commonly used in other graph-building functions to create the\n appropriate layer of the resulting dataframe.\n\n Parameters\n ----------\n func: callable\n name: str\n descriptive name for the operation\n *args:\n **kwargs:\n\n Returns\n -------\n out: Blockwise graph\n\n Examples\n --------\n >>> subgraph = partitionwise_graph(function, x, y, z=123) # doctest: +SKIP\n >>> layer = partitionwise_graph(function, df, x, z=123) # doctest: +SKIP\n >>> graph = HighLevelGraph.from_collections(name, layer, dependencies=[df, x]) # doctest: +SKIP\n >>> result = new_dd_object(graph, name, metadata, df.divisions) # doctest: +SKIP\n\n See Also\n --------\n map_partitions\n \"\"\"\n pairs = []\n numblocks = {}\n for arg in args:\n if isinstance(arg, _Frame):\n pairs.extend([arg._name, \"i\"])\n numblocks[arg._name] = (arg.npartitions,)\n elif isinstance(arg, Scalar):\n pairs.extend([arg._name, \"i\"])\n numblocks[arg._name] = (1,)\n elif isinstance(arg, Array):\n if arg.ndim == 1:\n pairs.extend([arg.name, \"i\"])\n elif arg.ndim == 0:\n pairs.extend([arg.name, \"\"])\n elif arg.ndim == 2:\n pairs.extend([arg.name, \"ij\"])\n else:\n raise ValueError(\"Can't add multi-dimensional array to dataframes\")\n numblocks[arg._name] = arg.numblocks\n else:\n pairs.extend([arg, None])\n return blockwise(\n func, name, \"i\", *pairs, numblocks=numblocks, concatenate=True, **kwargs\n )\n\n\ndef meta_warning(df):\n \"\"\"\n Provide an informative message when the user is asked to provide metadata\n \"\"\"\n if is_dataframe_like(df):\n meta_str = {k: str(v) for k, v in df.dtypes.to_dict().items()}\n elif is_series_like(df):\n meta_str = (df.name, str(df.dtype))\n else:\n meta_str = None\n msg = (\n \"\\nYou did not provide metadata, so Dask is running your \"\n \"function on a small dataset to guess output types. 
\"\n \"It is possible that Dask will guess incorrectly.\\n\"\n \"To provide an explicit output types or to silence this message, \"\n \"please provide the `meta=` keyword, as described in the map or \"\n \"apply function that you are using.\"\n )\n if meta_str:\n msg += (\n \"\\n\"\n \" Before: .apply(func)\\n\"\n \" After: .apply(func, meta=%s)\\n\" % str(meta_str)\n )\n return msg\n\n\ndef prefix_reduction(f, ddf, identity, **kwargs):\n \"\"\"Computes the prefix sums of f on df\n\n If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with\n partitions [f(identity, P1),\n f(f(identity, P1), P2),\n f(f(f(identity, P1), P2), P3),\n ...]\n\n Parameters\n ----------\n f : callable\n an associative function f\n ddf : dd.DataFrame\n identity : pd.DataFrame\n an identity element of f, that is f(identity, df) = f(df, identity) = df\n \"\"\"\n dsk = dict()\n name = \"prefix_reduction-\" + tokenize(f, ddf, identity, **kwargs)\n meta = ddf._meta\n n = len(ddf.divisions) - 1\n divisions = [None] * (n + 1)\n\n N = 1\n while N < n:\n N *= 2\n for i in range(n):\n dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, i), identity], kwargs)\n for i in range(n, N):\n dsk[(name, i, 1, 0)] = identity\n\n d = 1\n while d < N:\n for i in range(0, N, 2 * d):\n dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (\n apply,\n f,\n [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, d, 0)],\n kwargs,\n )\n d *= 2\n\n dsk[(name, N - 1, N, 1)] = identity\n\n while d > 1:\n d //= 2\n for i in range(0, N, 2 * d):\n dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)\n dsk[(name, i + 2 * d - 1, d, 1)] = (\n apply,\n f,\n [(name, i + 2 * d - 1, 2 * d, 1), (name, i + d - 1, d, 0)],\n kwargs,\n )\n\n for i in range(n):\n dsk[(name, i)] = (apply, f, [(name, i, 1, 1), identity], kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, meta, divisions)\n\n\ndef suffix_reduction(f, ddf, identity, **kwargs):\n \"\"\"Computes the suffix sums of f on df\n\n If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with\n partitions [f(P1, f(P2, ...f(Pn, identity)...)),\n f(P2, ...f(Pn, identity)...),\n ...f(Pn, identity)...,\n ...]\n\n Parameters\n ----------\n f : callable\n an associative function f\n ddf : dd.DataFrame\n identity : pd.DataFrame\n an identity element of f, that is f(identity, df) = f(df, identity) = df\n kwargs : ??\n keyword arguments of f ??\n \"\"\"\n dsk = dict()\n name = \"suffix_reduction-\" + tokenize(f, ddf, identity, **kwargs)\n meta = ddf._meta\n n = len(ddf.divisions) - 1\n divisions = [None] * (n + 1)\n\n N = 1\n while N < n:\n N *= 2\n for i in range(n):\n dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, n - 1 - i), identity], kwargs)\n for i in range(n, N):\n dsk[(name, i, 1, 0)] = identity\n\n d = 1\n while d < N:\n for i in range(0, N, 2 * d):\n dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (\n apply,\n f,\n [(name, i + 2 * d - 1, d, 0), (name, i + d - 1, d, 0)],\n kwargs,\n )\n d *= 2\n\n dsk[(name, N - 1, N, 1)] = identity\n\n while d > 1:\n d //= 2\n for i in range(0, N, 2 * d):\n dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)\n dsk[(name, i + 2 * d - 1, d, 1)] = (\n apply,\n f,\n [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, 2 * d, 1)],\n kwargs,\n )\n\n for i in range(n):\n dsk[(name, i)] = (apply, f, [(name, n - 1 - i, 1, 1), identity], kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, meta, divisions)\n\n\ndef mapseries(base_chunk, 
concat_map):\n return base_chunk.map(concat_map)\n\n\ndef mapseries_combine(index, concat_result):\n final_series = concat_result.sort_index()\n final_series = index.to_series().map(final_series)\n return final_series\n\n\ndef series_map(base_series, map_series):\n npartitions = base_series.npartitions\n split_out = map_series.npartitions\n\n dsk = {}\n\n base_token_key = tokenize(base_series, split_out)\n base_split_prefix = \"base-split-{}\".format(base_token_key)\n base_shard_prefix = \"base-shard-{}\".format(base_token_key)\n for i, key in enumerate(base_series.__dask_keys__()):\n dsk[(base_split_prefix, i)] = (hash_shard, key, split_out)\n for j in range(split_out):\n dsk[(base_shard_prefix, 0, i, j)] = (getitem, (base_split_prefix, i), j)\n\n map_token_key = tokenize(map_series)\n map_split_prefix = \"map-split-{}\".format(map_token_key)\n map_shard_prefix = \"map-shard-{}\".format(map_token_key)\n for i, key in enumerate(map_series.__dask_keys__()):\n dsk[(map_split_prefix, i)] = (\n hash_shard,\n key,\n split_out,\n split_out_on_index,\n None,\n )\n for j in range(split_out):\n dsk[(map_shard_prefix, 0, i, j)] = (getitem, (map_split_prefix, i), j)\n\n token_key = tokenize(base_series, map_series)\n map_prefix = \"map-series-{}\".format(token_key)\n for i in range(npartitions):\n for j in range(split_out):\n dsk[(map_prefix, i, j)] = (\n mapseries,\n (base_shard_prefix, 0, i, j),\n (_concat, [(map_shard_prefix, 0, k, j) for k in range(split_out)]),\n )\n\n final_prefix = \"map-series-combine-{}\".format(token_key)\n for i, key in enumerate(base_series.index.__dask_keys__()):\n dsk[(final_prefix, i)] = (\n mapseries_combine,\n key,\n (_concat, [(map_prefix, i, j) for j in range(split_out)]),\n )\n\n meta = map_series._meta.copy()\n meta.index = base_series._meta.index\n meta = make_meta(meta)\n\n dependencies = [base_series, map_series, base_series.index]\n graph = HighLevelGraph.from_collections(\n final_prefix, dsk, dependencies=dependencies\n )\n divisions = list(base_series.divisions)\n\n return new_dd_object(graph, final_prefix, meta, divisions)\n" ]
[ [ "pandas.tseries.frequencies.to_offset", "pandas.Series", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.issubdtype", "pandas.api.types.is_scalar", "pandas.api.types.is_datetime64_any_dtype", "numpy.nan_to_num", "numpy.cumsum", "pandas.DataFrame", "numpy.all", "pandas.api.types.is_numeric_dtype", "numpy.max", "numpy.concatenate", "numpy.zeros_like", "numpy.any", "pandas.Int64Dtype.is_dtype", "pandas.api.types.is_bool_dtype", "numpy.where", "numpy.pad", "numpy.unique", "numpy.subtract", "pandas.Index", "pandas.DatetimeIndex", "numpy.nansum", "pandas.api.types.is_integer_dtype", "pandas.set_option", "pandas.api.types.is_timedelta64_dtype", "numpy.min", "pandas.Timedelta", "numpy.append", "pandas.api.types.is_list_like", "pandas.date_range", "numpy.errstate", "numpy.array", "numpy.random.RandomState", "pandas.api.types.is_float_dtype", "pandas.isnull", "pandas.api.types.is_dict_like", "numpy.isscalar", "numpy.mod", "pandas.Timestamp", "pandas.io.formats.printing.pprint_thing", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Fermilab-Quantum-Science/Z2Sim-public
[ "dfbefffd933aa2e39a0cb9f668b424596dfa7d35" ]
[ "z2_sim/src/QuantumCircuits/Cirq_Code/production.py" ]
[ "\"\"\"production.py - production code for computing observables.\"\"\"\nimport time\n\nfrom typing import Optional, Callable, Sequence\n\nimport numpy as np\nimport cirq\nimport qsimcirq\n\nfrom z2_sim.src.QuantumCircuits.Cirq_Code.Z2GaugeCirq import (\n make_trotter_circuit,\n make_trotter_circuit_ancillafree,\n make_ancillafree_inputs\n)\nfrom z2_sim.src.QuantumCircuits.Cirq_Code import util\n\n\ndef compute_obs_with_intermediate_state_vector(\n n: int,\n trotter_steps: int,\n jcoup: float,\n dt: float,\n all_observables: Optional[bool] = True,\n qsim_options: Optional[dict] = dict(t=32, f=4),\n decompbasis: Optional[str] = None,\n obc: Optional[bool] = True,\n noise_models: Optional[Sequence[cirq.devices.NoiseModel]] = None,\n verbose: Optional[bool] = False,\n) -> np.ndarray:\n \"\"\"\n Compute the time-dependent expectation value\n\n f_i = <0|U*(t) X_i U(t) X_s|0>\n\n for the Z2 trotter simulation.\n\n TODO:\n - units for J, dt?\n - Add option to combine single qubit gates and Ising exp(isZZ) style gates?\n Need to consult with Martin about the representation for these gates\n as they will likely not be supported with out-of-the-box cirq.\n\n Args:\n n: dimension of grid. Total number of qubits is `n ** 2 + 1`\n trotter_steps: Total number of trotter steps to simulate, with observable\n sampled after each step.\n jcoup: Transverse/E-field parameter.\n dt: Timestep size.\n all_observables: If True, compute the expectation value `f_i` for every\n i=1, ..., n**2. Otherwise, compute a single expectation value at\n each step using a source qubit near the center of the grid.\n decompbasis: Decomposition basis for the trotter circuits. If `None`,\n this will use `exp(i * m * ZZ)` Ising style gates natively\n obc: Flag setting open boundary conditions on the trotter simulation.\n verbose: Print statements to stdout.\n\n Returns:\n If `all_observables` is True, return an array with shape\n\n `(trotter_steps, n, n)`\n\n Otherwise, return an array with shape `(trotter_steps,)`. 
The first\n dimension of these outputs contains the observable values corresponding\n to each timestep from `i=1,...,trotter_steps`.\n \"\"\"\n\n vprint = lambda *args: print(*args) if verbose else None\n if decompbasis is None:\n decompbasis = 'MS'\n\n if verbose:\n # store times for computing expectation values and simulating\n # first axis: each trotter step\n # second axis: 0th element is computation, 1st element is observable time\n times_out = np.zeros((trotter_steps, 2))\n\n # Generate the first timestep that also prepares the correlator.\n initial_circuit, (source, ancilla) = make_trotter_circuit(n, n, n_timesteps=1, Jcoup=jcoup, Gamma=1/jcoup, dt=dt, initial=True, obc=obc, decompbasis=decompbasis)\n # Generate the unitaries applying all remaining trottersteps after the first.\n stepper_circuit, (source, ancilla) = make_trotter_circuit(n, n, n_timesteps=1, Jcoup=jcoup, Gamma=1/jcoup, dt=dt, initial=False, obc=obc, decompbasis=decompbasis)\n # In the case of noisy simulations we consider the contribution of noise\n # in the state preparation circuit to be negligible compared to the effects\n # due to noisy trotter steps.\n # WARNING: This will \"stack\" noise models; make sure that the noise models\n # all tag their noisy operations with the appropriate tag!!\n if noise_models:\n for noise_model in noise_models:\n stepper_circuit = stepper_circuit.with_noise(noise_model)\n qsim_simulator = qsimcirq.QSimSimulator(qsim_options=qsim_options)\n\n vprint(\"Number of qubits:\", len(stepper_circuit.all_qubits()))\n vprint(\"\\tJ={}, dt={}\".format(jcoup, dt))\n observables = [cirq.X(source) * cirq.X(ancilla) + 1j * cirq.X(source) * cirq.Y(ancilla)]\n # Mirror observables over the diagonal\n if all_observables:\n triu_idx = []\n for i in range(n):\n for j in range(n):\n if j < i:\n continue\n triu_idx.append((i, j))\n\n observables = [cirq.X(cirq.GridQubit(*idx)) * cirq.X(ancilla) + 1j * cirq.X(cirq.GridQubit(*idx)) * cirq.Y(ancilla) for idx in triu_idx]\n\n # Here we save intermediate wavefunctions and compute observables by\n # invoking qsim's function on an empty circuit with the saved state as input\n empty_c = cirq.Circuit()\n qubits = stepper_circuit.all_qubits()\n empty_c += cirq.IdentityGate(len(qubits)).on(*qubits)\n\n out = np.zeros((trotter_steps), dtype=np.complex64)\n if all_observables:\n out = np.zeros((trotter_steps, n, n), dtype=np.complex64)\n\n state = None\n for k in range(trotter_steps):\n # The first trotter step has a distinct generator for an initial state\n circuit = stepper_circuit\n if k == 0:\n circuit = initial_circuit\n t0 = time.time()\n\n result = qsim_simulator.simulate_sweep(\n circuit, initial_state=state, params=None)[0]\n\n dt = time.time() - t0\n if verbose:\n print(\"computation dt: \", dt)\n times_out[k, 0] = dt\n t0 = time.time()\n\n state = result.final_state_vector\n expectation_batch = qsim_simulator.simulate_expectation_values_sweep(\n empty_c, observables=observables, initial_state=state,\n params=None)[0]\n dt = time.time() - t0\n if verbose:\n print(\"observable dt: \", dt)\n times_out[k, 1] = dt\n if all_observables:\n for idx, (row, col) in enumerate(triu_idx):\n out[k, row, col] = expectation_batch[idx]\n out[k, col, row] = expectation_batch[idx]\n else:\n out[k] = expectation_batch[0]\n if verbose:\n return out, times_out\n\n return out\n\n\ndef compute_ancillafree_obs_with_intermediate_state_vector(\n n: int,\n trotter_steps: int,\n jcoup: float,\n dt: float,\n all_observables: Optional[bool] = True,\n qsim_options: Optional[dict] = 
dict(t=32, f=4),\n decompbasis: Optional[str] = None,\n obc: Optional[bool] = True,\n noise_models: Optional[Sequence[cirq.devices.NoiseModel]] = None,\n verbose: Optional[bool] = False,\n) -> np.ndarray:\n \"\"\"\n Compute the time-dependent expectation value\n\n f_i = <0|U*(t) X_i U(t) X_s|0>\n\n for the Z2 trotter simulation without using any ancilla.\n\n For documentation see `compute_obs_with_intermediate_state_vector`\n \"\"\"\n\n vprint = lambda *args: print(*args) if verbose else None\n if decompbasis is None:\n decompbasis = 'MS'\n\n if verbose:\n # store times for computing expectation values and simulating\n # first axis: each of the 4 pseudo-observables\n # second axi: each trotter step\n # third axis: 0th element is computation, 1st element is observable time\n times_out = np.zeros((4, trotter_steps, 2))\n\n # Generate the unitaries applying all remaining trottersteps after the first.\n stepper_circuit, source = make_trotter_circuit_ancillafree(n, n, n_timesteps=1, Jcoup=jcoup, Gamma=1/jcoup, dt=dt, obc=obc, decompbasis=decompbasis)\n # In the case of noisy simulations we consider the contribution of noise\n # in the state preparation circuit to be negligible compared to the effects\n # due to noisy trotter steps.\n # WARNING: This will \"stack\" noise models; make sure that the noise models\n # all tag their noisy operations with the appropriate tag!!\n if noise_models:\n for noise_model in noise_models:\n stepper_circuit = stepper_circuit.with_noise(noise_model)\n qubits = list(stepper_circuit.all_qubits())\n\n # Construct a list of four circuits whose outputs will combine to yield\n # the desired observable\n input_circuits = make_ancillafree_inputs(source, qubits)\n\n qsim_simulator = qsimcirq.QSimSimulator(qsim_options=qsim_options)\n\n vprint(\"Number of qubits:\", len(stepper_circuit.all_qubits()))\n vprint(\"\\tJ={}, dt={}\".format(jcoup, dt))\n observables = [cirq.X(source)]\n # Mirror observables over the diagonal\n if all_observables:\n triu_idx = []\n for i in range(n):\n for j in range(n):\n if j < i:\n continue\n triu_idx.append((i, j))\n\n observables = [cirq.X(cirq.GridQubit(*idx)) for idx in triu_idx]\n\n # Here we save intermediate wavefunctions and compute observables by\n # invoking qsim's function on an empty circuit with the saved state as input\n empty_c = cirq.Circuit(cirq.I.on_each(qubits))\n\n # We will initially compute four separate observables, and then sum along\n # the last axis to get the desired observable\n out_raw = np.zeros((4, trotter_steps), dtype=np.complex64)\n if all_observables:\n out_raw = np.zeros((4, trotter_steps, n, n), dtype=np.complex64)\n for j in range(4):\n # Select the appropriate state-preparation circuit\n initial_circuit = input_circuits[j]\n state = None\n for k in range(trotter_steps):\n # The first trotter step has a distinct generator for an initial state\n circuit = stepper_circuit\n if k == 0:\n circuit = initial_circuit + stepper_circuit\n\n t0 = time.time()\n result = qsim_simulator.simulate_sweep(\n circuit, initial_state=state, params=None)[0]\n dt = time.time() - t0\n if verbose:\n print(\"computation dt: \", dt)\n times_out[j, k, 0] = dt\n\n t0 = time.time()\n state = result.final_state_vector\n expectation_batch = qsim_simulator.simulate_expectation_values_sweep(\n empty_c, observables=observables, initial_state=state,\n params=None)[0]\n dt = time.time() - t0\n if verbose:\n print(\"observable dt: \", dt)\n times_out[j, k, 1] = dt\n if all_observables:\n\n for idx, (row, col) in enumerate(triu_idx):\n 
out_raw[j, k, row, col] = expectation_batch[idx]\n out_raw[j, k, col, row] = expectation_batch[idx]\n else:\n out_raw[j, k] = expectation_batch[0]\n\n # Postprocessing the ouputs of the circuit sweep follows from some algebra.\n # See the appendix of the manuscript draft.\n dim_arr = np.ones((1, out_raw.ndim), int).ravel()\n dim_arr[0] = -1\n mask = np.array([-1j, 1j, 1, -1]).reshape(dim_arr) / 2\n out = (out_raw * mask).sum(axis=0)\n if verbose:\n return out, times_out\n\n return out\n\n\ndef compute_noisy_obs(\n n: int,\n trotter_start: int,\n trotter_stop: int,\n jcoup: float,\n dt: float,\n all_observables: Optional[bool] = True,\n qsim_options: Optional[dict] = dict(t=32, f=4, r=100),\n decompbasis: Optional[str] = None,\n obc: Optional[bool] = True,\n noise_models: Optional[Sequence[cirq.devices.NoiseModel]] = None,\n verbose: Optional[bool] = False,\n) -> np.ndarray:\n \"\"\"\n Compute the time-dependent expectation value\n\n f_i = <0|U*(t) X_i U(t) X_s|0>\n\n for the Z2 trotter simulation.\n\n For documentation see `compute_obs_with_intermediate_state_vector`. Docs\n below detail the modified arguments. NOTE: for trotter_stop=y,\n trotter_start=x, the results will contain a total of `y - x` trotter steps\n simulated. Runtime scales like `poly(y - x)`.\n\n Args:\n trotter_start: At which trotter step to begin the simulation. The first\n entry will be the output of a circuit containing `trotter_start`\n many strotter steps (i.e. inclusive indexing).\n trotter_stop: At which trotter step to end the simulation. The final\n entry will be the output of a circuit containing `trotter_stop - 1`\n many trotter steps (i.e exclusive indexing).\n\n Returns:\n If `all_observables` is True, return an array with shape\n\n `(trotter_stop - trotter_start, n, n)`\n \"\"\"\n\n vprint = lambda *args: print(*args) if verbose else None\n if decompbasis is None:\n decompbasis = 'MS'\n\n if trotter_start < 1:\n raise ValueError(\"`trotter_start` must be at least 1.\")\n if trotter_stop <= trotter_start:\n raise ValueError(\"`trotter_stop` must be greater than `trotter_start`\")\n\n # Just get a template for defining observables.\n template, (source, ancilla) = make_trotter_circuit(n, n, n_timesteps=1, Jcoup=jcoup, Gamma=1/jcoup, dt=dt, initial=True, obc=obc, decompbasis=decompbasis)\n\n vprint(\"Number of qubits:\", len(template.all_qubits()))\n vprint(\"\\tJ={}, dt={}\".format(jcoup, dt))\n observables = [cirq.X(source) * cirq.X(ancilla) + 1j * cirq.X(source) * cirq.Y(ancilla)]\n\n # Mirror observables over the diagonal\n if all_observables:\n triu_idx = []\n for i in range(n):\n for j in range(n):\n if j < i:\n continue\n triu_idx.append((i, j))\n\n observables = [cirq.X(cirq.GridQubit(*idx)) * cirq.X(ancilla) + 1j * cirq.X(cirq.GridQubit(*idx)) * cirq.Y(ancilla) for idx in triu_idx]\n #\n # if all_observables:\n # observables = util.make_all_plaq_observables(source, ancilla, template)\n # # Reserve the identities of the source qubits for later\n # gridqubits = list(set(template.all_qubits()))\n # gridqubits.remove(ancilla)\n\n trotter_steps = trotter_stop - trotter_start\n out = np.zeros((trotter_steps), dtype=np.complex64)\n if all_observables:\n out = np.zeros((trotter_steps, n, n), dtype=np.complex64)\n\n # The off-by-one is due to the initial circuit containing a trotter step\n qsim_simulator = qsimcirq.QSimSimulator(qsim_options=qsim_options)\n\n for k, n_timesteps in enumerate(range(trotter_start, trotter_stop, 1)):\n\n # Generate the unitaries applying all remaining trottersteps after the 
first.\n complete_circuit, (source, ancilla) = make_trotter_circuit(n, n, n_timesteps=n_timesteps, Jcoup=jcoup, Gamma=1/jcoup, dt=dt, initial=True, obc=obc, decompbasis=decompbasis)\n # WARNING: This will \"stack\" noise models; make sure that the noise models\n # all tag their noisy operations with the appropriate tag!!\n if noise_models:\n for noise_model in noise_models:\n complete_circuit = complete_circuit.with_noise(noise_model)\n\n\n cts = count_operations_by_nqubits(complete_circuit)\n circuit_len = len(complete_circuit)\n print(\"RUNNING CIRCUIT FOR \")\n print(f\"\\tlength={circuit_len}\")\n print(\"\\t\",cts)\n print(f\"\\ttrottersteps={n_timesteps}\")\n print(\"\\toptions:\", qsim_options)\n\n expectation_batch = qsim_simulator.simulate_expectation_values(\n complete_circuit,\n observables=observables,\n initial_state=None,\n )\n\n if all_observables:\n for idx, (row, col) in enumerate(triu_idx):\n out[k, row, col] = expectation_batch[idx]\n out[k, col, row] = expectation_batch[idx]\n else:\n out[k] = expectation_batch[0]\n\n return out\n\n\n## DIAGNOSTIC CODE: DELETEME\nfrom collections import Counter\ndef count_operations_by_nqubits(circuit):\n ops = list(circuit.all_operations())\n counts = [x.gate.num_qubits() for x in ops]\n return Counter(counts)\n\n\ndef run_noiseless_parameter_sweep(\n n: int,\n trotter_steps: int,\n jcoup_arr: np.ndarray,\n dt_arr: np.ndarray,\n func: Callable,\n all_observables: Optional[bool] = True,\n **kwargs,\n) -> np.ndarray:\n \"\"\"Simulate a sweep over a set of physical parameters.\n\n This will compute the observables at every timestep, for every pair of\n parameters between `jcoup_arr` and `dt_arr`.\n\n Args:\n n: dimension of grid. Total number of qubits is `n ** 2 + 1`\n trotter_steps: Total number of trotter steps to simulate, with observable\n sampled after each step.\n jcoup_arr: Sweep over transverse/E-field parameter.\n dt_arr: Sweep over timestep size.\n func: This is the function with which to perform the sweep. This is\n expected to have a fixed signature, see the function\n\n `compute_obs_with_intermediate_state_vector`\n\n for reference.\n all_observables: If True, compute the expectation value `f_s` for every\n s=1, ..., n**2. Otherwise, compute a single expectation value at\n each step using a source qubit near the center of the grid.\n kwargs: see `compute_obs_with_intermediate_state_vector`.\n\n Returns:\n If `all_observables` is True, return an array with shape\n\n `(len(jcoup_arr), len(dt_arr), trotter_steps, n, n)`\n\n Otherwise, return an array with shape\n `(len(jcoup_arr), len(dt_arr), trotter_steps)`.\n \"\"\"\n n_j = len(jcoup_arr)\n n_dt = len(dt_arr)\n\n out = np.zeros((n_j, n_dt, trotter_steps), dtype=np.complex64)\n if all_observables:\n out = np.zeros((n_j, n_dt, trotter_steps, n, n), dtype=np.complex64)\n\n for j, jcoup in enumerate(jcoup_arr):\n for k, dt in enumerate(dt_arr):\n out[j,k] = func(n, trotter_steps, jcoup, dt, all_observables=all_observables, **kwargs)\n\n return out" ]
[ [ "numpy.array", "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FastTrackOrg/FastAnalysis
[ "fa8e8c72be034c3eb0fa3f40718134f784445a5c" ]
[ "fastanalysis/tests/test_plot.py" ]
[ "import pytest\nimport pandas\nimport numpy as np\n\nimport load\nimport plot\n\n\ndef test_velocity_dist_default_key():\n \"\"\"Test velocity distribution.\"\"\"\n tracking = load.Load(\"tests/tracking.txt\")\n plotObj = plot.Plot(tracking)\n velocityTest = plotObj.velocityDistribution(ids=[0, 1])\n refData = tracking.getObjects(0)\n a = (np.sqrt(np.diff(refData.xBody.values)**2 +\n np.diff(refData.yBody.values)**2)) / np.diff(refData.imageNumber.values)\n refData = tracking.getObjects(1)\n b = (np.sqrt(np.diff(refData.xBody.values)**2 +\n np.diff(refData.yBody.values)**2)) / np.diff(refData.imageNumber.values)\n pooled = np.concatenate((a, b))\n np.testing.assert_array_equal(pooled, velocityTest[1][0])\n\n velocityTest = plotObj.velocityDistribution(ids=[0, 1], pooled=False)\n np.testing.assert_array_equal(a, velocityTest[1][0])\n np.testing.assert_array_equal(b, velocityTest[1][1])\n\n refData = tracking.getObjectsInFrames(0, indexes=list(range(0, 100)))\n a = (np.sqrt(np.diff(refData.xBody.values)**2 +\n np.diff(refData.yBody.values)**2)) / np.diff(refData.imageNumber.values)\n velocityTest = plotObj.velocityDistribution(\n ids=[0], pooled=True, indexes=(0, 100))\n np.testing.assert_array_equal(a, velocityTest[1][0])\n\n\ndef test_velocity_dist_head():\n \"\"\"Test velocity distribution.\"\"\"\n tracking = load.Load(\"tests/tracking.txt\")\n plotObj = plot.Plot(tracking)\n velocityTest = plotObj.velocityDistribution(ids=[0, 1], key=\"Head\")\n refData = tracking.getObjects(0)\n a = (np.sqrt(np.diff(refData.xHead.values)**2 +\n np.diff(refData.yHead.values)**2)) / np.diff(refData.imageNumber.values)\n refData = tracking.getObjects(1)\n b = (np.sqrt(np.diff(refData.xHead.values)**2 +\n np.diff(refData.yHead.values)**2)) / np.diff(refData.imageNumber.values)\n pooled = np.concatenate((a, b))\n np.testing.assert_array_equal(pooled, velocityTest[1][0])\n\n velocityTest = plotObj.velocityDistribution(\n ids=[0, 1], pooled=False, key=\"Head\")\n np.testing.assert_array_equal(a, velocityTest[1][0])\n np.testing.assert_array_equal(b, velocityTest[1][1])\n\n refData = tracking.getObjectsInFrames(0, indexes=list(range(0, 100)))\n a = (np.sqrt(np.diff(refData.xHead.values)**2 +\n np.diff(refData.yHead.values)**2)) / np.diff(refData.imageNumber.values)\n velocityTest = plotObj.velocityDistribution(\n ids=[0], pooled=True, indexes=(0, 100), key=\"Head\")\n np.testing.assert_array_equal(a, velocityTest[1][0])\n" ]
[ [ "numpy.concatenate", "numpy.testing.assert_array_equal", "numpy.diff" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sashatankov/terrier
[ "c50a22a6a6d0ef5d01c2ad8760f02bcffb15f688" ]
[ "script/model/training_util/data_transforming_util.py" ]
[ "import numpy as np\n\nfrom info import data_info\nfrom type import OpUnit\n\n\ndef _tuple_num_linear_train_transform(x, y):\n # Linearly transform down the target according to the tuple num value in the input\n tuple_num = np.copy(x[:, data_info.TUPLE_NUM_INDEX])\n return y / tuple_num[:, np.newaxis]\n\n\ndef _tuple_num_linear_predict_transform(x, y):\n # Linearly transform up the target according to the tuple num value in the input\n tuple_num = np.copy(x[:, data_info.TUPLE_NUM_INDEX])\n return y * tuple_num[:, np.newaxis]\n\n\n# Transform the target linearly according to the tuple num\n_tuple_num_linear_transformer = (_tuple_num_linear_train_transform, _tuple_num_linear_predict_transform)\n\n\ndef _tuple_num_linear_log_train_transform(x, y):\n # Transform down the target according to the linear-log (nlogn) tuple num value in the input\n tuple_num = np.copy(x[:, data_info.TUPLE_NUM_INDEX])\n return y / (tuple_num * np.log2(tuple_num) + 1e-6)[:, np.newaxis]\n\n\ndef _tuple_num_linear_log_predict_transform(x, y):\n # Transform up the target according to the linear-log (nlogn) tuple num value in the input\n tuple_num = np.copy(x[:, data_info.TUPLE_NUM_INDEX])\n return y * (tuple_num * np.log2(tuple_num) + 1e-6)[:, np.newaxis]\n\n\n# Transform the target in a linear-log way (nlogn) according to the tuple num\n_tuple_num_linear_log_transformer = (_tuple_num_linear_log_train_transform, _tuple_num_linear_log_predict_transform)\n\n# Map the opunit to the transformer it needs for mini-model training\nOPUNIT_MODELING_TRANSFORMER_MAP = {\n OpUnit.GC_DEALLOC: None,\n OpUnit.GC_UNLINK: None,\n OpUnit.LOG_SERIAL: None,\n OpUnit.LOG_CONSUME: None,\n OpUnit.TXN_BEGIN: None,\n OpUnit.TXN_COMMIT: None,\n\n # Execution engine opunits\n OpUnit.SEQ_SCAN: _tuple_num_linear_transformer,\n OpUnit.IDX_SCAN: _tuple_num_linear_transformer,\n OpUnit.HASHJOIN_BUILD: _tuple_num_linear_transformer,\n OpUnit.HASHJOIN_PROBE: _tuple_num_linear_transformer,\n OpUnit.AGG_BUILD: _tuple_num_linear_transformer,\n OpUnit.AGG_ITERATE: _tuple_num_linear_transformer,\n OpUnit.SORT_ITERATE: _tuple_num_linear_transformer,\n OpUnit.INSERT: _tuple_num_linear_transformer,\n OpUnit.UPDATE: _tuple_num_linear_transformer,\n OpUnit.DELETE: _tuple_num_linear_transformer,\n OpUnit.OP_INTEGER_PLUS_OR_MINUS: _tuple_num_linear_transformer,\n OpUnit.OP_INTEGER_MULTIPLY: _tuple_num_linear_transformer,\n OpUnit.OP_INTEGER_DIVIDE: _tuple_num_linear_transformer,\n OpUnit.OP_INTEGER_COMPARE: _tuple_num_linear_transformer,\n OpUnit.OP_DECIMAL_PLUS_OR_MINUS: _tuple_num_linear_transformer,\n OpUnit.OP_DECIMAL_MULTIPLY: _tuple_num_linear_transformer,\n OpUnit.OP_DECIMAL_DIVIDE: _tuple_num_linear_transformer,\n OpUnit.OP_DECIMAL_COMPARE: _tuple_num_linear_transformer,\n OpUnit.OUTPUT: _tuple_num_linear_transformer,\n\n OpUnit.SORT_BUILD: _tuple_num_linear_log_transformer,\n}\n" ]
[ [ "numpy.copy", "numpy.log2" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RaghavendraSingh/TensorFlowOnSpark
[ "3a207830a8fce53ced20df2f5b7efa3be0cc40c4" ]
[ "test/test_TFCluster.py" ]
[ "import unittest\nimport test\nfrom tensorflowonspark import TFCluster, TFNode\n\n\nclass TFClusterTest(test.SparkTest):\n @classmethod\n def setUpClass(cls):\n super(TFClusterTest, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n super(TFClusterTest, cls).tearDownClass()\n\n def test_basic_tf(self):\n \"\"\"Single-node TF graph (w/ args) running independently on multiple executors.\"\"\"\n def _map_fun(args, ctx):\n import tensorflow as tf\n x = tf.constant(args['x'])\n y = tf.constant(args['y'])\n sum = tf.add(x, y)\n with tf.Session() as sess:\n result = sess.run([sum])\n assert result[0] == 3\n\n args = {'x': 1, 'y': 2}\n cluster = TFCluster.run(self.sc, _map_fun, tf_args=args, num_executors=self.num_workers, num_ps=0)\n cluster.shutdown()\n\n def test_inputmode_spark(self):\n \"\"\"Distributed TF cluster w/ InputMode.SPARK\"\"\"\n def _map_fun(args, ctx):\n import tensorflow as tf\n cluster, server = TFNode.start_cluster_server(ctx)\n if ctx.job_name == \"ps\":\n server.join()\n elif ctx.job_name == \"worker\":\n with tf.device(tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:%d\" % ctx.task_index,\n cluster=cluster)):\n x = tf.placeholder(tf.int32, [None, 1])\n sq = tf.square(x)\n init_op = tf.global_variables_initializer()\n with tf.train.MonitoredTrainingSession(is_chief=(ctx.task_index == 0)) as sess:\n tf_feed = TFNode.DataFeed(ctx.mgr, False)\n while not sess.should_stop() and not tf_feed.should_stop():\n outputs = sess.run([sq], feed_dict={x: tf_feed.next_batch(10)})\n tf_feed.batch_results(outputs[0])\n\n input = [[x] for x in range(1000)] # set up input as tensors of shape [1] to match placeholder\n rdd = self.sc.parallelize(input, 10)\n cluster = TFCluster.run(self.sc, _map_fun, tf_args={}, num_executors=self.num_workers, num_ps=0, input_mode=TFCluster.InputMode.SPARK)\n rdd_out = cluster.inference(rdd)\n rdd_sum = rdd_out.sum()\n self.assertEqual(rdd_sum, sum([x * x for x in range(1000)]))\n cluster.shutdown()\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "tensorflow.constant", "tensorflow.placeholder", "tensorflow.train.replica_device_setter", "tensorflow.global_variables_initializer", "tensorflow.add", "tensorflow.square", "tensorflow.Session", "tensorflow.train.MonitoredTrainingSession" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
poldracklab/fmriprep
[ "5e5c1b61af35cded21879b0dc1a83673c8a46430" ]
[ "fmriprep/config.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n#\n# Copyright 2021 The NiPreps Developers <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# We support and encourage derived works from this project, please read\n# about our expectations at\n#\n# https://www.nipreps.org/community/licensing/\n#\nr\"\"\"\nA Python module to maintain unique, run-wide *fMRIPrep* settings.\n\nThis module implements the memory structures to keep a consistent, singleton config.\nSettings are passed across processes via filesystem, and a copy of the settings for\neach run and subject is left under\n``<fmriprep_dir>/sub-<participant_id>/log/<run_unique_id>/fmriprep.toml``.\nSettings are stored using :abbr:`ToML (Tom's Markup Language)`.\nThe module has a :py:func:`~fmriprep.config.to_filename` function to allow writting out\nthe settings to hard disk in *ToML* format, which looks like:\n\n.. literalinclude:: ../fmriprep/data/tests/config.toml\n :language: toml\n :name: fmriprep.toml\n :caption: **Example file representation of fMRIPrep settings**.\n\nThis config file is used to pass the settings across processes,\nusing the :py:func:`~fmriprep.config.load` function.\n\nConfiguration sections\n----------------------\n.. autoclass:: environment\n :members:\n.. autoclass:: execution\n :members:\n.. autoclass:: workflow\n :members:\n.. autoclass:: nipype\n :members:\n\nUsage\n-----\nA config file is used to pass settings and collect information as the execution\ngraph is built across processes.\n\n.. code-block:: Python\n\n from fmriprep import config\n config_file = config.execution.work_dir / '.fmriprep.toml'\n config.to_filename(config_file)\n # Call build_workflow(config_file, retval) in a subprocess\n with Manager() as mgr:\n from .workflow import build_workflow\n retval = mgr.dict()\n p = Process(target=build_workflow, args=(str(config_file), retval))\n p.start()\n p.join()\n config.load(config_file)\n # Access configs from any code section as:\n value = config.section.setting\n\nLogging\n-------\n.. autoclass:: loggers\n :members:\n\nOther responsibilities\n----------------------\nThe :py:mod:`config` is responsible for other conveniency actions.\n\n * Switching Python's :obj:`multiprocessing` to *forkserver* mode.\n * Set up a filter for warnings as early as possible.\n * Automated I/O magic operations. 
Some conversions need to happen in the\n store/load processes (e.g., from/to :obj:`~pathlib.Path` \\<-\\> :obj:`str`,\n :py:class:`~bids.layout.BIDSLayout`, etc.)\n\n\"\"\"\nimport os\nfrom multiprocessing import set_start_method\n\n# Disable NiPype etelemetry always\n_disable_et = bool(\n os.getenv(\"NO_ET\") is not None or os.getenv(\"NIPYPE_NO_ET\") is not None\n)\nos.environ[\"NIPYPE_NO_ET\"] = \"1\"\nos.environ[\"NO_ET\"] = \"1\"\n\nCONFIG_FILENAME = \"fmriprep.toml\"\n\ntry:\n set_start_method(\"forkserver\")\nexcept RuntimeError:\n pass # context has been already set\nfinally:\n # Defer all custom import for after initializing the forkserver and\n # ignoring the most annoying warnings\n import sys\n import random\n from uuid import uuid4\n from time import strftime\n\n from pathlib import Path\n from nipype import __version__ as _nipype_ver\n from templateflow import __version__ as _tf_ver\n from . import __version__\n\nif not hasattr(sys, \"_is_pytest_session\"):\n sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings\n# Disable all warnings in main and children processes only on production versions\nif not any(\n (\n \"+\" in __version__,\n __version__.endswith(\".dirty\"),\n os.getenv(\"FMRIPREP_DEV\", \"0\").lower() in (\"1\", \"on\", \"true\", \"y\", \"yes\"),\n )\n):\n from ._warnings import logging\n\n os.environ[\"PYTHONWARNINGS\"] = \"ignore\"\nelif os.getenv(\"FMRIPREP_WARNINGS\", \"0\").lower() in (\"1\", \"on\", \"true\", \"y\", \"yes\"):\n # allow disabling warnings on development versions\n # https://github.com/nipreps/fmriprep/pull/2080#discussion_r409118765\n from ._warnings import logging\nelse:\n import logging\n\nlogging.addLevelName(25, \"IMPORTANT\") # Add a new level between INFO and WARNING\nlogging.addLevelName(15, \"VERBOSE\") # Add a new level between INFO and DEBUG\n\nDEFAULT_MEMORY_MIN_GB = 0.01\n\n# Ping NiPype eTelemetry once if env var was not set\n# workers on the pool will have the env variable set from the master process\nif not _disable_et:\n # Just get so analytics track one hit\n from contextlib import suppress\n from requests import get as _get_url, ConnectionError, ReadTimeout\n\n with suppress((ConnectionError, ReadTimeout)):\n _get_url(\"https://rig.mit.edu/et/projects/nipy/nipype\", timeout=0.05)\n\n# Execution environment\n_exec_env = os.name\n_docker_ver = None\n# special variable set in the container\nif os.getenv(\"IS_DOCKER_8395080871\"):\n _exec_env = \"singularity\"\n _cgroup = Path(\"/proc/1/cgroup\")\n if _cgroup.exists() and \"docker\" in _cgroup.read_text():\n _docker_ver = os.getenv(\"DOCKER_VERSION_8395080871\")\n _exec_env = \"fmriprep-docker\" if _docker_ver else \"docker\"\n del _cgroup\n\n_fs_license = os.getenv(\"FS_LICENSE\")\nif not _fs_license and os.getenv(\"FREESURFER_HOME\"):\n _fs_home = os.getenv(\"FREESURFER_HOME\")\n if _fs_home and (Path(_fs_home) / \"license.txt\").is_file():\n _fs_license = str(Path(_fs_home) / \"license.txt\")\n del _fs_home\n\n_templateflow_home = Path(\n os.getenv(\n \"TEMPLATEFLOW_HOME\", os.path.join(os.getenv(\"HOME\"), \".cache\", \"templateflow\")\n )\n)\n\ntry:\n from psutil import virtual_memory\n\n _free_mem_at_start = round(virtual_memory().free / 1024 ** 3, 1)\nexcept Exception:\n _free_mem_at_start = None\n\n_oc_limit = \"n/a\"\n_oc_policy = \"n/a\"\ntry:\n # Memory policy may have a large effect on types of errors experienced\n _proc_oc_path = Path(\"/proc/sys/vm/overcommit_memory\")\n if _proc_oc_path.exists():\n _oc_policy = {\"0\": \"heuristic\", \"1\": 
\"always\", \"2\": \"never\"}.get(\n _proc_oc_path.read_text().strip(), \"unknown\"\n )\n if _oc_policy != \"never\":\n _proc_oc_kbytes = Path(\"/proc/sys/vm/overcommit_kbytes\")\n if _proc_oc_kbytes.exists():\n _oc_limit = _proc_oc_kbytes.read_text().strip()\n if (\n _oc_limit in (\"0\", \"n/a\")\n and Path(\"/proc/sys/vm/overcommit_ratio\").exists()\n ):\n _oc_limit = \"{}%\".format(\n Path(\"/proc/sys/vm/overcommit_ratio\").read_text().strip()\n )\nexcept Exception:\n pass\n\n\n# Debug modes are names that influence the exposure of internal details to\n# the user, either through additional derivatives or increased verbosity\nDEBUG_MODES = (\"compcor\", \"fieldmaps\")\n\n\nclass _Config:\n \"\"\"An abstract class forbidding instantiation.\"\"\"\n\n _paths = tuple()\n\n def __init__(self):\n \"\"\"Avert instantiation.\"\"\"\n raise RuntimeError(\"Configuration type is not instantiable.\")\n\n @classmethod\n def load(cls, settings, init=True, ignore=None):\n \"\"\"Store settings from a dictionary.\"\"\"\n ignore = ignore or {}\n for k, v in settings.items():\n if k in ignore or v is None:\n continue\n if k in cls._paths:\n setattr(cls, k, Path(v).absolute())\n elif hasattr(cls, k):\n setattr(cls, k, v)\n\n if init:\n try:\n cls.init()\n except AttributeError:\n pass\n\n @classmethod\n def get(cls):\n \"\"\"Return defined settings.\"\"\"\n from niworkflows.utils.spaces import SpatialReferences, Reference\n\n out = {}\n for k, v in cls.__dict__.items():\n if k.startswith(\"_\") or v is None:\n continue\n if callable(getattr(cls, k)):\n continue\n if k in cls._paths:\n v = str(v)\n if isinstance(v, SpatialReferences):\n v = \" \".join([str(s) for s in v.references]) or None\n if isinstance(v, Reference):\n v = str(v) or None\n out[k] = v\n return out\n\n\nclass environment(_Config):\n \"\"\"\n Read-only options regarding the platform and environment.\n\n Crawls runtime descriptive settings (e.g., default FreeSurfer license,\n execution environment, nipype and *fMRIPrep* versions, etc.).\n The ``environment`` section is not loaded in from file,\n only written out when settings are exported.\n This config section is useful when reporting issues,\n and these variables are tracked whenever the user does not\n opt-out using the ``--notrack`` argument.\n\n \"\"\"\n\n cpu_count = os.cpu_count()\n \"\"\"Number of available CPUs.\"\"\"\n exec_docker_version = _docker_ver\n \"\"\"Version of Docker Engine.\"\"\"\n exec_env = _exec_env\n \"\"\"A string representing the execution platform.\"\"\"\n free_mem = _free_mem_at_start\n \"\"\"Free memory at start.\"\"\"\n overcommit_policy = _oc_policy\n \"\"\"Linux's kernel virtual memory overcommit policy.\"\"\"\n overcommit_limit = _oc_limit\n \"\"\"Linux's kernel virtual memory overcommit limits.\"\"\"\n nipype_version = _nipype_ver\n \"\"\"Nipype's current version.\"\"\"\n templateflow_version = _tf_ver\n \"\"\"The TemplateFlow client version installed.\"\"\"\n version = __version__\n \"\"\"*fMRIPrep*'s version.\"\"\"\n\n\nclass nipype(_Config):\n \"\"\"Nipype settings.\"\"\"\n\n crashfile_format = \"txt\"\n \"\"\"The file format for crashfiles, either text or pickle.\"\"\"\n get_linked_libs = False\n \"\"\"Run NiPype's tool to enlist linked libraries for every interface.\"\"\"\n memory_gb = None\n \"\"\"Estimation in GB of the RAM this workflow can allocate at any given time.\"\"\"\n nprocs = os.cpu_count()\n \"\"\"Number of processes (compute tasks) that can be run in parallel (multiprocessing only).\"\"\"\n omp_nthreads = None\n \"\"\"Number of CPUs a 
single process can access for multithreaded execution.\"\"\"\n plugin = \"MultiProc\"\n \"\"\"NiPype's execution plugin.\"\"\"\n plugin_args = {\n \"maxtasksperchild\": 1,\n \"raise_insufficient\": False,\n }\n \"\"\"Settings for NiPype's execution plugin.\"\"\"\n resource_monitor = False\n \"\"\"Enable resource monitor.\"\"\"\n stop_on_first_crash = True\n \"\"\"Whether the workflow should stop or continue after the first error.\"\"\"\n\n @classmethod\n def get_plugin(cls):\n \"\"\"Format a dictionary for Nipype consumption.\"\"\"\n out = {\n \"plugin\": cls.plugin,\n \"plugin_args\": cls.plugin_args,\n }\n if cls.plugin in (\"MultiProc\", \"LegacyMultiProc\"):\n out[\"plugin_args\"][\"n_procs\"] = int(cls.nprocs)\n if cls.memory_gb:\n out[\"plugin_args\"][\"memory_gb\"] = float(cls.memory_gb)\n return out\n\n @classmethod\n def init(cls):\n \"\"\"Set NiPype configurations.\"\"\"\n from nipype import config as ncfg\n\n # Configure resource_monitor\n if cls.resource_monitor:\n ncfg.update_config(\n {\n \"monitoring\": {\n \"enabled\": cls.resource_monitor,\n \"sample_frequency\": \"0.5\",\n \"summary_append\": True,\n }\n }\n )\n ncfg.enable_resource_monitor()\n\n # Nipype config (logs and execution)\n ncfg.update_config(\n {\n \"execution\": {\n \"crashdump_dir\": str(execution.log_dir),\n \"crashfile_format\": cls.crashfile_format,\n \"get_linked_libs\": cls.get_linked_libs,\n \"stop_on_first_crash\": cls.stop_on_first_crash,\n \"check_version\": False, # disable future telemetry\n }\n }\n )\n\n if cls.omp_nthreads is None:\n cls.omp_nthreads = min(\n cls.nprocs - 1 if cls.nprocs > 1 else os.cpu_count(), 8\n )\n\n\nclass execution(_Config):\n \"\"\"Configure run-level settings.\"\"\"\n\n anat_derivatives = None\n \"\"\"A path where anatomical derivatives are found to fast-track *sMRIPrep*.\"\"\"\n bids_dir = None\n \"\"\"An existing path to the dataset, which must be BIDS-compliant.\"\"\"\n bids_database_dir = None\n \"\"\"Path to the directory containing SQLite database indices for the input BIDS dataset.\"\"\"\n bids_description_hash = None\n \"\"\"Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset.\"\"\"\n bids_filters = None\n \"\"\"A dictionary of BIDS selection filters.\"\"\"\n boilerplate_only = False\n \"\"\"Only generate a boilerplate.\"\"\"\n sloppy = False\n \"\"\"Run in sloppy mode (meaning, suboptimal parameters that minimize run-time).\"\"\"\n debug = []\n \"\"\"Debug mode(s).\"\"\"\n echo_idx = None\n \"\"\"Select a particular echo for multi-echo EPI datasets.\"\"\"\n fmriprep_dir = None\n \"\"\"Root of fMRIPrep BIDS Derivatives dataset. 
Depends on output_layout.\"\"\"\n fs_license_file = _fs_license\n \"\"\"An existing file containing a FreeSurfer license.\"\"\"\n fs_subjects_dir = None\n \"\"\"FreeSurfer's subjects directory.\"\"\"\n layout = None\n \"\"\"A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`.\"\"\"\n log_dir = None\n \"\"\"The path to a directory that contains execution logs.\"\"\"\n log_level = 25\n \"\"\"Output verbosity.\"\"\"\n low_mem = None\n \"\"\"Utilize uncompressed NIfTIs and other tricks to minimize memory allocation.\"\"\"\n md_only_boilerplate = False\n \"\"\"Do not convert boilerplate from MarkDown to LaTex and HTML.\"\"\"\n notrack = False\n \"\"\"Do not monitor *fMRIPrep* using Sentry.io.\"\"\"\n output_dir = None\n \"\"\"Folder where derivatives will be stored.\"\"\"\n me_output_echos = False\n \"\"\"Output individual echo time series with slice, motion and susceptibility correction\"\"\"\n output_layout = None\n \"\"\"Layout of derivatives within output_dir.\"\"\"\n output_spaces = None\n \"\"\"List of (non)standard spaces designated (with the ``--output-spaces`` flag of\n the command line) as spatial references for outputs.\"\"\"\n reports_only = False\n \"\"\"Only build the reports, based on the reportlets found in a cached working directory.\"\"\"\n run_uuid = f\"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}\"\n \"\"\"Unique identifier of this particular run.\"\"\"\n participant_label = None\n \"\"\"List of participant identifiers that are to be preprocessed.\"\"\"\n task_id = None\n \"\"\"Select a particular task from all available in the dataset.\"\"\"\n templateflow_home = _templateflow_home\n \"\"\"The root folder of the TemplateFlow client.\"\"\"\n work_dir = Path(\"work\").absolute()\n \"\"\"Path to a working directory where intermediate results will be available.\"\"\"\n write_graph = False\n \"\"\"Write out the computational graph corresponding to the planned preprocessing.\"\"\"\n\n _layout = None\n\n _paths = (\n \"anat_derivatives\",\n \"bids_dir\",\n \"bids_database_dir\",\n \"fmriprep_dir\",\n \"fs_license_file\",\n \"fs_subjects_dir\",\n \"layout\",\n \"log_dir\",\n \"output_dir\",\n \"templateflow_home\",\n \"work_dir\",\n )\n\n @classmethod\n def init(cls):\n \"\"\"Create a new BIDS Layout accessible with :attr:`~execution.layout`.\"\"\"\n if cls.fs_license_file and Path(cls.fs_license_file).is_file():\n os.environ[\"FS_LICENSE\"] = str(cls.fs_license_file)\n\n if cls._layout is None:\n import re\n from bids.layout.index import BIDSLayoutIndexer\n from bids.layout import BIDSLayout\n\n _db_path = cls.bids_database_dir or (\n cls.work_dir / cls.run_uuid / \"bids_db\"\n )\n _db_path.mkdir(exist_ok=True, parents=True)\n\n # Recommended after PyBIDS 12.1\n _indexer = BIDSLayoutIndexer(\n validate=False,\n ignore=(\n \"code\",\n \"stimuli\",\n \"sourcedata\",\n \"models\",\n re.compile(r\"^\\.\"),\n re.compile(\n r\"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(beh|dwi|eeg|ieeg|meg|perf)\"\n ),\n ),\n )\n cls._layout = BIDSLayout(\n str(cls.bids_dir),\n database_path=_db_path,\n reset_database=cls.bids_database_dir is None,\n indexer=_indexer,\n )\n cls.bids_database_dir = _db_path\n cls.layout = cls._layout\n if cls.bids_filters:\n from bids.layout import Query\n\n # unserialize pybids Query enum values\n for acq, filters in cls.bids_filters.items():\n cls.bids_filters[acq] = {\n k: getattr(Query, v[7:-4])\n if not isinstance(v, Query) and \"Query\" in v\n else v\n for k, v in filters.items()\n }\n\n if \"all\" in cls.debug:\n cls.debug = list(DEBUG_MODES)\n\n\n# These 
variables are not necessary anymore\ndel _fs_license\ndel _exec_env\ndel _nipype_ver\ndel _templateflow_home\ndel _tf_ver\ndel _free_mem_at_start\ndel _oc_limit\ndel _oc_policy\n\n\nclass workflow(_Config):\n \"\"\"Configure the particular execution graph of this workflow.\"\"\"\n\n anat_only = False\n \"\"\"Execute the anatomical preprocessing only.\"\"\"\n aroma_err_on_warn = None\n \"\"\"Cast AROMA warnings to errors.\"\"\"\n aroma_melodic_dim = None\n \"\"\"Number of ICA components to be estimated by MELODIC\n (positive = exact, negative = maximum).\"\"\"\n bold2t1w_dof = None\n \"\"\"Degrees of freedom of the BOLD-to-T1w registration steps.\"\"\"\n bold2t1w_init = \"register\"\n \"\"\"Whether to use standard coregistration ('register') or to initialize coregistration from the\n BOLD image-header ('header').\"\"\"\n cifti_output = None\n \"\"\"Generate HCP Grayordinates, accepts either ``'91k'`` (default) or ``'170k'``.\"\"\"\n dummy_scans = None\n \"\"\"Set a number of initial scans to be considered nonsteady states.\"\"\"\n fmap_bspline = None\n \"\"\"Regularize fieldmaps with a field of B-Spline basis.\"\"\"\n fmap_demean = None\n \"\"\"Remove the mean from fieldmaps.\"\"\"\n force_syn = None\n \"\"\"Run *fieldmap-less* susceptibility-derived distortions estimation.\"\"\"\n hires = None\n \"\"\"Run FreeSurfer ``recon-all`` with the ``-hires`` flag.\"\"\"\n ignore = None\n \"\"\"Ignore particular steps for *fMRIPrep*.\"\"\"\n longitudinal = False\n \"\"\"Run FreeSurfer ``recon-all`` with the ``-logitudinal`` flag.\"\"\"\n medial_surface_nan = None\n \"\"\"Fill medial surface with :abbr:`NaNs (not-a-number)` when sampling.\"\"\"\n regressors_all_comps = None\n \"\"\"Return all CompCor components.\"\"\"\n regressors_dvars_th = None\n \"\"\"Threshold for DVARS.\"\"\"\n regressors_fd_th = None\n \"\"\"Threshold for :abbr:`FD (frame-wise displacement)`.\"\"\"\n run_reconall = True\n \"\"\"Run FreeSurfer's surface reconstruction.\"\"\"\n skull_strip_fixed_seed = False\n \"\"\"Fix a seed for skull-stripping.\"\"\"\n skull_strip_template = \"OASIS30ANTs\"\n \"\"\"Change default brain extraction template.\"\"\"\n skull_strip_t1w = \"force\"\n \"\"\"Skip brain extraction of the T1w image (default is ``force``, meaning that\n *fMRIPrep* will run brain extraction of the T1w).\"\"\"\n slice_time_ref = 0.5\n \"\"\"The time of the reference slice to correct BOLD values to, as a fraction\n acquisition time. 0 indicates the start, 0.5 the midpoint, and 1 the end\n of acquisition. 
The alias `start` corresponds to 0, and `middle` to 0.5.\n The default value is 0.5.\"\"\"\n spaces = None\n \"\"\"Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences`\n instance keeping standard and nonstandard spaces.\"\"\"\n topup_max_vols = 5\n \"\"\"Maximum number of volumes to use with TOPUP, per-series (EPI or BOLD).\"\"\"\n use_aroma = None\n \"\"\"Run ICA-:abbr:`AROMA (automatic removal of motion artifacts)`.\"\"\"\n use_bbr = None\n \"\"\"Run boundary-based registration for BOLD-to-T1w registration.\"\"\"\n use_syn_sdc = None\n \"\"\"Run *fieldmap-less* susceptibility-derived distortions estimation\n in the absence of any alternatives.\"\"\"\n\n\nclass loggers:\n \"\"\"Keep loggers easily accessible (see :py:func:`init`).\"\"\"\n\n _fmt = \"%(asctime)s,%(msecs)d %(name)-2s \" \"%(levelname)-2s:\\n\\t %(message)s\"\n _datefmt = \"%y%m%d-%H:%M:%S\"\n\n default = logging.getLogger()\n \"\"\"The root logger.\"\"\"\n cli = logging.getLogger(\"cli\")\n \"\"\"Command-line interface logging.\"\"\"\n workflow = logging.getLogger(\"nipype.workflow\")\n \"\"\"NiPype's workflow logger.\"\"\"\n interface = logging.getLogger(\"nipype.interface\")\n \"\"\"NiPype's interface logger.\"\"\"\n utils = logging.getLogger(\"nipype.utils\")\n \"\"\"NiPype's utils logger.\"\"\"\n\n @classmethod\n def init(cls):\n \"\"\"\n Set the log level, initialize all loggers into :py:class:`loggers`.\n\n * Add new logger levels (25: IMPORTANT, and 15: VERBOSE).\n * Add a new sub-logger (``cli``).\n * Logger configuration.\n\n \"\"\"\n from nipype import config as ncfg\n\n _handler = logging.StreamHandler(stream=sys.stdout)\n _handler.setFormatter(logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt))\n cls.cli.addHandler(_handler)\n cls.default.setLevel(execution.log_level)\n cls.cli.setLevel(execution.log_level)\n cls.interface.setLevel(execution.log_level)\n cls.workflow.setLevel(execution.log_level)\n cls.utils.setLevel(execution.log_level)\n ncfg.update_config(\n {\"logging\": {\"log_directory\": str(execution.log_dir), \"log_to_file\": True}}\n )\n\n\nclass seeds(_Config):\n \"\"\"Initialize the PRNG and track random seed assignments\"\"\"\n\n _random_seed = None\n master = None\n \"\"\"Master random seed to initialize the Pseudorandom Number Generator (PRNG)\"\"\"\n ants = None\n \"\"\"Seed used for antsRegistration, antsAI, antsMotionCorr\"\"\"\n numpy = None\n \"\"\"Seed used by NumPy\"\"\"\n\n @classmethod\n def init(cls):\n if cls._random_seed is not None:\n cls.master = cls._random_seed\n if cls.master is None:\n cls.master = random.randint(1, 65536)\n random.seed(cls.master) # initialize the PRNG\n # functions to set program specific seeds\n cls.ants = _set_ants_seed()\n cls.numpy = _set_numpy_seed()\n\n\ndef _set_ants_seed():\n \"\"\"Fix random seed for antsRegistration, antsAI, antsMotionCorr\"\"\"\n val = random.randint(1, 65536)\n os.environ[\"ANTS_RANDOM_SEED\"] = str(val)\n return val\n\n\ndef _set_numpy_seed():\n \"\"\"NumPy's random seed is independant from Python's `random` module\"\"\"\n import numpy as np\n val = random.randint(1, 65536)\n np.random.seed(val)\n return val\n\n\ndef from_dict(settings, init=True, ignore=None):\n \"\"\"Read settings from a flat dictionary.\n\n Arguments\n ---------\n setting : dict\n Settings to apply to any configuration\n init : `bool` or :py:class:`~collections.abc.Container`\n Initialize all, none, or a subset of configurations.\n ignore : :py:class:`~collections.abc.Container`\n Collection of keys in ``setting`` to ignore\n \"\"\"\n\n # Accept 
global True/False or container of configs to initialize\n def initialize(x):\n return init if init in (True, False) else x in init\n\n nipype.load(settings, init=initialize('nipype'), ignore=ignore)\n execution.load(settings, init=initialize('execution'), ignore=ignore)\n workflow.load(settings, init=initialize('workflow'), ignore=ignore)\n seeds.load(settings, init=initialize('seeds'), ignore=ignore)\n\n loggers.init()\n\n\ndef load(filename, skip=None, init=True):\n \"\"\"Load settings from file.\n\n Arguments\n ---------\n filename : :py:class:`os.PathLike`\n TOML file containing fMRIPrep configuration.\n skip : dict or None\n Sets of values to ignore during load, keyed by section name\n init : `bool` or :py:class:`~collections.abc.Container`\n Initialize all, none, or a subset of configurations.\n \"\"\"\n from toml import loads\n\n skip = skip or {}\n\n # Accept global True/False or container of configs to initialize\n def initialize(x):\n return init if init in (True, False) else x in init\n\n filename = Path(filename)\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != \"environment\":\n section = getattr(sys.modules[__name__], sectionname)\n ignore = skip.get(sectionname)\n section.load(configs, ignore=ignore, init=initialize(sectionname))\n init_spaces()\n\n\ndef get(flat=False):\n \"\"\"Get config as a dict.\"\"\"\n settings = {\n \"environment\": environment.get(),\n \"execution\": execution.get(),\n \"workflow\": workflow.get(),\n \"nipype\": nipype.get(),\n \"seeds\": seeds.get(),\n }\n if not flat:\n return settings\n\n return {\n \".\".join((section, k)): v\n for section, configs in settings.items()\n for k, v in configs.items()\n }\n\n\ndef dumps():\n \"\"\"Format config into toml.\"\"\"\n from toml import dumps\n\n return dumps(get())\n\n\ndef to_filename(filename):\n \"\"\"Write settings to file.\"\"\"\n filename = Path(filename)\n filename.write_text(dumps())\n\n\ndef init_spaces(checkpoint=True):\n \"\"\"Initialize the :attr:`~workflow.spaces` setting.\"\"\"\n from niworkflows.utils.spaces import Reference, SpatialReferences\n\n spaces = execution.output_spaces or SpatialReferences()\n if not isinstance(spaces, SpatialReferences):\n spaces = SpatialReferences(\n [ref for s in spaces.split(\" \") for ref in Reference.from_string(s)]\n )\n\n if checkpoint and not spaces.is_cached():\n spaces.checkpoint()\n\n # Add the default standard space if not already present (required by several sub-workflows)\n if \"MNI152NLin2009cAsym\" not in spaces.get_spaces(nonstandard=False, dim=(3,)):\n spaces.add(Reference(\"MNI152NLin2009cAsym\", {}))\n\n # Ensure user-defined spatial references for outputs are correctly parsed.\n # Certain options require normalization to a space not explicitly defined by users.\n # These spaces will not be included in the final outputs.\n if workflow.use_aroma:\n # Make sure there's a normalization to FSL for AROMA to use.\n spaces.add(Reference(\"MNI152NLin6Asym\", {\"res\": \"2\"}))\n\n cifti_output = workflow.cifti_output\n if cifti_output:\n # CIFTI grayordinates to corresponding FSL-MNI resolutions.\n vol_res = \"2\" if cifti_output == \"91k\" else \"1\"\n spaces.add(Reference(\"fsaverage\", {\"den\": \"164k\"}))\n spaces.add(Reference(\"MNI152NLin6Asym\", {\"res\": vol_res}))\n\n # Make the SpatialReferences object available\n workflow.spaces = spaces\n" ]
[ [ "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fepegar/InnerEye-DeepLearning
[ "6aab3b1f931ad9c5f9546d77b22676ac4f34da2d" ]
[ "Tests/ML/models/test_parallel.py" ]
[ "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom typing import Any, List\n\nimport pytest\nimport torch\nfrom torch import Tensor\n\nfrom InnerEye.ML.models.architectures.base_model import BaseSegmentationModel, CropSizeConstraints\nfrom InnerEye.ML.models.parallel.model_parallel import group_layers_with_balanced_memory, \\\n move_to_device, partition_layers\nfrom InnerEye.ML.utils.ml_util import is_gpu_available\n\nno_gpu = not is_gpu_available()\nno_or_single_gpu = not torch.cuda.is_available() or torch.cuda.device_count() <= 1\n\n\nclass SimpleModel(BaseSegmentationModel):\n \"\"\"\n A simple neural network model to test model parallelisation functions.\n \"\"\"\n\n def __init__(self, input_channels: Any, channels: Any, n_classes: int, kernel_size: int):\n # minimum crop size: Network first reduces size by 4, then halves, then multiplies by 2 and adds 1\n # 64 -> 62 -> 30 -> 61 -> 61\n super().__init__(name='SimpleModel',\n input_channels=input_channels,\n crop_size_constraints=CropSizeConstraints(minimum_size=6))\n self.channels = channels\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self._model = torch.nn.Sequential(\n torch.nn.Conv3d(input_channels, channels[0], kernel_size=self.kernel_size),\n torch.nn.Conv3d(channels[0], channels[1], kernel_size=self.kernel_size, stride=2),\n torch.nn.ConvTranspose3d(channels[1], channels[0], kernel_size=self.kernel_size, stride=2),\n torch.nn.ConvTranspose3d(channels[0], n_classes, kernel_size=1)\n )\n\n def forward(self, x: Any): # type: ignore\n return self._model(x)\n\n def get_all_child_layers(self) -> List[torch.nn.Module]:\n return list(self._model.children())\n\n\[email protected]\[email protected](no_gpu, reason=\"CUDA capable GPU is not available\")\ndef test_move_to_device() -> None:\n def assert_device_matches(tensors: List[Tensor], target_device: torch.device) -> None:\n for tensor in tensors:\n assert tensor.device == target_device\n\n target_device = torch.device('cuda:0')\n input_tensor_1 = torch.tensor(3, device=torch.device('cpu'))\n input_tensor_2 = torch.tensor(3, device=torch.device('cuda:0'))\n tensors = [input_tensor_1, input_tensor_2]\n moved = list(move_to_device(tensors, target_device=target_device))\n assert_device_matches(moved, target_device)\n\n if torch.cuda.device_count() > 1:\n target_device = torch.device('cuda:1')\n moved = list(move_to_device(tensors, target_device=target_device))\n assert_device_matches(moved, target_device)\n\n # Not supplying a target device should leave the tensor untouched\n moved = list(move_to_device(tensors, target_device=None))\n assert moved[0].device == tensors[0].device\n assert moved[1].device == tensors[1].device\n\n\[email protected]\[email protected](no_or_single_gpu, reason=\"CUDA capable GPUs are not available\")\ndef test_group_layers_with_balanced_memory() -> None:\n model = SimpleModel(input_channels=1, channels=[2, 3], n_classes=2, kernel_size=1).cuda()\n model.generate_model_summary(crop_size=(8, 8, 8))\n groups = group_layers_with_balanced_memory(model.get_all_child_layers(), num_groups=2, summary=model.summary)\n\n for group_id, group in enumerate(groups):\n assert len(group) == 2\n if group_id == 0:\n assert isinstance(group[0], torch.nn.Conv3d)\n assert 
isinstance(group[1], torch.nn.Conv3d)\n elif group_id == 1:\n assert isinstance(group[0], torch.nn.ConvTranspose3d)\n assert isinstance(group[1], torch.nn.ConvTranspose3d)\n\n\[email protected]\[email protected](no_or_single_gpu, reason=\"CUDA capable GPUs are not available\")\ndef test_partition_layers() -> None:\n model = SimpleModel(input_channels=1, channels=[2, 3], n_classes=2, kernel_size=1).cuda()\n model.generate_model_summary(crop_size=(8, 8, 8))\n summary = model.summary\n devices = [torch.device('cuda:{}'.format(ii)) for ii in range(2)]\n all_layers = model.get_all_child_layers()\n\n if summary is None:\n raise RuntimeError(\n \"Network summary is required to partition UNet3D. Call model.generate_model_summary() first.\")\n\n partition_layers(layers=all_layers, summary=summary, target_devices=devices)\n\n assert all_layers[0].weight.device == torch.device(\"cuda:0\")\n assert all_layers[1].weight.device == torch.device(\"cuda:0\")\n assert all_layers[2].weight.device == torch.device(\"cuda:1\")\n assert all_layers[3].weight.device == torch.device(\"cuda:1\")\n" ]
[ [ "torch.nn.ConvTranspose3d", "torch.nn.Conv3d", "torch.cuda.is_available", "torch.device", "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhixuanli/FFT_python_implementation
[ "5559e69495191350102f341541f266d20e39a6a8" ]
[ "utils.py" ]
[ "import numpy as np\nimport cv2\nimport os\n\nfrom fourier_transform import *\n\n\ndef img_FFT(img):\n if len(img.shape) == 2 or len(img.shape) == 3:\n return FFT_2D(img)\n else:\n raise ValueError(\"Please input a gray or RGB image!\")\n\n\ndef img_FFT_inverse(img):\n if len(img.shape) == 2 or len(img.shape) == 3:\n return inverseFFT_2D(img)\n else:\n raise ValueError(\"Please input a gray or RGB image!\")\n\n\ndef findpower2(num):\n \"\"\"find the nearest number that is the power of 2\"\"\"\n if num & (num-1) == 0:\n return num\n\n bin_num = bin(num)\n origin_bin_num = str(bin_num)[2:]\n near_power2 = pow(10, len(origin_bin_num))\n near_power2 = \"0b\" + str(near_power2)\n near_power2 = int(near_power2, base=2)\n\n return near_power2\n\n\ndef image_padding(img):\n \"\"\" padding the image size to power of 2, for fft computation requirement\"\"\"\n if len(img.shape) == 2 or len(img.shape) == 3:\n h, w = img.shape[0], img.shape[1]\n\n h_pad = findpower2(h)-h\n w_pad = findpower2(w)-w\n\n img = np.pad(img, pad_width=((0, h_pad), (0, w_pad), (0, 0)), mode='constant')\n\n return img\n\n\ndef image_fft(img_path, result_folder_path=\"result/\"):\n \"\"\" read, padding, fft, cut to origin size and save \"\"\"\n data_root, img_name = os.path.split(img_path)\n\n if img_name[-3:] != \"png\" and img_name[-3:] != \"tif\":\n return 0\n\n if not os.path.exists(result_folder_path):\n os.mkdir(result_folder_path)\n\n img_origin = cv2.imread(img_path)\n img = image_padding(img_origin)\n\n img_fft = img_FFT(img)\n\n if len(img_origin) == 2:\n img_fft = img_fft[:img_origin.shape[0], :img_origin.shape[1]]\n else:\n img_fft = img_fft[:img_origin.shape[0], :img_origin.shape[1], :]\n\n img_fft_complex = img_fft.copy()\n\n # save real value for human seeing\n img_fft = np.real(img_fft)\n name, _ = img_name.split(\".\")\n save_img_name = result_folder_path + name + \"_fft.png\"\n cv2.imwrite(save_img_name, img_fft)\n\n return img_fft_complex\n\n\ndef image_fft_inverse(img_fft_complex, img_path, result_folder_path=\"result/\"):\n \"\"\" inverse the read fft_img, cut to origin size and save \"\"\"\n if not os.path.exists(result_folder_path):\n os.mkdir(result_folder_path)\n _, img_name = os.path.split(img_path)\n\n img_fft = image_padding(img_fft_complex)\n\n img_origin = img_FFT_inverse(img_fft)\n img_ifft = np.real(img_origin)\n\n name, _ = img_name.split(\".\")\n save_img_name = result_folder_path + name + \"_inverse.png\"\n\n if len(img_origin) == 2:\n img_ifft = img_ifft[:img_fft_complex.shape[0], :img_fft_complex.shape[1]]\n else:\n img_ifft = img_ifft[:img_fft_complex.shape[0], :img_fft_complex.shape[1], :]\n\n cv2.imwrite(save_img_name, img_ifft)\n\n return img_origin\n\n\nif __name__ == '__main__':\n x = np.mgrid[:8, :8][0]\n # x = np.mgrid[:4, :4][0]\n print(x)\n print(\"-------------------\")\n # print(np.allclose(FFT(x), np.fft.fft(x)))\n # print(np.allclose(FFT_2D(x), np.fft.fft2(x)))\n # print(FFT_2D(x))\n print(inverseFFT_2D(x))\n # print(inverseDFT_2D(x))\n\n print(\"-------------------\")\n print(np.fft.ifft2(x))\n print(\"-------------------\")\n # print(np.fft.fft(x))\n\n # print(np.allclose(np.fft.fft(x), np.fft.fft2(x)))\n" ]
[ [ "numpy.real", "numpy.fft.ifft2", "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Njreardo/tensorflow
[ "de52bc1b16ec15c2afc5696edd89480f8bad5257" ]
[ "tensorflow/python/ops/image_ops_test.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.image_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport colorsys\nimport functools\nimport itertools\nimport math\nimport os\nimport time\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.data.experimental.ops import get_single_element\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_image_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import image_ops_impl\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import stateless_random_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\n\n\nclass RGBToHSVTest(test_util.TensorFlowTestCase):\n\n def testBatch(self):\n # Build an arbitrary RGB image\n np.random.seed(7)\n batch_size = 5\n shape = (batch_size, 2, 7, 3)\n\n for nptype in [np.float32, np.float64]:\n inp = np.random.rand(*shape).astype(nptype)\n\n # Convert to HSV and back, as a batch and individually\n with self.cached_session(use_gpu=True) as sess:\n batch0 = constant_op.constant(inp)\n batch1 = image_ops.rgb_to_hsv(batch0)\n batch2 = image_ops.hsv_to_rgb(batch1)\n split0 = array_ops.unstack(batch0)\n split1 = list(map(image_ops.rgb_to_hsv, split0))\n split2 = list(map(image_ops.hsv_to_rgb, split1))\n join1 = array_ops.stack(split1)\n join2 = array_ops.stack(split2)\n batch1, batch2, join1, join2 = self.evaluate(\n [batch1, batch2, join1, join2])\n\n # Verify that processing batch elements together is the same as separate\n self.assertAllClose(batch1, join1)\n self.assertAllClose(batch2, join2)\n self.assertAllClose(batch2, inp)\n\n def testRGBToHSVRoundTrip(self):\n data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n for nptype in [np.float32, np.float64]:\n rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.\n with self.cached_session(use_gpu=True):\n hsv = image_ops.rgb_to_hsv(rgb_np)\n rgb = 
image_ops.hsv_to_rgb(hsv)\n rgb_tf = self.evaluate(rgb)\n self.assertAllClose(rgb_tf, rgb_np)\n\n\nclass RGBToYIQTest(test_util.TensorFlowTestCase):\n\n @test_util.run_without_tensor_float_32(\n \"Calls rgb_to_yiq and yiq_to_rgb, which use matmul\")\n def testBatch(self):\n # Build an arbitrary RGB image\n np.random.seed(7)\n batch_size = 5\n shape = (batch_size, 2, 7, 3)\n\n for nptype in [np.float32, np.float64]:\n inp = np.random.rand(*shape).astype(nptype)\n\n # Convert to YIQ and back, as a batch and individually\n with self.cached_session(use_gpu=True) as sess:\n batch0 = constant_op.constant(inp)\n batch1 = image_ops.rgb_to_yiq(batch0)\n batch2 = image_ops.yiq_to_rgb(batch1)\n split0 = array_ops.unstack(batch0)\n split1 = list(map(image_ops.rgb_to_yiq, split0))\n split2 = list(map(image_ops.yiq_to_rgb, split1))\n join1 = array_ops.stack(split1)\n join2 = array_ops.stack(split2)\n batch1, batch2, join1, join2 = self.evaluate(\n [batch1, batch2, join1, join2])\n\n # Verify that processing batch elements together is the same as separate\n self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)\n self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)\n self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)\n\n\nclass RGBToYUVTest(test_util.TensorFlowTestCase):\n\n @test_util.run_without_tensor_float_32(\n \"Calls rgb_to_yuv and yuv_to_rgb, which use matmul\")\n def testBatch(self):\n # Build an arbitrary RGB image\n np.random.seed(7)\n batch_size = 5\n shape = (batch_size, 2, 7, 3)\n\n for nptype in [np.float32, np.float64]:\n inp = np.random.rand(*shape).astype(nptype)\n\n # Convert to YUV and back, as a batch and individually\n with self.cached_session(use_gpu=True) as sess:\n batch0 = constant_op.constant(inp)\n batch1 = image_ops.rgb_to_yuv(batch0)\n batch2 = image_ops.yuv_to_rgb(batch1)\n split0 = array_ops.unstack(batch0)\n split1 = list(map(image_ops.rgb_to_yuv, split0))\n split2 = list(map(image_ops.yuv_to_rgb, split1))\n join1 = array_ops.stack(split1)\n join2 = array_ops.stack(split2)\n batch1, batch2, join1, join2 = self.evaluate(\n [batch1, batch2, join1, join2])\n\n # Verify that processing batch elements together is the same as separate\n self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)\n self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)\n self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)\n\n\nclass GrayscaleToRGBTest(test_util.TensorFlowTestCase):\n\n def _RGBToGrayscale(self, images):\n is_batch = True\n if len(images.shape) == 3:\n is_batch = False\n images = np.expand_dims(images, axis=0)\n out_shape = images.shape[0:3] + (1,)\n out = np.zeros(shape=out_shape, dtype=np.uint8)\n for batch in xrange(images.shape[0]):\n for y in xrange(images.shape[1]):\n for x in xrange(images.shape[2]):\n red = images[batch, y, x, 0]\n green = images[batch, y, x, 1]\n blue = images[batch, y, x, 2]\n gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue\n out[batch, y, x, 0] = int(gray)\n if not is_batch:\n out = np.squeeze(out, axis=0)\n return out\n\n def _TestRGBToGrayscale(self, x_np):\n y_np = self._RGBToGrayscale(x_np)\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.rgb_to_grayscale(x_tf)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testBasicRGBToGrayscale(self):\n # 4-D input with batch dimension.\n x_np = np.array(\n [[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])\n self._TestRGBToGrayscale(x_np)\n\n # 3-D input with no batch dimension.\n x_np = 
np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])\n self._TestRGBToGrayscale(x_np)\n\n def testBasicGrayscaleToRGB(self):\n # 4-D input with batch dimension.\n x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])\n y_np = np.array(\n [[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.grayscale_to_rgb(x_tf)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n # 3-D input with no batch dimension.\n x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])\n y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.grayscale_to_rgb(x_tf)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testGrayscaleToRGBInputValidation(self):\n # tests whether the grayscale_to_rgb function raises\n # an exception if the input images' last dimension is\n # not of size 1, i.e. the images have shape\n # [batch size, height, width] or [height, width]\n\n # tests if an exception is raised if a three dimensional\n # input is used, i.e. the images have shape [batch size, height, width]\n with self.cached_session(use_gpu=True):\n # 3-D input with batch dimension.\n x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])\n\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n\n # this is the error message we expect the function to raise\n err_msg = \"Last dimension of a grayscale image should be size 1\"\n with self.assertRaisesRegex(ValueError, err_msg):\n image_ops.grayscale_to_rgb(x_tf)\n\n # tests if an exception is raised if a two dimensional\n # input is used, i.e. 
the images have shape [height, width]\n with self.cached_session(use_gpu=True):\n # 1-D input without batch dimension.\n x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])\n\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n\n # this is the error message we expect the function to raise\n err_msg = \"must be at least two-dimensional\"\n with self.assertRaisesRegex(ValueError, err_msg):\n image_ops.grayscale_to_rgb(x_tf)\n\n def testShapeInference(self):\n # Shape function requires placeholders and a graph.\n with ops.Graph().as_default():\n # Shape inference works and produces expected output where possible\n rgb_shape = [7, None, 19, 3]\n gray_shape = rgb_shape[:-1] + [1]\n with self.cached_session(use_gpu=True):\n rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)\n gray = image_ops.rgb_to_grayscale(rgb_tf)\n self.assertEqual(gray_shape, gray.get_shape().as_list())\n\n with self.cached_session(use_gpu=True):\n gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)\n rgb = image_ops.grayscale_to_rgb(gray_tf)\n self.assertEqual(rgb_shape, rgb.get_shape().as_list())\n\n # Shape inference does not break for unknown shapes\n with self.cached_session(use_gpu=True):\n rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)\n gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)\n self.assertFalse(gray_unknown.get_shape())\n\n with self.cached_session(use_gpu=True):\n gray_tf_unknown = array_ops.placeholder(dtypes.uint8)\n rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)\n self.assertFalse(rgb_unknown.get_shape())\n\n\nclass AdjustGamma(test_util.TensorFlowTestCase):\n\n def test_adjust_gamma_less_zero_float32(self):\n \"\"\"White image should be returned for gamma equal to zero\"\"\"\n with self.cached_session():\n x_data = np.random.uniform(0, 1.0, (8, 8))\n x_np = np.array(x_data, dtype=np.float32)\n\n x = constant_op.constant(x_np, shape=x_np.shape)\n\n err_msg = \"Gamma should be a non-negative real number\"\n with self.assertRaisesRegex(\n (ValueError, errors.InvalidArgumentError), err_msg):\n image_ops.adjust_gamma(x, gamma=-1)\n\n def test_adjust_gamma_less_zero_uint8(self):\n \"\"\"White image should be returned for gamma equal to zero\"\"\"\n with self.cached_session():\n x_data = np.random.uniform(0, 255, (8, 8))\n x_np = np.array(x_data, dtype=np.uint8)\n\n x = constant_op.constant(x_np, shape=x_np.shape)\n\n err_msg = \"Gamma should be a non-negative real number\"\n with self.assertRaisesRegex(\n (ValueError, errors.InvalidArgumentError), err_msg):\n image_ops.adjust_gamma(x, gamma=-1)\n\n def test_adjust_gamma_less_zero_tensor(self):\n \"\"\"White image should be returned for gamma equal to zero\"\"\"\n with self.cached_session():\n x_data = np.random.uniform(0, 1.0, (8, 8))\n x_np = np.array(x_data, dtype=np.float32)\n\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = constant_op.constant(-1.0, dtype=dtypes.float32)\n\n err_msg = \"Gamma should be a non-negative real number\"\n with self.assertRaisesRegex(\n (ValueError, errors.InvalidArgumentError), err_msg):\n image = image_ops.adjust_gamma(x, gamma=y)\n self.evaluate(image)\n\n def _test_adjust_gamma_uint8(self, gamma):\n \"\"\"Verifying the output with expected results for gamma\n\n correction for uint8 images\n \"\"\"\n with self.cached_session():\n x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.adjust_gamma(x, gamma=gamma)\n y_tf = np.trunc(self.evaluate(y))\n\n # calculate gamma correction using numpy\n 
# firstly, transform uint8 to float representation\n # then perform correction\n y_np = np.power(x_np / 255.0, gamma)\n # convert correct numpy image back to uint8 type\n y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))\n\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n def _test_adjust_gamma_float32(self, gamma):\n \"\"\"Verifying the output with expected results for gamma\n\n correction for float32 images\n \"\"\"\n with self.cached_session():\n x_np = np.random.uniform(0, 1.0, (8, 8))\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.adjust_gamma(x, gamma=gamma)\n y_tf = self.evaluate(y)\n\n y_np = np.clip(np.power(x_np, gamma), 0, 1.0)\n\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n def test_adjust_gamma_one_float32(self):\n \"\"\"Same image should be returned for gamma equal to one\"\"\"\n self._test_adjust_gamma_float32(1.0)\n\n def test_adjust_gamma_one_uint8(self):\n self._test_adjust_gamma_uint8(1.0)\n\n def test_adjust_gamma_zero_uint8(self):\n \"\"\"White image should be returned for gamma equal\n\n to zero for uint8 images\n \"\"\"\n self._test_adjust_gamma_uint8(gamma=0.0)\n\n def test_adjust_gamma_less_one_uint8(self):\n \"\"\"Verifying the output with expected results for gamma\n\n correction with gamma equal to half for uint8 images\n \"\"\"\n self._test_adjust_gamma_uint8(gamma=0.5)\n\n def test_adjust_gamma_greater_one_uint8(self):\n \"\"\"Verifying the output with expected results for gamma\n\n correction for uint8 images\n \"\"\"\n self._test_adjust_gamma_uint8(gamma=1.0)\n\n def test_adjust_gamma_less_one_float32(self):\n \"\"\"Verifying the output with expected results for gamma\n\n correction with gamma equal to half for float32 images\n \"\"\"\n self._test_adjust_gamma_float32(0.5)\n\n def test_adjust_gamma_greater_one_float32(self):\n \"\"\"Verifying the output with expected results for gamma\n\n correction with gamma equal to two for float32 images\n \"\"\"\n self._test_adjust_gamma_float32(1.0)\n\n def test_adjust_gamma_zero_float32(self):\n \"\"\"White image should be returned for gamma equal\n\n to zero for float32 images\n \"\"\"\n self._test_adjust_gamma_float32(0.0)\n\n\nclass AdjustHueTest(test_util.TensorFlowTestCase):\n\n def testAdjustNegativeHue(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n delta = -0.25\n y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_hue(x, delta)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testAdjustPositiveHue(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n delta = 0.25\n y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_hue(x, delta)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testBatchAdjustHue(self):\n x_shape = [2, 1, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n delta = 0.25\n y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with 
self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_hue(x, delta)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def _adjustHueNp(self, x_np, delta_h):\n self.assertEqual(x_np.shape[-1], 3)\n x_v = x_np.reshape([-1, 3])\n y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)\n channel_count = x_v.shape[0]\n for i in xrange(channel_count):\n r = x_v[i][0]\n g = x_v[i][1]\n b = x_v[i][2]\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n h += delta_h\n h = math.fmod(h + 10.0, 1.0)\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\n y_v[i][0] = r\n y_v[i][1] = g\n y_v[i][2] = b\n return y_v.reshape(x_np.shape)\n\n def _adjustHueTf(self, x_np, delta_h):\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np)\n y = image_ops.adjust_hue(x, delta_h)\n y_tf = self.evaluate(y)\n return y_tf\n\n def testAdjustRandomHue(self):\n x_shapes = [\n [2, 2, 3],\n [4, 2, 3],\n [2, 4, 3],\n [2, 5, 3],\n [1000, 1, 3],\n ]\n test_styles = [\n \"all_random\",\n \"rg_same\",\n \"rb_same\",\n \"gb_same\",\n \"rgb_same\",\n ]\n for x_shape in x_shapes:\n for test_style in test_styles:\n x_np = np.random.rand(*x_shape) * 255.\n delta_h = np.random.rand() * 2.0 - 1.0\n if test_style == \"all_random\":\n pass\n elif test_style == \"rg_same\":\n x_np[..., 1] = x_np[..., 0]\n elif test_style == \"rb_same\":\n x_np[..., 2] = x_np[..., 0]\n elif test_style == \"gb_same\":\n x_np[..., 2] = x_np[..., 1]\n elif test_style == \"rgb_same\":\n x_np[..., 1] = x_np[..., 0]\n x_np[..., 2] = x_np[..., 0]\n else:\n raise AssertionError(\"Invalid test style: %s\" % (test_style))\n y_np = self._adjustHueNp(x_np, delta_h)\n y_tf = self._adjustHueTf(x_np, delta_h)\n self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)\n\n def testInvalidShapes(self):\n fused = False\n if not fused:\n # The tests are known to pass with the fused adjust_hue. 
We will enable\n # them when the fused implementation is the default.\n return\n x_np = np.random.rand(2, 3) * 255.\n delta_h = np.random.rand() * 2.0 - 1.0\n fused = False\n with self.assertRaisesRegex(ValueError, \"Shape must be at least rank 3\"):\n self._adjustHueTf(x_np, delta_h)\n x_np = np.random.rand(4, 2, 4) * 255.\n delta_h = np.random.rand() * 2.0 - 1.0\n with self.assertRaisesOpError(\"input must have 3 channels\"):\n self._adjustHueTf(x_np, delta_h)\n\n\nclass FlipImageBenchmark(test.Benchmark):\n\n def _benchmarkFlipLeftRight(self, device, cpu_count):\n image_shape = [299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with session.Session(\"\", graph=ops.Graph(), config=config) as sess:\n with ops.device(device):\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n run_op = image_ops.flip_left_right(inputs)\n self.evaluate(variables.global_variables_initializer())\n for i in xrange(warmup_rounds + benchmark_rounds):\n if i == warmup_rounds:\n start = time.time()\n self.evaluate(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkFlipLeftRight_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def _benchmarkRandomFlipLeftRight(self, device, cpu_count):\n image_shape = [299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with session.Session(\"\", graph=ops.Graph(), config=config) as sess:\n with ops.device(device):\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n run_op = image_ops.random_flip_left_right(inputs)\n self.evaluate(variables.global_variables_initializer())\n for i in xrange(warmup_rounds + benchmark_rounds):\n if i == warmup_rounds:\n start = time.time()\n self.evaluate(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkRandomFlipLeftRight_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):\n image_shape = [16, 299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with session.Session(\"\", graph=ops.Graph(), config=config) as sess:\n with ops.device(device):\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n run_op = image_ops.random_flip_left_right(inputs)\n self.evaluate(variables.global_variables_initializer())\n for i in xrange(warmup_rounds + benchmark_rounds):\n if i == warmup_rounds:\n start = 
time.time()\n self.evaluate(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: \"\n \"%.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def benchmarkFlipLeftRightCpu1(self):\n self._benchmarkFlipLeftRight(\"/cpu:0\", 1)\n\n def benchmarkFlipLeftRightCpuAll(self):\n self._benchmarkFlipLeftRight(\"/cpu:0\", None)\n\n def benchmarkFlipLeftRightGpu(self):\n self._benchmarkFlipLeftRight(test.gpu_device_name(), None)\n\n def benchmarkRandomFlipLeftRightCpu1(self):\n self._benchmarkRandomFlipLeftRight(\"/cpu:0\", 1)\n\n def benchmarkRandomFlipLeftRightCpuAll(self):\n self._benchmarkRandomFlipLeftRight(\"/cpu:0\", None)\n\n def benchmarkRandomFlipLeftRightGpu(self):\n self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)\n\n def benchmarkBatchedRandomFlipLeftRightCpu1(self):\n self._benchmarkBatchedRandomFlipLeftRight(\"/cpu:0\", 1)\n\n def benchmarkBatchedRandomFlipLeftRightCpuAll(self):\n self._benchmarkBatchedRandomFlipLeftRight(\"/cpu:0\", None)\n\n def benchmarkBatchedRandomFlipLeftRightGpu(self):\n self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)\n\n\nclass AdjustHueBenchmark(test.Benchmark):\n\n def _benchmarkAdjustHue(self, device, cpu_count):\n image_shape = [299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with self.benchmark_session(config=config, device=device) as sess:\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n delta = constant_op.constant(0.1, dtype=dtypes.float32)\n outputs = image_ops.adjust_hue(inputs, delta)\n run_op = control_flow_ops.group(outputs)\n self.evaluate(variables.global_variables_initializer())\n for i in xrange(warmup_rounds + benchmark_rounds):\n if i == warmup_rounds:\n start = time.time()\n self.evaluate(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkAdjustHue_299_299_3_%s step_time: %.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkAdjustHue_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def benchmarkAdjustHueCpu1(self):\n self._benchmarkAdjustHue(\"/cpu:0\", 1)\n\n def benchmarkAdjustHueCpuAll(self):\n self._benchmarkAdjustHue(\"/cpu:0\", None)\n\n def benchmarkAdjustHueGpu(self):\n self._benchmarkAdjustHue(test.gpu_device_name(), None)\n\n\nclass AdjustSaturationBenchmark(test.Benchmark):\n\n def _benchmarkAdjustSaturation(self, device, cpu_count):\n image_shape = [299, 299, 3]\n warmup_rounds = 100\n benchmark_rounds = 1000\n config = config_pb2.ConfigProto()\n if cpu_count is not None:\n config.inter_op_parallelism_threads = 1\n config.intra_op_parallelism_threads = cpu_count\n with self.benchmark_session(config=config, device=device) as sess:\n inputs = variables.Variable(\n random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,\n trainable=False,\n dtype=dtypes.float32)\n delta = constant_op.constant(0.1, dtype=dtypes.float32)\n outputs = 
image_ops.adjust_saturation(inputs, delta)\n run_op = control_flow_ops.group(outputs)\n self.evaluate(variables.global_variables_initializer())\n for _ in xrange(warmup_rounds):\n self.evaluate(run_op)\n start = time.time()\n for _ in xrange(benchmark_rounds):\n self.evaluate(run_op)\n end = time.time()\n step_time = (end - start) / benchmark_rounds\n tag = device + \"_%s\" % (cpu_count if cpu_count is not None else \"_all\")\n print(\"benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us\" %\n (tag, step_time * 1e6))\n self.report_benchmark(\n name=\"benchmarkAdjustSaturation_299_299_3_%s\" % (tag),\n iters=benchmark_rounds,\n wall_time=step_time)\n\n def benchmarkAdjustSaturationCpu1(self):\n self._benchmarkAdjustSaturation(\"/cpu:0\", 1)\n\n def benchmarkAdjustSaturationCpuAll(self):\n self._benchmarkAdjustSaturation(\"/cpu:0\", None)\n\n def benchmarkAdjustSaturationGpu(self):\n self._benchmarkAdjustSaturation(test.gpu_device_name(), None)\n\n\nclass ResizeBilinearBenchmark(test.Benchmark):\n\n def _benchmarkResize(self, image_size, num_channels):\n batch_size = 1\n num_ops = 1000\n img = variables.Variable(\n random_ops.random_normal(\n [batch_size, image_size[0], image_size[1], num_channels]),\n name=\"img\")\n\n deps = []\n for _ in xrange(num_ops):\n with ops.control_dependencies(deps):\n resize_op = image_ops.resize_bilinear(\n img, [299, 299], align_corners=False)\n deps = [resize_op]\n benchmark_op = control_flow_ops.group(*deps)\n\n with self.benchmark_session() as sess:\n self.evaluate(variables.global_variables_initializer())\n results = self.run_op_benchmark(\n sess,\n benchmark_op,\n name=(\"resize_bilinear_%s_%s_%s\" % (image_size[0], image_size[1],\n num_channels)))\n print(\"%s : %.2f ms/img\" %\n (results[\"name\"],\n 1000 * results[\"wall_time\"] / (batch_size * num_ops)))\n\n def benchmarkSimilar3Channel(self):\n self._benchmarkResize((183, 229), 3)\n\n def benchmarkScaleUp3Channel(self):\n self._benchmarkResize((141, 186), 3)\n\n def benchmarkScaleDown3Channel(self):\n self._benchmarkResize((749, 603), 3)\n\n def benchmarkSimilar1Channel(self):\n self._benchmarkResize((183, 229), 1)\n\n def benchmarkScaleUp1Channel(self):\n self._benchmarkResize((141, 186), 1)\n\n def benchmarkScaleDown1Channel(self):\n self._benchmarkResize((749, 603), 1)\n\n\nclass ResizeBicubicBenchmark(test.Benchmark):\n\n def _benchmarkResize(self, image_size, num_channels):\n batch_size = 1\n num_ops = 1000\n img = variables.Variable(\n random_ops.random_normal(\n [batch_size, image_size[0], image_size[1], num_channels]),\n name=\"img\")\n\n deps = []\n for _ in xrange(num_ops):\n with ops.control_dependencies(deps):\n resize_op = image_ops.resize_bicubic(\n img, [299, 299], align_corners=False)\n deps = [resize_op]\n benchmark_op = control_flow_ops.group(*deps)\n\n with self.benchmark_session() as sess:\n self.evaluate(variables.global_variables_initializer())\n results = self.run_op_benchmark(\n sess,\n benchmark_op,\n min_iters=20,\n name=(\"resize_bicubic_%s_%s_%s\" % (image_size[0], image_size[1],\n num_channels)))\n print(\"%s : %.2f ms/img\" %\n (results[\"name\"],\n 1000 * results[\"wall_time\"] / (batch_size * num_ops)))\n\n def benchmarkSimilar3Channel(self):\n self._benchmarkResize((183, 229), 3)\n\n def benchmarkScaleUp3Channel(self):\n self._benchmarkResize((141, 186), 3)\n\n def benchmarkScaleDown3Channel(self):\n self._benchmarkResize((749, 603), 3)\n\n def benchmarkSimilar1Channel(self):\n self._benchmarkResize((183, 229), 1)\n\n def benchmarkScaleUp1Channel(self):\n 
self._benchmarkResize((141, 186), 1)\n\n def benchmarkScaleDown1Channel(self):\n self._benchmarkResize((749, 603), 1)\n\n def benchmarkSimilar4Channel(self):\n self._benchmarkResize((183, 229), 4)\n\n def benchmarkScaleUp4Channel(self):\n self._benchmarkResize((141, 186), 4)\n\n def benchmarkScaleDown4Channel(self):\n self._benchmarkResize((749, 603), 4)\n\n\nclass ResizeAreaBenchmark(test.Benchmark):\n\n def _benchmarkResize(self, image_size, num_channels):\n batch_size = 1\n num_ops = 1000\n img = variables.Variable(\n random_ops.random_normal(\n [batch_size, image_size[0], image_size[1], num_channels]),\n name=\"img\")\n\n deps = []\n for _ in xrange(num_ops):\n with ops.control_dependencies(deps):\n resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)\n deps = [resize_op]\n benchmark_op = control_flow_ops.group(*deps)\n\n with self.benchmark_session() as sess:\n self.evaluate(variables.global_variables_initializer())\n results = self.run_op_benchmark(\n sess,\n benchmark_op,\n name=(\"resize_area_%s_%s_%s\" % (image_size[0], image_size[1],\n num_channels)))\n print(\"%s : %.2f ms/img\" %\n (results[\"name\"],\n 1000 * results[\"wall_time\"] / (batch_size * num_ops)))\n\n def benchmarkSimilar3Channel(self):\n self._benchmarkResize((183, 229), 3)\n\n def benchmarkScaleUp3Channel(self):\n self._benchmarkResize((141, 186), 3)\n\n def benchmarkScaleDown3Channel(self):\n self._benchmarkResize((749, 603), 3)\n\n def benchmarkSimilar1Channel(self):\n self._benchmarkResize((183, 229), 1)\n\n def benchmarkScaleUp1Channel(self):\n self._benchmarkResize((141, 186), 1)\n\n def benchmarkScaleDown1Channel(self):\n self._benchmarkResize((749, 603), 1)\n\n\nclass AdjustSaturationTest(test_util.TensorFlowTestCase):\n\n def testHalfSaturation(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n saturation_factor = 0.5\n y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_saturation(x, saturation_factor)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testTwiceSaturation(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n saturation_factor = 2.0\n y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_saturation(x, saturation_factor)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testBatchSaturation(self):\n x_shape = [2, 1, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n saturation_factor = 0.5\n y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.adjust_saturation(x, saturation_factor)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def _adjustSaturationNp(self, x_np, scale):\n self.assertEqual(x_np.shape[-1], 3)\n x_v = x_np.reshape([-1, 3])\n y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)\n channel_count = x_v.shape[0]\n 
for i in xrange(channel_count):\n r = x_v[i][0]\n g = x_v[i][1]\n b = x_v[i][2]\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n s *= scale\n s = min(1.0, max(0.0, s))\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\n y_v[i][0] = r\n y_v[i][1] = g\n y_v[i][2] = b\n return y_v.reshape(x_np.shape)\n\n @test_util.run_deprecated_v1\n def testAdjustRandomSaturation(self):\n x_shapes = [\n [2, 2, 3],\n [4, 2, 3],\n [2, 4, 3],\n [2, 5, 3],\n [1000, 1, 3],\n ]\n test_styles = [\n \"all_random\",\n \"rg_same\",\n \"rb_same\",\n \"gb_same\",\n \"rgb_same\",\n ]\n with self.cached_session(use_gpu=True):\n for x_shape in x_shapes:\n for test_style in test_styles:\n x_np = np.random.rand(*x_shape) * 255.\n scale = np.random.rand()\n if test_style == \"all_random\":\n pass\n elif test_style == \"rg_same\":\n x_np[..., 1] = x_np[..., 0]\n elif test_style == \"rb_same\":\n x_np[..., 2] = x_np[..., 0]\n elif test_style == \"gb_same\":\n x_np[..., 2] = x_np[..., 1]\n elif test_style == \"rgb_same\":\n x_np[..., 1] = x_np[..., 0]\n x_np[..., 2] = x_np[..., 0]\n else:\n raise AssertionError(\"Invalid test style: %s\" % (test_style))\n y_baseline = self._adjustSaturationNp(x_np, scale)\n y_fused = image_ops.adjust_saturation(x_np, scale).eval()\n self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)\n\n\nclass FlipTransposeRotateTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def testInvolutionLeftRight(self):\n x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, x_np)\n\n def testInvolutionLeftRightWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, x_np)\n\n @test_util.run_deprecated_v1\n def testLeftRight(self):\n x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_left_right(x_tf)\n self.assertTrue(y.op.name.startswith(\"flip_left_right\"))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testLeftRightWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n y_np = np.array(\n [[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_left_right(x_tf)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n @test_util.run_deprecated_v1\n def testRandomFlipLeftRight(self):\n x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])\n seed = 42\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.random_flip_left_right(x_tf, seed=seed)\n self.assertTrue(y.op.name.startswith(\"random_flip_left_right\"))\n\n count_flipped = 0\n count_unflipped = 0\n for _ in 
range(100):\n y_tf = self.evaluate(y)\n if y_tf[0][0] == 1:\n self.assertAllEqual(y_tf, x_np)\n count_unflipped += 1\n else:\n self.assertAllEqual(y_tf, y_np)\n count_flipped += 1\n\n # 100 trials\n # Mean: 50\n # Std Dev: ~5\n # Six Sigma: 50 - (5 * 6) = 20\n self.assertGreaterEqual(count_flipped, 20)\n self.assertGreaterEqual(count_unflipped, 20)\n\n # TODO(b/162345082): stateless random op generates different random number\n # with xla_gpu. Update tests such that there is a single ground truth result\n # to test against.\n @parameterized.named_parameters(\n (\"_RandomFlipLeftRight\", image_ops.stateless_random_flip_left_right),\n (\"_RandomFlipUpDown\", image_ops.stateless_random_flip_up_down),\n )\n def testRandomFlipStateless(self, func):\n with test_util.use_gpu():\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([2, 3, 1])\n if \"RandomFlipUpDown\" in self.id():\n y_np = np.array(\n [[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n\n iterations = 2\n flip_counts = [None for _ in range(iterations)]\n flip_sequences = [\"\" for _ in range(iterations)]\n test_seed = (1, 2)\n split_seeds = stateless_random_ops.split(test_seed, 10)\n seeds_list = self.evaluate(split_seeds)\n for i in range(iterations):\n count_flipped = 0\n count_unflipped = 0\n flip_seq = \"\"\n for seed in seeds_list:\n y_tf = func(x_tf, seed=seed)\n y_tf_eval = self.evaluate(y_tf)\n if y_tf_eval[0][0] == 1:\n self.assertAllEqual(y_tf_eval, x_np)\n count_unflipped += 1\n flip_seq += \"U\"\n else:\n self.assertAllEqual(y_tf_eval, y_np)\n count_flipped += 1\n flip_seq += \"F\"\n\n flip_counts[i] = (count_flipped, count_unflipped)\n flip_sequences[i] = flip_seq\n\n # Verify that results are deterministic.\n for i in range(1, iterations):\n self.assertAllEqual(flip_counts[0], flip_counts[i])\n self.assertAllEqual(flip_sequences[0], flip_sequences[i])\n\n # TODO(b/162345082): stateless random op generates different random number\n # with xla_gpu. 
Update tests such that there is a single ground truth result\n # to test against.\n @parameterized.named_parameters(\n (\"_RandomFlipLeftRight\", image_ops.stateless_random_flip_left_right),\n (\"_RandomFlipUpDown\", image_ops.stateless_random_flip_up_down)\n )\n def testRandomFlipStatelessWithBatch(self, func):\n with test_util.use_gpu():\n batch_size = 16\n\n # create single item of test data\n x_np_raw = np.array(\n [[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([1, 2, 3, 1])\n y_np_raw = np.array(\n [[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([1, 2, 3, 1])\n if \"RandomFlipUpDown\" in self.id():\n y_np_raw = np.array(\n [[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([1, 2, 3, 1])\n\n # create batched test data\n x_np = np.vstack([x_np_raw for _ in range(batch_size)])\n y_np = np.vstack([y_np_raw for _ in range(batch_size)])\n\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n\n iterations = 2\n flip_counts = [None for _ in range(iterations)]\n flip_sequences = [\"\" for _ in range(iterations)]\n test_seed = (1, 2)\n split_seeds = stateless_random_ops.split(test_seed, 10)\n seeds_list = self.evaluate(split_seeds)\n for i in range(iterations):\n count_flipped = 0\n count_unflipped = 0\n flip_seq = \"\"\n for seed in seeds_list:\n y_tf = func(x_tf, seed=seed)\n y_tf_eval = self.evaluate(y_tf)\n for j in range(batch_size):\n if y_tf_eval[j][0][0] == 1:\n self.assertAllEqual(y_tf_eval[j], x_np[j])\n count_unflipped += 1\n flip_seq += \"U\"\n else:\n self.assertAllEqual(y_tf_eval[j], y_np[j])\n count_flipped += 1\n flip_seq += \"F\"\n\n flip_counts[i] = (count_flipped, count_unflipped)\n flip_sequences[i] = flip_seq\n\n for i in range(1, iterations):\n self.assertAllEqual(flip_counts[0], flip_counts[i])\n self.assertAllEqual(flip_sequences[0], flip_sequences[i])\n\n @test_util.run_deprecated_v1\n def testRandomFlipLeftRightWithBatch(self):\n batch_size = 16\n seed = 42\n\n # create single item of test data\n x_np_raw = np.array(\n [[1, 2, 3], [1, 2, 3]], dtype=np.uint8\n ).reshape([1, 2, 3, 1])\n y_np_raw = np.array(\n [[3, 2, 1], [3, 2, 1]], dtype=np.uint8\n ).reshape([1, 2, 3, 1])\n\n # create batched test data\n x_np = np.vstack([x_np_raw for _ in range(batch_size)])\n y_np = np.vstack([y_np_raw for _ in range(batch_size)])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.random_flip_left_right(x_tf, seed=seed)\n self.assertTrue(y.op.name.startswith(\"random_flip_left_right\"))\n\n count_flipped = 0\n count_unflipped = 0\n for _ in range(100):\n y_tf = self.evaluate(y)\n\n # check every element of the batch\n for i in range(batch_size):\n if y_tf[i][0][0] == 1:\n self.assertAllEqual(y_tf[i], x_np[i])\n count_unflipped += 1\n else:\n self.assertAllEqual(y_tf[i], y_np[i])\n count_flipped += 1\n\n # 100 trials, each containing batch_size elements\n # Mean: 50 * batch_size\n # Std Dev: ~5 * sqrt(batch_size)\n # Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))\n # = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680\n six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)\n self.assertGreaterEqual(count_flipped, six_sigma)\n self.assertGreaterEqual(count_unflipped, six_sigma)\n\n def testInvolutionUpDown(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, 
x_np)\n\n def testInvolutionUpDownWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, x_np)\n\n @test_util.run_deprecated_v1\n def testUpDown(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_up_down(x_tf)\n self.assertTrue(y.op.name.startswith(\"flip_up_down\"))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testUpDownWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n y_np = np.array(\n [[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.flip_up_down(x_tf)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n @test_util.run_deprecated_v1\n def testRandomFlipUpDown(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])\n\n seed = 42\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.random_flip_up_down(x_tf, seed=seed)\n self.assertTrue(y.op.name.startswith(\"random_flip_up_down\"))\n count_flipped = 0\n count_unflipped = 0\n for _ in range(100):\n y_tf = self.evaluate(y)\n if y_tf[0][0] == 1:\n self.assertAllEqual(y_tf, x_np)\n count_unflipped += 1\n else:\n self.assertAllEqual(y_tf, y_np)\n count_flipped += 1\n\n # 100 trials\n # Mean: 50\n # Std Dev: ~5\n # Six Sigma: 50 - (5 * 6) = 20\n self.assertGreaterEqual(count_flipped, 20)\n self.assertGreaterEqual(count_unflipped, 20)\n\n @test_util.run_deprecated_v1\n def testRandomFlipUpDownWithBatch(self):\n batch_size = 16\n seed = 42\n\n # create single item of test data\n x_np_raw = np.array(\n [[1, 2, 3], [4, 5, 6]], dtype=np.uint8\n ).reshape([1, 2, 3, 1])\n y_np_raw = np.array(\n [[4, 5, 6], [1, 2, 3]], dtype=np.uint8\n ).reshape([1, 2, 3, 1])\n\n # create batched test data\n x_np = np.vstack([x_np_raw for _ in range(batch_size)])\n y_np = np.vstack([y_np_raw for _ in range(batch_size)])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.random_flip_up_down(x_tf, seed=seed)\n self.assertTrue(y.op.name.startswith(\"random_flip_up_down\"))\n\n count_flipped = 0\n count_unflipped = 0\n for _ in range(100):\n y_tf = self.evaluate(y)\n\n # check every element of the batch\n for i in range(batch_size):\n if y_tf[i][0][0] == 1:\n self.assertAllEqual(y_tf[i], x_np[i])\n count_unflipped += 1\n else:\n self.assertAllEqual(y_tf[i], y_np[i])\n count_flipped += 1\n\n # 100 trials, each containing batch_size elements\n # Mean: 50 * batch_size\n # Std Dev: ~5 * sqrt(batch_size)\n # Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))\n # = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680\n six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)\n self.assertGreaterEqual(count_flipped, six_sigma)\n self.assertGreaterEqual(count_unflipped, 
six_sigma)\n\n def testInvolutionTranspose(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.transpose(image_ops.transpose(x_tf))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, x_np)\n\n def testInvolutionTransposeWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.transpose(image_ops.transpose(x_tf))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, x_np)\n\n @test_util.run_deprecated_v1\n def testTranspose(self):\n x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])\n y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.transpose(x_tf)\n self.assertTrue(y.op.name.startswith(\"transpose\"))\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n def testTransposeWithBatch(self):\n x_np = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n dtype=np.uint8).reshape([2, 2, 3, 1])\n\n y_np = np.array(\n [[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],\n dtype=np.uint8).reshape([2, 3, 2, 1])\n\n with self.cached_session(use_gpu=True):\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.transpose(x_tf)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n\n @test_util.run_deprecated_v1\n def testPartialShapes(self):\n p_unknown_rank = array_ops.placeholder(dtypes.uint8)\n p_unknown_dims_3 = array_ops.placeholder(\n dtypes.uint8, shape=[None, None, None])\n p_unknown_dims_4 = array_ops.placeholder(\n dtypes.uint8, shape=[None, None, None, None])\n p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])\n p_unknown_batch = array_ops.placeholder(\n dtypes.uint8, shape=[None, 64, 64, 3])\n p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])\n p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])\n\n #Ops that support 3D input\n for op in [\n image_ops.flip_left_right, image_ops.flip_up_down,\n image_ops.random_flip_left_right, image_ops.random_flip_up_down,\n image_ops.transpose, image_ops.rot90\n ]:\n transformed_unknown_rank = op(p_unknown_rank)\n self.assertIsNone(transformed_unknown_rank.get_shape().ndims)\n transformed_unknown_dims_3 = op(p_unknown_dims_3)\n self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)\n transformed_unknown_width = op(p_unknown_width)\n self.assertEqual(3, transformed_unknown_width.get_shape().ndims)\n\n with self.assertRaisesRegex(ValueError, \"must be > 0\"):\n op(p_zero_dim)\n\n #Ops that support 4D input\n for op in [\n image_ops.flip_left_right, image_ops.flip_up_down,\n image_ops.random_flip_left_right, image_ops.random_flip_up_down,\n image_ops.transpose, image_ops.rot90\n ]:\n transformed_unknown_dims_4 = op(p_unknown_dims_4)\n self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)\n transformed_unknown_batch = op(p_unknown_batch)\n self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)\n with self.assertRaisesRegex(ValueError,\n \"must be at least three-dimensional\"):\n op(p_wrong_rank)\n\n def testRot90GroupOrder(self):\n image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])\n with 
self.cached_session(use_gpu=True):\n rotated = image\n for _ in xrange(4):\n rotated = image_ops.rot90(rotated)\n self.assertAllEqual(image, self.evaluate(rotated))\n\n def testRot90GroupOrderWithBatch(self):\n image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])\n with self.cached_session(use_gpu=True):\n rotated = image\n for _ in xrange(4):\n rotated = image_ops.rot90(rotated)\n self.assertAllEqual(image, self.evaluate(rotated))\n\n @test_util.run_deprecated_v1\n def testRot90NumpyEquivalence(self):\n image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])\n with self.cached_session(use_gpu=True):\n k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])\n y_tf = image_ops.rot90(image, k_placeholder)\n for k in xrange(4):\n y_np = np.rot90(image, k=k)\n self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))\n\n @test_util.run_deprecated_v1\n def testRot90NumpyEquivalenceWithBatch(self):\n image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])\n with self.cached_session(use_gpu=True):\n k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])\n y_tf = image_ops.rot90(image, k_placeholder)\n for k in xrange(4):\n y_np = np.rot90(image, k=k, axes=(1, 2))\n self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))\n\n def testFlipImageUnknownShape(self):\n expected_output = constant_op.constant([[[[3, 4, 5], [0, 1, 2]],\n [[9, 10, 11], [6, 7, 8]]]])\n\n def generator():\n image_input = np.array(\n [[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], np.int32)\n yield image_input\n\n dataset = dataset_ops.Dataset.from_generator(\n generator,\n output_types=dtypes.int32,\n output_shapes=tensor_shape.TensorShape([1, 2, 2, 3]))\n dataset = dataset.map(image_ops.flip_left_right)\n\n image_flipped_via_dataset_map = get_single_element.get_single_element(\n dataset.take(1))\n self.assertAllEqual(image_flipped_via_dataset_map, expected_output)\n\n\nclass AdjustContrastTest(test_util.TensorFlowTestCase):\n\n def _testContrast(self, x_np, y_np, contrast_factor):\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.adjust_contrast(x, contrast_factor)\n y_tf = self.evaluate(y)\n self.assertAllClose(y_tf, y_np, 1e-6)\n\n def testDoubleContrastUint8(self):\n x_shape = [1, 2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testContrast(x_np, y_np, contrast_factor=2.0)\n\n def testDoubleContrastFloat(self):\n x_shape = [1, 2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255.\n\n y_data = [\n -45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,\n 134.75, 409.25, -116.5\n ]\n y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255.\n\n self._testContrast(x_np, y_np, contrast_factor=2.0)\n\n def testHalfContrastUint8(self):\n x_shape = [1, 2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testContrast(x_np, y_np, contrast_factor=0.5)\n\n def testBatchDoubleContrast(self):\n x_shape = [2, 1, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, 
dtype=np.uint8).reshape(x_shape)\n\n y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testContrast(x_np, y_np, contrast_factor=2.0)\n\n def _adjustContrastNp(self, x_np, contrast_factor):\n mean = np.mean(x_np, (1, 2), keepdims=True)\n y_np = mean + contrast_factor * (x_np - mean)\n return y_np\n\n def _adjustContrastTf(self, x_np, contrast_factor):\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np)\n y = image_ops.adjust_contrast(x, contrast_factor)\n y_tf = self.evaluate(y)\n return y_tf\n\n def testRandomContrast(self):\n x_shapes = [\n [1, 2, 2, 3],\n [2, 1, 2, 3],\n [1, 2, 2, 3],\n [2, 5, 5, 3],\n [2, 1, 1, 3],\n ]\n for x_shape in x_shapes:\n x_np = np.random.rand(*x_shape) * 255.\n contrast_factor = np.random.rand() * 2.0 + 0.1\n y_np = self._adjustContrastNp(x_np, contrast_factor)\n y_tf = self._adjustContrastTf(x_np, contrast_factor)\n self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)\n\n def testContrastFactorShape(self):\n x_shape = [1, 2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),\n \"contrast_factor must be scalar|\"\n \"Shape must be rank 0 but is rank 1\"):\n image_ops.adjust_contrast(x_np, [2.0])\n\n\nclass AdjustBrightnessTest(test_util.TensorFlowTestCase):\n\n def _testBrightness(self, x_np, y_np, delta, tol=1e-6):\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_np.shape)\n y = image_ops.adjust_brightness(x, delta)\n y_tf = self.evaluate(y)\n self.assertAllClose(y_tf, y_np, tol)\n\n def testPositiveDeltaUint8(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testBrightness(x_np, y_np, delta=10. / 255.)\n\n def testPositiveDeltaFloat32(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.\n\n y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]\n y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.\n\n self._testBrightness(x_np, y_np, delta=10. / 255.)\n\n def testPositiveDeltaFloat16(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255.\n\n y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]\n y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255.\n\n self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3)\n\n def testNegativeDelta(self):\n x_shape = [2, 2, 3]\n x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]\n x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)\n\n y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]\n y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)\n\n self._testBrightness(x_np, y_np, delta=-10. 
/ 255.)\n\n\nclass PerImageWhiteningTest(test_util.TensorFlowTestCase):\n\n def _NumpyPerImageWhitening(self, x):\n num_pixels = np.prod(x.shape)\n mn = np.mean(x)\n std = np.std(x)\n stddev = max(std, 1.0 / math.sqrt(num_pixels))\n\n y = x.astype(np.float32)\n y -= mn\n y /= stddev\n return y\n\n def testBasic(self):\n x_shape = [13, 9, 3]\n x_np = np.arange(0, np.prod(x_shape), dtype=np.float32).reshape(x_shape)\n y_np = self._NumpyPerImageWhitening(x_np)\n\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.per_image_standardization(x)\n y_tf = self.evaluate(y)\n self.assertAllClose(y_tf, y_np, atol=1e-4)\n\n def testUniformImage(self):\n im_np = np.ones([19, 19, 3]).astype(np.float32) * 249\n im = constant_op.constant(im_np)\n whiten = image_ops.per_image_standardization(im)\n with self.cached_session(use_gpu=True):\n whiten_np = self.evaluate(whiten)\n self.assertFalse(np.any(np.isnan(whiten_np)))\n\n def testBatchWhitening(self):\n imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])\n whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]\n with self.cached_session(use_gpu=True):\n imgs = constant_op.constant(imgs_np)\n whiten = image_ops.per_image_standardization(imgs)\n whiten_tf = self.evaluate(whiten)\n for w_tf, w_np in zip(whiten_tf, whiten_np):\n self.assertAllClose(w_tf, w_np, atol=1e-4)\n\n def testPreservesDtype(self):\n imgs_npu8 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.uint8)\n imgs_tfu8 = constant_op.constant(imgs_npu8)\n whiten_tfu8 = image_ops.per_image_standardization(imgs_tfu8)\n self.assertEqual(whiten_tfu8.dtype, dtypes.uint8)\n\n imgs_npf16 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.float16)\n imgs_tff16 = constant_op.constant(imgs_npf16)\n whiten_tff16 = image_ops.per_image_standardization(imgs_tff16)\n self.assertEqual(whiten_tff16.dtype, dtypes.float16)\n\n\nclass CropToBoundingBoxTest(test_util.TensorFlowTestCase):\n\n def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,\n target_width, use_tensor_inputs):\n if use_tensor_inputs:\n offset_height = ops.convert_to_tensor(offset_height)\n offset_width = ops.convert_to_tensor(offset_width)\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,\n target_height, target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.cached_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n offset_height,\n offset_width,\n y,\n y_shape,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._CropToBoundingBox(x, offset_height, offset_width,\n target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in 
use_tensor_inputs_options:\n try:\n self._CropToBoundingBox(x, offset_height, offset_width, target_height,\n target_width, use_tensor_inputs)\n except Exception as e:\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n @test_util.run_deprecated_v1\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n self._assertReturns(x, x_shape, 0, 0, x, x_shape)\n\n @test_util.run_deprecated_v1\n def testCrop(self):\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n x_shape = [3, 3, 1]\n\n offset_height, offset_width = [1, 0]\n y_shape = [2, 3, 1]\n y = [4, 5, 6, 7, 8, 9]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 1]\n y_shape = [3, 2, 1]\n y = [2, 3, 5, 6, 8, 9]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 0]\n y_shape = [2, 3, 1]\n y = [1, 2, 3, 4, 5, 6]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 0]\n y_shape = [3, 2, 1]\n y = [1, 2, 4, 5, 7, 8]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n @test_util.run_deprecated_v1\n def testShapeInference(self):\n self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])\n self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])\n self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])\n self._assertShapeInference(None, 55, 66, [55, 66, None])\n\n @test_util.run_deprecated_v1\n def testNon3DInput(self):\n # Input image is not 3D\n x = [0] * 15\n offset_height, offset_width = [0, 0]\n target_height, target_width = [2, 2]\n\n for x_shape in ([3, 5], [1, 3, 5, 1, 1]):\n self._assertRaises(x, x_shape, offset_height, offset_width, target_height,\n target_width,\n \"must have either 3 or 4 dimensions.\")\n\n @test_util.run_deprecated_v1\n def testZeroLengthInput(self):\n # Input image has 0-length dimension(s).\n # Each line is a test configuration:\n # x_shape, target_height, target_width\n test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),\n ([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))\n offset_height, offset_width = [0, 0]\n x = []\n\n for x_shape, target_height, target_width in test_config:\n self._assertRaises(\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n \"inner 3 dims of 'image.shape' must be > 0\",\n use_tensor_inputs_options=[False])\n # Multiple assertions could fail, but the evaluation order is arbitrary.\n # Match against the generic pattern.\n self._assertRaises(\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n \"assertion failed:\",\n use_tensor_inputs_options=[True])\n\n @test_util.run_deprecated_v1\n def testBadParams(self):\n 
x_shape = [4, 4, 1]\n x = np.zeros(x_shape)\n\n # Each line is a test configuration:\n # (offset_height, offset_width, target_height, target_width), err_msg\n test_config = (([-1, 0, 3, 3], \"offset_height must be >= 0\"), ([\n 0, -1, 3, 3\n ], \"offset_width must be >= 0\"), ([0, 0, 0, 3],\n \"target_height must be > 0\"),\n ([0, 0, 3, 0], \"target_width must be > 0\"),\n ([2, 0, 3, 3], \"height must be >= target + offset\"),\n ([0, 2, 3, 3], \"width must be >= target + offset\"))\n\n for params, err_msg in test_config:\n self._assertRaises(x, x_shape, *params, err_msg=err_msg)\n\n @test_util.run_deprecated_v1\n def testNameScope(self):\n image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])\n y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)\n self.assertTrue(y.name.startswith(\"crop_to_bounding_box\"))\n\n\nclass CentralCropTest(test_util.TensorFlowTestCase):\n\n def _assertShapeInference(self, pre_shape, fraction, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.central_crop(image, fraction)\n if post_shape is None:\n self.assertEqual(y.get_shape().dims, None)\n else:\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n def testNoOp(self):\n x_shapes = [[13, 9, 3], [5, 13, 9, 3]]\n for x_shape in x_shapes:\n x_np = np.ones(x_shape, dtype=np.float32)\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.central_crop(x, 1.0)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, x_np)\n\n def testCropping(self):\n x_shape = [4, 8, 1]\n x_np = np.array(\n [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],\n [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],\n dtype=np.int32).reshape(x_shape)\n y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.central_crop(x, 0.5)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n self.assertAllEqual(y_tf.shape, y_np.shape)\n\n x_shape = [2, 4, 8, 1]\n x_np = np.array(\n [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],\n [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],\n [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],\n [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],\n dtype=np.int32).reshape(x_shape)\n y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],\n [[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])\n with self.cached_session(use_gpu=True):\n x = constant_op.constant(x_np, shape=x_shape)\n y = image_ops.central_crop(x, 0.5)\n y_tf = self.evaluate(y)\n self.assertAllEqual(y_tf, y_np)\n self.assertAllEqual(y_tf.shape, y_np.shape)\n\n def testCropping2(self):\n # Test case for 10315\n x_shapes = [[240, 320, 3], [5, 240, 320, 3]]\n expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]\n\n for x_shape, y_shape in zip(x_shapes, expected_y_shapes):\n x_np = np.zeros(x_shape, dtype=np.int32)\n y_np = np.zeros(y_shape, dtype=np.int32)\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n y_tf = self.evaluate(image_ops.central_crop(x_np, 0.33))\n self.assertAllEqual(y_tf, y_np)\n self.assertAllEqual(y_tf.shape, y_np.shape)\n\n def testShapeInference(self):\n # Shape function requires placeholders and a graph.\n with ops.Graph().as_default():\n # Test no-op fraction=1.0, with 3-D tensors.\n self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])\n self._assertShapeInference([None, 60, 3], 1.0, [None, 
60, 3])\n self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])\n self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])\n self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])\n self._assertShapeInference([None, None, None], 1.0, [None, None, None])\n\n # Test no-op fraction=0.5, with 3-D tensors.\n self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])\n self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])\n self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])\n self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])\n self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])\n self._assertShapeInference([None, None, None], 0.5, [None, None, None])\n\n # Test no-op fraction=1.0, with 4-D tensors.\n self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])\n self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])\n self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])\n self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])\n self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])\n self._assertShapeInference([5, None, None, None], 1.0,\n [5, None, None, None])\n self._assertShapeInference([None, None, None, None], 1.0,\n [None, None, None, None])\n\n # Test no-op fraction=0.5, with 4-D tensors.\n self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])\n self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])\n self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])\n self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])\n self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])\n self._assertShapeInference([5, None, None, None], 0.5,\n [5, None, None, None])\n self._assertShapeInference([None, None, None, None], 0.5,\n [None, None, None, None])\n\n def testErrorOnInvalidCentralCropFractionValues(self):\n x_shape = [13, 9, 3]\n x_np = np.ones(x_shape, dtype=np.float32)\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n x = constant_op.constant(x_np, shape=x_shape)\n with self.assertRaises(ValueError):\n _ = image_ops.central_crop(x, 0.0)\n with self.assertRaises(ValueError):\n _ = image_ops.central_crop(x, 1.01)\n\n def testErrorOnInvalidShapes(self):\n x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]\n for x_shape in x_shapes:\n x_np = np.ones(x_shape, dtype=np.float32)\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n x = constant_op.constant(x_np, shape=x_shape)\n with self.assertRaises(ValueError):\n _ = image_ops.central_crop(x, 0.5)\n\n def testNameScope(self):\n # Testing name scope requires a graph.\n with ops.Graph().as_default():\n x_shape = [13, 9, 3]\n x_np = np.ones(x_shape, dtype=np.float32)\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n y = image_ops.central_crop(x_np, 1.0)\n self.assertTrue(y.op.name.startswith(\"central_crop\"))\n\n\nclass PadToBoundingBoxTest(test_util.TensorFlowTestCase):\n\n def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,\n target_width, use_tensor_inputs):\n if use_tensor_inputs:\n offset_height = ops.convert_to_tensor(offset_height)\n offset_width = ops.convert_to_tensor(offset_width)\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = 
image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,\n target_height, target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.cached_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n offset_height,\n offset_width,\n y,\n y_shape,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._PadToBoundingBox(x, offset_height, offset_width,\n target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n try:\n self._PadToBoundingBox(x, offset_height, offset_width, target_height,\n target_width, use_tensor_inputs)\n except Exception as e:\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n def testInt64(self):\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n x_shape = [3, 3, 1]\n\n y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n y_shape = [4, 3, 1]\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)\n y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])\n with self.cached_session(use_gpu=True):\n self.assertAllClose(y, self.evaluate(y_tf))\n\n @test_util.run_deprecated_v1\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n offset_height, offset_width = [0, 0]\n self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)\n\n @test_util.run_deprecated_v1\n def testPadding(self):\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n x_shape = [3, 3, 1]\n\n offset_height, offset_width = [1, 0]\n y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n y_shape = [4, 3, 1]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 1]\n y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]\n y_shape = [3, 4, 1]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 0]\n y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]\n y_shape = [4, 3, 1]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n offset_height, offset_width = [0, 0]\n y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]\n y_shape = [3, 4, 1]\n self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)\n\n @test_util.run_deprecated_v1\n def testShapeInference(self):\n self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, None, 3], 55, 66, [55, 
66, 3])\n self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])\n self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])\n self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])\n self._assertShapeInference(None, 55, 66, [55, 66, None])\n\n @test_util.run_deprecated_v1\n def testNon3DInput(self):\n # Input image is not 3D\n x = [0] * 15\n offset_height, offset_width = [0, 0]\n target_height, target_width = [2, 2]\n\n for x_shape in ([3, 5], [1, 3, 5, 1, 1]):\n self._assertRaises(x, x_shape, offset_height, offset_width, target_height,\n target_width,\n \"must have either 3 or 4 dimensions.\")\n\n @test_util.run_deprecated_v1\n def testZeroLengthInput(self):\n # Input image has 0-length dimension(s).\n # Each line is a test configuration:\n # x_shape, target_height, target_width\n test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))\n offset_height, offset_width = [0, 0]\n x = []\n\n for x_shape, target_height, target_width in test_config:\n self._assertRaises(\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n \"inner 3 dims of 'image.shape' must be > 0\",\n use_tensor_inputs_options=[False])\n\n # The original error message does not contain back slashes. However, they\n # are added by either the assert op or the runtime. If this behavior\n # changes in the future, the match string will also need to be changed.\n self._assertRaises(\n x,\n x_shape,\n offset_height,\n offset_width,\n target_height,\n target_width,\n \"inner 3 dims of \\\\'image.shape\\\\' must be > 0\",\n use_tensor_inputs_options=[True])\n\n @test_util.run_deprecated_v1\n def testBadParams(self):\n x_shape = [3, 3, 1]\n x = np.zeros(x_shape)\n\n # Each line is a test configuration:\n # offset_height, offset_width, target_height, target_width, err_msg\n test_config = ((-1, 0, 4, 4, \"offset_height must be >= 0\"),\n (0, -1, 4, 4, \"offset_width must be >= 0\"),\n (2, 0, 4, 4, \"height must be <= target - offset\"),\n (0, 2, 4, 4, \"width must be <= target - offset\"))\n\n for config_item in test_config:\n self._assertRaises(x, x_shape, *config_item)\n\n @test_util.run_deprecated_v1\n def testNameScope(self):\n image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])\n y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)\n self.assertTrue(y.op.name.startswith(\"pad_to_bounding_box\"))\n\n\nclass SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):\n\n def _testSampleDistortedBoundingBox(self, image, bounding_box,\n min_object_covered, aspect_ratio_range,\n area_range):\n original_area = float(np.prod(image.shape))\n bounding_box_area = float((bounding_box[3] - bounding_box[1]) *\n (bounding_box[2] - bounding_box[0]))\n\n image_size_np = np.array(image.shape, dtype=np.int32)\n bounding_box_np = (\n np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))\n\n aspect_ratios = []\n area_ratios = []\n\n fraction_object_covered = []\n\n num_iter = 1000\n with self.cached_session(use_gpu=True):\n image_tf = constant_op.constant(image, shape=image.shape)\n image_size_tf = constant_op.constant(\n image_size_np, shape=image_size_np.shape)\n bounding_box_tf = constant_op.constant(\n bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)\n\n begin, size, _ = image_ops.sample_distorted_bounding_box(\n image_size=image_size_tf,\n bounding_boxes=bounding_box_tf,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n 
area_range=area_range)\n y = array_ops.strided_slice(image_tf, begin, begin + size)\n\n for _ in xrange(num_iter):\n y_tf = self.evaluate(y)\n crop_height = y_tf.shape[0]\n crop_width = y_tf.shape[1]\n aspect_ratio = float(crop_width) / float(crop_height)\n area = float(crop_width * crop_height)\n\n aspect_ratios.append(aspect_ratio)\n area_ratios.append(area / original_area)\n fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)\n\n # min_object_covered as tensor\n min_object_covered_placeholder = array_ops.placeholder(dtypes.float32)\n begin, size, _ = image_ops.sample_distorted_bounding_box(\n image_size=image_size_tf,\n bounding_boxes=bounding_box_tf,\n min_object_covered=min_object_covered_placeholder,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range)\n y = array_ops.strided_slice(image_tf, begin, begin + size)\n\n for _ in xrange(num_iter):\n y_tf = y.eval(feed_dict={\n min_object_covered_placeholder: min_object_covered\n })\n crop_height = y_tf.shape[0]\n crop_width = y_tf.shape[1]\n aspect_ratio = float(crop_width) / float(crop_height)\n area = float(crop_width * crop_height)\n\n aspect_ratios.append(aspect_ratio)\n area_ratios.append(area / original_area)\n fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)\n\n # Ensure that each entry is observed within 3 standard deviations.\n # num_bins = 10\n # aspect_ratio_hist, _ = np.histogram(aspect_ratios,\n # bins=num_bins,\n # range=aspect_ratio_range)\n # mean = np.mean(aspect_ratio_hist)\n # stddev = np.sqrt(mean)\n # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.\n # TODO(irving): Since the rejection probability is not independent of the\n # aspect ratio, the aspect_ratio random value is not exactly uniformly\n # distributed in [min_aspect_ratio, max_aspect_ratio). This test should be\n # fixed to reflect the true statistical property, then tightened to enforce\n # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op\n # be fixed to not use rejection sampling and generate correctly uniform\n # aspect ratios.\n # self.assertAllClose(aspect_ratio_hist,\n # [mean] * num_bins, atol=3.6 * stddev)\n\n # The resulting crop will not be uniformly distributed in area. In practice,\n # we find that the area skews towards the small sizes. 
Instead, we perform\n # a weaker test to ensure that the area ratios are merely within the\n # specified bounds.\n self.assertLessEqual(max(area_ratios), area_range[1])\n self.assertGreaterEqual(min(area_ratios), area_range[0])\n\n # For reference, here is what the distribution of area ratios look like.\n area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)\n print(\"area_ratio_hist \", area_ratio_hist)\n\n # Ensure that fraction_object_covered is satisfied.\n # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.\n # self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)\n\n @test_util.run_deprecated_v1\n def testWholeImageBoundingBox(self):\n height = 40\n width = 50\n image_size = [height, width, 1]\n bounding_box = [0.0, 0.0, 1.0, 1.0]\n image = np.arange(\n 0, np.prod(image_size), dtype=np.int32).reshape(image_size)\n self._testSampleDistortedBoundingBox(\n image,\n bounding_box,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n @test_util.run_deprecated_v1\n def testWithBoundingBox(self):\n height = 40\n width = 50\n x_shape = [height, width, 1]\n image = np.zeros(x_shape, dtype=np.int32)\n\n # Create an object with 1's in a region with area A and require that\n # the total pixel values >= 0.1 * A.\n min_object_covered = 0.1\n\n xmin = 2\n ymin = 3\n xmax = 12\n ymax = 13\n for x in np.arange(xmin, xmax + 1, 1):\n for y in np.arange(ymin, ymax + 1, 1):\n image[x, y] = 1\n\n # Bounding box is specified as (ymin, xmin, ymax, xmax) in\n # relative coordinates.\n bounding_box = (float(ymin) / height, float(xmin) / width,\n float(ymax) / height, float(xmax) / width)\n\n self._testSampleDistortedBoundingBox(\n image,\n bounding_box=bounding_box,\n min_object_covered=min_object_covered,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n @test_util.run_deprecated_v1\n def testSampleDistortedBoundingBoxShape(self):\n with self.cached_session(use_gpu=True):\n image_size = constant_op.constant(\n [40, 50, 1], shape=[3], dtype=dtypes.int32)\n bounding_box = constant_op.constant(\n [[[0.0, 0.0, 1.0, 1.0]]],\n shape=[1, 1, 4],\n dtype=dtypes.float32,\n )\n begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(\n image_size=image_size,\n bounding_boxes=bounding_box,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n # Test that the shapes are correct.\n self.assertAllEqual([3], begin.get_shape().as_list())\n self.assertAllEqual([3], end.get_shape().as_list())\n self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())\n # Actual run to make sure shape is correct inside Compute().\n begin = self.evaluate(begin)\n end = self.evaluate(end)\n bbox_for_drawing = self.evaluate(bbox_for_drawing)\n\n begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(\n image_size=image_size,\n bounding_boxes=bounding_box,\n min_object_covered=array_ops.placeholder(dtypes.float32),\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n # Test that the shapes are correct.\n self.assertAllEqual([3], begin.get_shape().as_list())\n self.assertAllEqual([3], end.get_shape().as_list())\n self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())\n\n def testDefaultMinObjectCovered(self):\n # By default min_object_covered=0.1 if not provided\n with self.cached_session(use_gpu=True):\n image_size = constant_op.constant(\n [40, 50, 1], shape=[3], dtype=dtypes.int32)\n bounding_box = constant_op.constant(\n 
[[[0.0, 0.0, 1.0, 1.0]]],\n shape=[1, 1, 4],\n dtype=dtypes.float32,\n )\n begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(\n image_size=image_size,\n bounding_boxes=bounding_box,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n self.assertAllEqual([3], begin.get_shape().as_list())\n self.assertAllEqual([3], end.get_shape().as_list())\n self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())\n # Actual run to make sure shape is correct inside Compute().\n begin = self.evaluate(begin)\n end = self.evaluate(end)\n bbox_for_drawing = self.evaluate(bbox_for_drawing)\n\n def _testStatelessSampleDistortedBoundingBox(self, image, bounding_box,\n min_object_covered,\n aspect_ratio_range, area_range):\n with test_util.use_gpu():\n original_area = float(np.prod(image.shape))\n bounding_box_area = float((bounding_box[3] - bounding_box[1]) *\n (bounding_box[2] - bounding_box[0]))\n\n image_size_np = np.array(image.shape, dtype=np.int32)\n bounding_box_np = (\n np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))\n\n iterations = 2\n test_seeds = [(1, 2), (3, 4), (5, 6)]\n\n for seed in test_seeds:\n aspect_ratios = []\n area_ratios = []\n fraction_object_covered = []\n for _ in range(iterations):\n image_tf = constant_op.constant(image, shape=image.shape)\n image_size_tf = constant_op.constant(\n image_size_np, shape=image_size_np.shape)\n bounding_box_tf = constant_op.constant(bounding_box_np,\n dtype=dtypes.float32,\n shape=bounding_box_np.shape)\n begin, size, _ = image_ops.stateless_sample_distorted_bounding_box(\n image_size=image_size_tf,\n bounding_boxes=bounding_box_tf,\n seed=seed,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range)\n y = array_ops.strided_slice(image_tf, begin, begin + size)\n y_tf = self.evaluate(y)\n crop_height = y_tf.shape[0]\n crop_width = y_tf.shape[1]\n aspect_ratio = float(crop_width) / float(crop_height)\n area = float(crop_width * crop_height)\n aspect_ratios.append(aspect_ratio)\n area_ratio = area / original_area\n area_ratios.append(area_ratio)\n fraction_object_covered.append(\n float(np.sum(y_tf)) / bounding_box_area)\n\n # Check that `area_ratio` is within valid range.\n self.assertLessEqual(area_ratio, area_range[1])\n self.assertGreaterEqual(area_ratio, area_range[0])\n\n # Each array should consist of one value just repeated `iteration` times\n # because the same seed is used.\n self.assertEqual(len(set(aspect_ratios)), 1)\n self.assertEqual(len(set(area_ratios)), 1)\n self.assertEqual(len(set(fraction_object_covered)), 1)\n\n # TODO(b/162345082): stateless random op generates different random number\n # with xla_gpu. Update tests such that there is a single ground truth result\n # to test against.\n def testWholeImageBoundingBoxStateless(self):\n height = 40\n width = 50\n image_size = [height, width, 1]\n bounding_box = [0.0, 0.0, 1.0, 1.0]\n image = np.arange(\n 0, np.prod(image_size), dtype=np.int32).reshape(image_size)\n for min_obj_covered in [0.1, constant_op.constant(0.1)]:\n self._testStatelessSampleDistortedBoundingBox(\n image,\n bounding_box,\n min_object_covered=min_obj_covered,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n # TODO(b/162345082): stateless random op generates different random number\n # with xla_gpu. 
Update tests such that there is a single ground truth result\n # to test against.\n def testWithBoundingBoxStateless(self):\n height = 40\n width = 50\n x_shape = [height, width, 1]\n image = np.zeros(x_shape, dtype=np.int32)\n\n xmin = 2\n ymin = 3\n xmax = 12\n ymax = 13\n for x in np.arange(xmin, xmax + 1, 1):\n for y in np.arange(ymin, ymax + 1, 1):\n image[x, y] = 1\n\n # Bounding box is specified as (ymin, xmin, ymax, xmax) in\n # relative coordinates.\n bounding_box = (float(ymin) / height, float(xmin) / width,\n float(ymax) / height, float(xmax) / width)\n\n # Test both scalar and tensor input for `min_object_covered`.\n for min_obj_covered in [0.1, constant_op.constant(0.1)]:\n self._testStatelessSampleDistortedBoundingBox(\n image,\n bounding_box=bounding_box,\n min_object_covered=min_obj_covered,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n def testSampleDistortedBoundingBoxShapeStateless(self):\n with test_util.use_gpu():\n image_size = constant_op.constant(\n [40, 50, 1], shape=[3], dtype=dtypes.int32)\n bounding_box = constant_op.constant(\n [[[0.0, 0.0, 1.0, 1.0]]],\n shape=[1, 1, 4],\n dtype=dtypes.float32,\n )\n\n bbox_func = functools.partial(\n image_ops.stateless_sample_distorted_bounding_box,\n image_size=image_size,\n bounding_boxes=bounding_box,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0))\n\n # Check error is raised with wrong seed shapes.\n for seed in [1, (1, 2, 3)]:\n with self.assertRaises((ValueError, errors.InvalidArgumentError)):\n begin, end, bbox_for_drawing = bbox_func(seed=seed)\n\n test_seed = (1, 2)\n begin, end, bbox_for_drawing = bbox_func(seed=test_seed)\n\n # Test that the shapes are correct.\n self.assertAllEqual([3], begin.get_shape().as_list())\n self.assertAllEqual([3], end.get_shape().as_list())\n self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())\n\n # Actual run to make sure shape is correct inside Compute().\n begin = self.evaluate(begin)\n end = self.evaluate(end)\n bbox_for_drawing = self.evaluate(bbox_for_drawing)\n self.assertAllEqual([3], begin.shape)\n self.assertAllEqual([3], end.shape)\n self.assertAllEqual([1, 1, 4], bbox_for_drawing.shape)\n\n\nclass ResizeImagesV2Test(test_util.TensorFlowTestCase):\n\n METHODS = [\n image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,\n image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,\n image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,\n image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC\n ]\n\n # Some resize methods, such as Gaussian, are non-interpolating in that they\n # change the image even if there is no scale change; for some tests, we only\n # check the values of the value-preserving methods.\n INTERPOLATING_METHODS = [\n image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,\n image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,\n image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5\n ]\n\n TYPES = [\n np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,\n np.float32, np.float64\n ]\n\n def _assertShapeInference(self, pre_shape, size, post_shape):\n # Try single image resize\n single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.resize_images_v2(single_image, size)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n # Try batch images resize with known batch size\n images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)\n y = 
image_ops.resize_images_v2(images, size)\n self.assertEqual(y.get_shape().as_list(), [99] + post_shape)\n # Try batch images resize with unknown batch size\n images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)\n y = image_ops.resize_images_v2(images, size)\n self.assertEqual(y.get_shape().as_list(), [None] + post_shape)\n\n def shouldRunOnGPU(self, method, nptype):\n if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and\n nptype in [np.float32, np.float64]):\n return True\n else:\n return False\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n @test_util.run_deprecated_v1\n def testNoOp(self):\n img_shape = [1, 6, 4, 1]\n single_shape = [6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n target_height = 6\n target_width = 4\n\n for nptype in self.TYPES:\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n\n for method in self.METHODS:\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images_v2(image, [target_height, target_width],\n method)\n yshape = array_ops.shape(y)\n resized, newshape = self.evaluate([y, yshape])\n self.assertAllEqual(img_shape, newshape)\n if method in self.INTERPOLATING_METHODS:\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n # Resizing with a single image must leave the shape unchanged also.\n with self.cached_session(use_gpu=True):\n img_single = img_np.reshape(single_shape)\n image = constant_op.constant(img_single, shape=single_shape)\n y = image_ops.resize_images_v2(image, [target_height, target_width],\n self.METHODS[0])\n yshape = array_ops.shape(y)\n newshape = self.evaluate(yshape)\n self.assertAllEqual(single_shape, newshape)\n\n # half_pixel_centers unsupported in ResizeBilinear\n @test_util.run_deprecated_v1\n @test_util.disable_xla(\"b/127616992\")\n def testTensorArguments(self):\n img_shape = [1, 6, 4, 1]\n single_shape = [6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n new_size = array_ops.placeholder(dtypes.int32, shape=(2))\n\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n for method in self.METHODS:\n with self.cached_session(use_gpu=True) as sess:\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images_v2(image, new_size, method)\n yshape = array_ops.shape(y)\n resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})\n self.assertAllEqual(img_shape, newshape)\n if method in self.INTERPOLATING_METHODS:\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n # Resizing with a single image must leave the shape unchanged also.\n with self.cached_session(use_gpu=True):\n img_single = img_np.reshape(single_shape)\n image = constant_op.constant(img_single, shape=single_shape)\n y = image_ops.resize_images_v2(image, new_size, self.METHODS[0])\n yshape = array_ops.shape(y)\n resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})\n self.assertAllEqual(single_shape, newshape)\n if method in self.INTERPOLATING_METHODS:\n self.assertAllClose(resized, img_single, atol=1e-5)\n\n # Incorrect shape.\n with self.assertRaises(ValueError):\n new_size = constant_op.constant(4)\n _ = image_ops.resize_images_v2(image, 
new_size,\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([4])\n _ = image_ops.resize_images_v2(image, new_size,\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([1, 2, 3])\n _ = image_ops.resize_images_v2(image, new_size,\n image_ops.ResizeMethod.BILINEAR)\n\n # Incorrect dtypes.\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([6.0, 4])\n _ = image_ops.resize_images_v2(image, new_size,\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images_v2(image, [6, 4.0],\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images_v2(image, [None, 4],\n image_ops.ResizeMethod.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images_v2(image, [6, None],\n image_ops.ResizeMethod.BILINEAR)\n\n @test_util.run_deprecated_v1\n def testReturnDtype(self):\n target_shapes = [[6, 4], [3, 2],\n [\n array_ops.placeholder(dtypes.int32),\n array_ops.placeholder(dtypes.int32)\n ]]\n for nptype in self.TYPES:\n image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])\n for method in self.METHODS:\n for target_shape in target_shapes:\n y = image_ops.resize_images_v2(image, target_shape, method)\n if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:\n expected_dtype = image.dtype\n else:\n expected_dtype = dtypes.float32\n self.assertEqual(y.dtype, expected_dtype)\n\n # half_pixel_centers not supported by XLA\n @test_util.disable_xla(\"b/127616992\")\n def testSumTensor(self):\n img_shape = [1, 6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n # Test size where width is specified as a tensor which is a sum\n # of two tensors.\n width_1 = constant_op.constant(1)\n width_2 = constant_op.constant(3)\n width = math_ops.add(width_1, width_2)\n height = constant_op.constant(6)\n\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n for method in self.METHODS:\n with self.cached_session():\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images_v2(image, [height, width], method)\n yshape = array_ops.shape(y)\n resized, newshape = self.evaluate([y, yshape])\n self.assertAllEqual(img_shape, newshape)\n if method in self.INTERPOLATING_METHODS:\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n def testResizeDown(self):\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n expected_data = [127, 64, 64, 127, 50, 100]\n target_height = 3\n target_width = 2\n\n # Test out 3-D and 4-D image shapes.\n img_shapes = [[1, 6, 4, 1], [6, 4, 1]]\n target_shapes = [[1, target_height, target_width, 1],\n [target_height, target_width, 1]]\n\n for target_shape, img_shape in zip(target_shapes, img_shapes):\n\n for nptype in self.TYPES:\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n\n for method in self.METHODS:\n if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images_v2(\n image, 
[target_height, target_width], method)\n expected = np.array(expected_data).reshape(target_shape)\n resized = self.evaluate(y)\n self.assertAllClose(resized, expected, atol=1e-5)\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n def testResizeUp(self):\n img_shape = [1, 3, 2, 1]\n data = [64, 32, 32, 64, 50, 100]\n target_height = 6\n target_width = 4\n expected_data = {}\n expected_data[image_ops.ResizeMethod.BILINEAR] = [\n 64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,\n 36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,\n 87.5, 100.0\n ]\n expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [\n 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,\n 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,\n 100.0\n ]\n expected_data[image_ops.ResizeMethod.AREA] = [\n 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,\n 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,\n 100.0\n ]\n expected_data[image_ops.ResizeMethod.LANCZOS3] = [\n 75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,\n 35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,\n 35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413\n ]\n expected_data[image_ops.ResizeMethod.LANCZOS5] = [\n 77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,\n 35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,\n 32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109\n ]\n expected_data[image_ops.ResizeMethod.GAUSSIAN] = [\n 61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,\n 41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,\n 47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619\n ]\n expected_data[image_ops.ResizeMethod.BICUBIC] = [\n 70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,\n 36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,\n 41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284\n ]\n expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [\n 66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,\n 39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,\n 43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739\n ]\n for nptype in self.TYPES:\n for method in expected_data:\n with self.cached_session(use_gpu=True):\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images_v2(image, [target_height, target_width],\n method)\n resized = self.evaluate(y)\n expected = np.array(expected_data[method]).reshape(\n [1, target_height, target_width, 1])\n self.assertAllClose(resized, expected, atol=1e-04)\n\n # XLA doesn't implement half_pixel_centers\n @test_util.disable_xla(\"b/127616992\")\n def testLegacyBicubicMethodsMatchNewMethods(self):\n img_shape = [1, 3, 2, 1]\n data = [64, 32, 32, 64, 50, 100]\n target_height = 6\n target_width = 4\n methods_to_test = ((gen_image_ops.resize_bilinear, \"triangle\"),\n (gen_image_ops.resize_bicubic, \"keyscubic\"))\n for legacy_method, new_method in methods_to_test:\n with self.cached_session(use_gpu=True):\n img_np = np.array(data, dtype=np.float32).reshape(img_shape)\n image = constant_op.constant(img_np, shape=img_shape)\n legacy_result = legacy_method(\n image,\n constant_op.constant([target_height, 
target_width],\n dtype=dtypes.int32),\n half_pixel_centers=True)\n scale = (\n constant_op.constant([target_height, target_width],\n dtype=dtypes.float32) /\n math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))\n new_result = gen_image_ops.scale_and_translate(\n image,\n constant_op.constant([target_height, target_width],\n dtype=dtypes.int32),\n scale,\n array_ops.zeros([2]),\n kernel_type=new_method,\n antialias=False)\n self.assertAllClose(\n self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)\n\n def testResizeDownArea(self):\n img_shape = [1, 6, 6, 1]\n data = [\n 128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,\n 10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30\n ]\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n target_height = 4\n target_width = 4\n expected_data = [\n 73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21\n ]\n\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images_v2(image, [target_height, target_width],\n image_ops.ResizeMethod.AREA)\n expected = np.array(expected_data).reshape(\n [1, target_height, target_width, 1])\n resized = self.evaluate(y)\n self.assertAllClose(resized, expected, atol=1)\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n def testCompareNearestNeighbor(self):\n if test.is_gpu_available():\n input_shape = [1, 5, 6, 3]\n target_height = 8\n target_width = 12\n for nptype in [np.float32, np.float64]:\n img_np = np.arange(\n 0, np.prod(input_shape), dtype=nptype).reshape(input_shape)\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images_v2(\n image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)\n gpu_val = self.evaluate(out_op)\n with self.cached_session(use_gpu=False):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images_v2(\n image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)\n cpu_val = self.evaluate(out_op)\n self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n def testBfloat16MultipleOps(self):\n target_height = 8\n target_width = 12\n img = np.random.uniform(0, 100, size=(30, 10, 2)).astype(np.float32)\n img_bf16 = ops.convert_to_tensor(img, dtype=\"bfloat16\")\n new_size = constant_op.constant([target_height, target_width])\n img_methods = [\n image_ops.ResizeMethod.BILINEAR,\n image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC,\n image_ops.ResizeMethod.AREA\n ]\n for method in img_methods:\n out_op_bf16 = image_ops.resize_images_v2(img_bf16, new_size, method)\n out_op_f32 = image_ops.resize_images_v2(img, new_size, method)\n bf16_val = self.evaluate(out_op_bf16)\n f32_val = self.evaluate(out_op_f32)\n self.assertAllClose(bf16_val, f32_val, rtol=1e-2, atol=1e-2)\n\n def testCompareBilinear(self):\n if test.is_gpu_available():\n input_shape = [1, 5, 6, 3]\n target_height = 8\n target_width = 12\n for nptype in [np.float32, np.float64]:\n img_np = np.arange(\n 0, np.prod(input_shape), dtype=nptype).reshape(input_shape)\n value = {}\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = 
constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethod.BILINEAR)\n value[use_gpu] = self.evaluate(out_op)\n self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)\n\n @test_util.run_deprecated_v1\n def testShapeInference(self):\n self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])\n self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])\n self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])\n self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])\n self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])\n self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])\n\n @test_util.run_deprecated_v1\n def testNameScope(self):\n with self.cached_session(use_gpu=True):\n single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])\n y = image_ops.resize_images(single_image, [55, 66])\n self.assertTrue(y.op.name.startswith(\"resize\"))\n\n def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,\n use_tensor_inputs):\n if use_tensor_inputs:\n target_max = ops.convert_to_tensor([max_h, max_w])\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n target_max = [max_h, max_w]\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.resize_images(\n x_tensor,\n ops.convert_to_tensor(target_max),\n preserve_aspect_ratio=preserve_aspect_ratio)\n\n with self.cached_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertResizeEqual(self,\n x,\n x_shape,\n y,\n y_shape,\n preserve_aspect_ratio=True,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageCall(x, target_height, target_width,\n preserve_aspect_ratio, use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertResizeCheckShape(self,\n x,\n x_shape,\n target_shape,\n y_shape,\n preserve_aspect_ratio=True,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width = target_shape\n x = np.array(x).reshape(x_shape)\n y = np.zeros(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageCall(x, target_height, target_width,\n preserve_aspect_ratio, use_tensor_inputs)\n self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioMultipleImages(self):\n x_shape = [10, 100, 80, 10]\n x = 
np.random.uniform(size=x_shape)\n for preserve_aspect_ratio in [True, False]:\n with self.subTest(preserve_aspect_ratio=preserve_aspect_ratio):\n expect_shape = [10, 250, 200, 10] if preserve_aspect_ratio \\\n else [10, 250, 250, 10]\n self._assertResizeCheckShape(\n x,\n x_shape, [250, 250],\n expect_shape,\n preserve_aspect_ratio=preserve_aspect_ratio)\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeEqual(x, x_shape, x, x_shape)\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioSmaller(self):\n x_shape = [100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioSmallerMultipleImages(self):\n x_shape = [10, 100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioLarger(self):\n x_shape = [100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioSameRatio(self):\n x_shape = [1920, 1080, 3]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioSquare(self):\n x_shape = [299, 299, 3]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])\n\n\nclass ResizeImagesTest(test_util.TensorFlowTestCase):\n\n METHODS = [\n image_ops.ResizeMethodV1.BILINEAR,\n image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,\n image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA\n ]\n\n TYPES = [\n np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,\n np.float32, np.float64\n ]\n\n def _assertShapeInference(self, pre_shape, size, post_shape):\n # Try single image resize\n single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.resize_images(single_image, size)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n # Try batch images resize with known batch size\n images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)\n y = image_ops.resize_images(images, size)\n self.assertEqual(y.get_shape().as_list(), [99] + post_shape)\n # Try batch images resize with unknown batch size\n images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)\n y = image_ops.resize_images(images, size)\n self.assertEqual(y.get_shape().as_list(), [None] + post_shape)\n\n def shouldRunOnGPU(self, method, nptype):\n if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and\n nptype in [np.float32, np.float64]):\n return True\n else:\n return False\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n @test_util.run_deprecated_v1\n def testNoOp(self):\n img_shape = [1, 6, 4, 1]\n single_shape = [6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n target_height = 6\n target_width = 4\n\n for nptype in self.TYPES:\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n\n for method in self.METHODS:\n with self.cached_session(use_gpu=True) as sess:\n image = constant_op.constant(img_np, 
shape=img_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n method)\n yshape = array_ops.shape(y)\n resized, newshape = self.evaluate([y, yshape])\n self.assertAllEqual(img_shape, newshape)\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n # Resizing with a single image must leave the shape unchanged also.\n with self.cached_session(use_gpu=True):\n img_single = img_np.reshape(single_shape)\n image = constant_op.constant(img_single, shape=single_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n self.METHODS[0])\n yshape = array_ops.shape(y)\n newshape = self.evaluate(yshape)\n self.assertAllEqual(single_shape, newshape)\n\n @test_util.run_deprecated_v1\n def testTensorArguments(self):\n img_shape = [1, 6, 4, 1]\n single_shape = [6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n new_size = array_ops.placeholder(dtypes.int32, shape=(2))\n\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n for method in self.METHODS:\n with self.cached_session(use_gpu=True) as sess:\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, new_size, method)\n yshape = array_ops.shape(y)\n resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})\n self.assertAllEqual(img_shape, newshape)\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n # Resizing with a single image must leave the shape unchanged also.\n with self.cached_session(use_gpu=True):\n img_single = img_np.reshape(single_shape)\n image = constant_op.constant(img_single, shape=single_shape)\n y = image_ops.resize_images(image, new_size, self.METHODS[0])\n yshape = array_ops.shape(y)\n resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})\n self.assertAllEqual(single_shape, newshape)\n self.assertAllClose(resized, img_single, atol=1e-5)\n\n # Incorrect shape.\n with self.assertRaises(ValueError):\n new_size = constant_op.constant(4)\n _ = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethodV1.BILINEAR)\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([4])\n _ = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethodV1.BILINEAR)\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([1, 2, 3])\n _ = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethodV1.BILINEAR)\n\n # Incorrect dtypes.\n with self.assertRaises(ValueError):\n new_size = constant_op.constant([6.0, 4])\n _ = image_ops.resize_images(image, new_size,\n image_ops.ResizeMethodV1.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images(image, [6, 4.0],\n image_ops.ResizeMethodV1.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images(image, [None, 4],\n image_ops.ResizeMethodV1.BILINEAR)\n with self.assertRaises(ValueError):\n _ = image_ops.resize_images(image, [6, None],\n image_ops.ResizeMethodV1.BILINEAR)\n\n @test_util.run_deprecated_v1\n def testReturnDtype(self):\n target_shapes = [[6, 4], [3, 2], [\n array_ops.placeholder(dtypes.int32),\n array_ops.placeholder(dtypes.int32)\n ]]\n for nptype in self.TYPES:\n image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])\n for method in self.METHODS:\n for target_shape in target_shapes:\n y = image_ops.resize_images(image, target_shape, method)\n if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or\n target_shape == 
image.shape[1:3]):\n expected_dtype = image.dtype\n else:\n expected_dtype = dtypes.float32\n self.assertEqual(y.dtype, expected_dtype)\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n def testSumTensor(self):\n img_shape = [1, 6, 4, 1]\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n # Test size where width is specified as a tensor which is a sum\n # of two tensors.\n width_1 = constant_op.constant(1)\n width_2 = constant_op.constant(3)\n width = math_ops.add(width_1, width_2)\n height = constant_op.constant(6)\n\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n for method in self.METHODS:\n with self.cached_session() as sess:\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [height, width], method)\n yshape = array_ops.shape(y)\n resized, newshape = self.evaluate([y, yshape])\n self.assertAllEqual(img_shape, newshape)\n self.assertAllClose(resized, img_np, atol=1e-5)\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n def testResizeDown(self):\n # This test is also conducted with int8, so 127 is the maximum\n # value that can be used.\n data = [\n 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,\n 50, 50, 100, 100, 50, 50, 100, 100\n ]\n expected_data = [127, 64, 64, 127, 50, 100]\n target_height = 3\n target_width = 2\n\n # Test out 3-D and 4-D image shapes.\n img_shapes = [[1, 6, 4, 1], [6, 4, 1]]\n target_shapes = [[1, target_height, target_width, 1],\n [target_height, target_width, 1]]\n\n for target_shape, img_shape in zip(target_shapes, img_shapes):\n\n for nptype in self.TYPES:\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n\n for method in self.METHODS:\n if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n method)\n expected = np.array(expected_data).reshape(target_shape)\n resized = self.evaluate(y)\n self.assertAllClose(resized, expected, atol=1e-5)\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n def testResizeUpAlignCornersFalse(self):\n img_shape = [1, 3, 2, 1]\n data = [64, 32, 32, 64, 50, 100]\n target_height = 6\n target_width = 4\n expected_data = {}\n expected_data[image_ops.ResizeMethodV1.BILINEAR] = [\n 64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,\n 41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,\n 100.0\n ]\n expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [\n 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,\n 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,\n 100.0\n ]\n expected_data[image_ops.ResizeMethodV1.AREA] = [\n 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,\n 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,\n 100.0\n ]\n\n for nptype in self.TYPES:\n for method in [\n image_ops.ResizeMethodV1.BILINEAR,\n image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,\n image_ops.ResizeMethodV1.AREA\n ]:\n with self.cached_session(use_gpu=True):\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(\n image, [target_height, 
target_width], method, align_corners=False)\n resized = self.evaluate(y)\n expected = np.array(expected_data[method]).reshape(\n [1, target_height, target_width, 1])\n self.assertAllClose(resized, expected, atol=1e-05)\n\n def testResizeUpAlignCornersTrue(self):\n img_shape = [1, 3, 2, 1]\n data = [6, 3, 3, 6, 6, 9]\n target_height = 5\n target_width = 4\n expected_data = {}\n expected_data[image_ops.ResizeMethodV1.BILINEAR] = [\n 6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,\n 6.5, 7.5, 6.0, 7.0, 8.0, 9.0\n ]\n expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [\n 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,\n 9.0, 9.0, 6.0, 6.0, 9.0, 9.0\n ]\n # TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when\n # align_corners=True.\n expected_data[image_ops.ResizeMethodV1.AREA] = [\n 6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,\n 3.0, 6.0, 6.0, 6.0, 6.0, 9.0\n ]\n\n for nptype in self.TYPES:\n for method in [\n image_ops.ResizeMethodV1.BILINEAR,\n image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,\n image_ops.ResizeMethodV1.AREA\n ]:\n with self.cached_session(use_gpu=True):\n img_np = np.array(data, dtype=nptype).reshape(img_shape)\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(\n image, [target_height, target_width], method, align_corners=True)\n resized = self.evaluate(y)\n expected = np.array(expected_data[method]).reshape(\n [1, target_height, target_width, 1])\n self.assertAllClose(resized, expected, atol=1e-05)\n\n def testResizeUpBicubic(self):\n img_shape = [1, 6, 6, 1]\n data = [\n 128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,\n 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,\n 50, 50, 100, 100\n ]\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n target_height = 8\n target_width = 8\n expected_data = [\n 128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,\n 55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,\n 105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,\n 75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105\n ]\n\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n image_ops.ResizeMethodV1.BICUBIC)\n resized = self.evaluate(y)\n expected = np.array(expected_data).reshape(\n [1, target_height, target_width, 1])\n self.assertAllClose(resized, expected, atol=1)\n\n def testResizeDownArea(self):\n img_shape = [1, 6, 6, 1]\n data = [\n 128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,\n 10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30\n ]\n img_np = np.array(data, dtype=np.uint8).reshape(img_shape)\n\n target_height = 4\n target_width = 4\n expected_data = [\n 73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21\n ]\n\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=img_shape)\n y = image_ops.resize_images(image, [target_height, target_width],\n image_ops.ResizeMethodV1.AREA)\n expected = np.array(expected_data).reshape(\n [1, target_height, target_width, 1])\n resized = self.evaluate(y)\n self.assertAllClose(resized, expected, atol=1)\n\n @test_util.disable_xla(\"align_corners=False not supported by XLA\")\n def testCompareNearestNeighbor(self):\n if test.is_gpu_available():\n input_shape = [1, 5, 6, 3]\n target_height = 8\n 
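# Added note (best effort): this compares CPU and GPU nearest-neighbor\n      # kernels for both align_corners settings; align_corners=True pins the\n      # corner pixels of input and output to the same sample points, while\n      # False uses plain in_size/out_size scaling.\n      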
target_width = 12\n for nptype in [np.float32, np.float64]:\n for align_corners in [True, False]:\n img_np = np.arange(\n 0, np.prod(input_shape), dtype=nptype).reshape(input_shape)\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images(\n image,\n new_size,\n image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,\n align_corners=align_corners)\n gpu_val = self.evaluate(out_op)\n with self.cached_session(use_gpu=False):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images(\n image,\n new_size,\n image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,\n align_corners=align_corners)\n cpu_val = self.evaluate(out_op)\n self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)\n\n def testCompareBilinear(self):\n if test.is_gpu_available():\n input_shape = [1, 5, 6, 3]\n target_height = 8\n target_width = 12\n for nptype in [np.float32, np.float64]:\n for align_corners in [True, False]:\n img_np = np.arange(\n 0, np.prod(input_shape), dtype=nptype).reshape(input_shape)\n value = {}\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n image = constant_op.constant(img_np, shape=input_shape)\n new_size = constant_op.constant([target_height, target_width])\n out_op = image_ops.resize_images(\n image,\n new_size,\n image_ops.ResizeMethodV1.BILINEAR,\n align_corners=align_corners)\n value[use_gpu] = self.evaluate(out_op)\n self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)\n\n @test_util.run_deprecated_v1\n def testShapeInference(self):\n self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])\n self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])\n self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])\n self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])\n self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])\n self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])\n self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])\n\n @test_util.run_deprecated_v1\n def testNameScope(self):\n img_shape = [1, 3, 2, 1]\n with self.cached_session(use_gpu=True):\n single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])\n y = image_ops.resize_images(single_image, [55, 66])\n self.assertTrue(y.op.name.startswith(\"resize\"))\n\n def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,\n use_tensor_inputs):\n if use_tensor_inputs:\n target_max = ops.convert_to_tensor([max_h, max_w])\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n target_max = [max_h, 
max_w]\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.resize_images(x_tensor, target_max,\n preserve_aspect_ratio=preserve_aspect_ratio)\n\n with self.cached_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertResizeEqual(self, x, x_shape, y, y_shape,\n preserve_aspect_ratio=True,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageCall(x, target_height, target_width,\n preserve_aspect_ratio, use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertResizeCheckShape(self, x, x_shape, target_shape,\n y_shape, preserve_aspect_ratio=True,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width = target_shape\n x = np.array(x).reshape(x_shape)\n y = np.zeros(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageCall(x, target_height, target_width,\n preserve_aspect_ratio, use_tensor_inputs)\n self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioMultipleImages(self):\n x_shape = [10, 100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],\n preserve_aspect_ratio=False)\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeEqual(x, x_shape, x, x_shape)\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioSmaller(self):\n x_shape = [100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioSmallerMultipleImages(self):\n x_shape = [10, 100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioLarger(self):\n x_shape = [100, 100, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioSameRatio(self):\n x_shape = [1920, 1080, 3]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])\n\n @test_util.run_deprecated_v1\n def testPreserveAspectRatioSquare(self):\n x_shape = [299, 299, 3]\n x = np.random.uniform(size=x_shape)\n\n self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])\n\n\nclass ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):\n\n def _ResizeImageWithPad(self, x, target_height, target_width,\n use_tensor_inputs):\n if use_tensor_inputs:\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.resize_image_with_pad_v1(x_tensor, target_height,\n target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.cached_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n y,\n y_shape,\n 
use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageWithPad(x, target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n try:\n self._ResizeImageWithPad(x, target_height, target_width,\n use_tensor_inputs)\n except Exception as e: # pylint: disable=broad-except\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.resize_image_with_pad_v1(image, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n @test_util.run_deprecated_v1\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertReturns(x, x_shape, x, x_shape)\n\n @test_util.run_deprecated_v1\n def testPad(self):\n # Reduce vertical dimension\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 1, 3, 0]\n y_shape = [1, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Reduce horizontal dimension\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [1, 3, 0, 0]\n y_shape = [2, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [1, 3]\n y_shape = [1, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n\n# half_pixel_centers not supported by XLA\n@test_util.for_all_test_methods(test_util.disable_xla, \"b/127616992\")\nclass ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):\n\n def _ResizeImageWithPad(self, x, target_height, target_width,\n use_tensor_inputs):\n if use_tensor_inputs:\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.resize_image_with_pad_v2(x_tensor, target_height,\n target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.cached_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n y,\n y_shape,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageWithPad(x, target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n try:\n self._ResizeImageWithPad(x, target_height, target_width,\n use_tensor_inputs)\n except Exception as e: # pylint: 
disable=broad-except\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.resize_image_with_pad_v1(image, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n\n @test_util.run_deprecated_v1\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertReturns(x, x_shape, x, x_shape)\n\n @test_util.run_deprecated_v1\n def testPad(self):\n # Reduce vertical dimension\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 3.5, 5.5, 0]\n y_shape = [1, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Reduce horizontal dimension\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [3.5, 5.5, 0, 0]\n y_shape = [2, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [3.5, 5.5]\n y_shape = [1, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n\nclass ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):\n\n def _ResizeImageWithCropOrPad(self, x, target_height, target_width,\n use_tensor_inputs):\n if use_tensor_inputs:\n target_height = ops.convert_to_tensor(target_height)\n target_width = ops.convert_to_tensor(target_width)\n x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)\n feed_dict = {x_tensor: x}\n else:\n x_tensor = x\n feed_dict = {}\n\n y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,\n target_width)\n if not use_tensor_inputs:\n self.assertTrue(y.get_shape().is_fully_defined())\n\n with self.cached_session(use_gpu=True):\n return y.eval(feed_dict=feed_dict)\n\n def _assertReturns(self,\n x,\n x_shape,\n y,\n y_shape,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n target_height, target_width, _ = y_shape\n x = np.array(x).reshape(x_shape)\n y = np.array(y).reshape(y_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,\n use_tensor_inputs)\n self.assertAllClose(y, y_tf)\n\n def _assertRaises(self,\n x,\n x_shape,\n target_height,\n target_width,\n err_msg,\n use_tensor_inputs_options=None):\n use_tensor_inputs_options = use_tensor_inputs_options or [False, True]\n x = np.array(x).reshape(x_shape)\n\n for use_tensor_inputs in use_tensor_inputs_options:\n try:\n self._ResizeImageWithCropOrPad(x, target_height, target_width,\n use_tensor_inputs)\n except Exception as e:\n if err_msg not in str(e):\n raise\n else:\n raise AssertionError(\"Exception not raised: %s\" % err_msg)\n\n def _assertShapeInference(self, pre_shape, height, width, post_shape):\n image = array_ops.placeholder(dtypes.float32, shape=pre_shape)\n y = image_ops.resize_image_with_crop_or_pad(image, height, width)\n self.assertEqual(y.get_shape().as_list(), post_shape)\n\n @test_util.run_deprecated_v1\n def testNoOp(self):\n x_shape = [10, 10, 10]\n x = np.random.uniform(size=x_shape)\n\n self._assertReturns(x, x_shape, x, x_shape)\n\n @test_util.run_deprecated_v1\n def testPad(self):\n # Pad even along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]\n y_shape = [2, 6, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Pad odd along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 1, 2, 3, 4, 0, 0, 
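# (added note) the odd pad of 3 columns is split as 1 zero on the left and\n         # 2 on the right of each row, since crop_or_pad centers the original image.\n         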
0, 5, 6, 7, 8, 0, 0]\n y_shape = [2, 7, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Pad even along row.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]\n y_shape = [4, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Pad odd along row.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]\n y_shape = [5, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n @test_util.run_deprecated_v1\n def testCrop(self):\n # Crop even along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [2, 3, 6, 7]\n y_shape = [2, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Crop odd along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n x_shape = [2, 6, 1]\n\n y = [2, 3, 4, 8, 9, 10]\n y_shape = [2, 3, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Crop even along row.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [4, 2, 1]\n\n y = [3, 4, 5, 6]\n y_shape = [2, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Crop odd along row.\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n x_shape = [8, 2, 1]\n\n y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n y_shape = [5, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n @test_util.run_deprecated_v1\n def testCropAndPad(self):\n # Pad along row but crop along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [2, 4, 1]\n\n y = [0, 0, 2, 3, 6, 7, 0, 0]\n y_shape = [4, 2, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n # Crop along row but pad along col.\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n x_shape = [4, 2, 1]\n\n y = [0, 3, 4, 0, 0, 5, 6, 0]\n y_shape = [2, 4, 1]\n\n self._assertReturns(x, x_shape, y, y_shape)\n\n @test_util.run_deprecated_v1\n def testShapeInference(self):\n self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])\n self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])\n self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])\n self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])\n self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])\n self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])\n self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])\n self._assertShapeInference(None, 55, 66, [55, 66, None])\n\n @test_util.run_deprecated_v1\n def testNon3DInput(self):\n # Input image is not 3D\n x = [0] * 15\n target_height, target_width = [4, 4]\n\n for x_shape in ([3, 5],):\n self._assertRaises(x, x_shape, target_height, target_width,\n \"must have either 3 or 4 dimensions.\")\n\n for x_shape in ([1, 3, 5, 1, 1],):\n self._assertRaises(x, x_shape, target_height, target_width,\n \"must have either 3 or 4 dimensions.\")\n\n @test_util.run_deprecated_v1\n def 
testZeroLengthInput(self):\n # Input image has 0-length dimension(s).\n target_height, target_width = [1, 1]\n x = []\n\n for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):\n self._assertRaises(\n x,\n x_shape,\n target_height,\n target_width,\n \"inner 3 dims of 'image.shape' must be > 0\",\n use_tensor_inputs_options=[False])\n\n # The original error message does not contain back slashes. However, they\n # are added by either the assert op or the runtime. If this behavior\n # changes in the future, the match string will also needs to be changed.\n self._assertRaises(\n x,\n x_shape,\n target_height,\n target_width,\n \"inner 3 dims of \\\\'image.shape\\\\' must be > 0\",\n use_tensor_inputs_options=[True])\n\n @test_util.run_deprecated_v1\n def testBadParams(self):\n x_shape = [4, 4, 1]\n x = np.zeros(x_shape)\n\n # target_height <= 0\n target_height, target_width = [0, 5]\n self._assertRaises(x, x_shape, target_height, target_width,\n \"target_height must be > 0\")\n\n # target_width <= 0\n target_height, target_width = [5, 0]\n self._assertRaises(x, x_shape, target_height, target_width,\n \"target_width must be > 0\")\n\n @test_util.run_deprecated_v1\n def testNameScope(self):\n image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])\n y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)\n self.assertTrue(y.op.name.startswith(\"resize_image_with_crop_or_pad\"))\n\n\ndef simple_color_ramp():\n \"\"\"Build a simple color ramp RGB image.\"\"\"\n w, h = 256, 200\n i = np.arange(h)[:, None]\n j = np.arange(w)\n image = np.empty((h, w, 3), dtype=np.uint8)\n image[:, :, 0] = i\n image[:, :, 1] = j\n image[:, :, 2] = (i + j) >> 1\n return image\n\n\nclass JpegTest(test_util.TensorFlowTestCase):\n\n # TODO(irving): Add self.assertAverageLess or similar to test_util\n def averageError(self, image0, image1):\n self.assertEqual(image0.shape, image1.shape)\n image0 = image0.astype(int) # Avoid overflow\n return np.abs(image0 - image1).sum() / np.prod(image0.shape)\n\n def testExisting(self):\n # Read a real jpeg and verify shape\n path = (\"tensorflow/core/lib/jpeg/testdata/\"\n \"jpeg_merge_test1.jpg\")\n with self.cached_session(use_gpu=True) as sess:\n jpeg0 = io_ops.read_file(path)\n image0 = image_ops.decode_jpeg(jpeg0)\n image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))\n jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])\n self.assertEqual(len(jpeg0), 3771)\n self.assertEqual(image0.shape, (256, 128, 3))\n self.assertLess(self.averageError(image0, image1), 1.4)\n\n def testCmyk(self):\n # Confirm that CMYK reads in as RGB\n base = \"tensorflow/core/lib/jpeg/testdata\"\n rgb_path = os.path.join(base, \"jpeg_merge_test1.jpg\")\n cmyk_path = os.path.join(base, \"jpeg_merge_test1_cmyk.jpg\")\n shape = 256, 128, 3\n for channels in 3, 0:\n with self.cached_session(use_gpu=True) as sess:\n rgb = image_ops.decode_jpeg(\n io_ops.read_file(rgb_path), channels=channels)\n cmyk = image_ops.decode_jpeg(\n io_ops.read_file(cmyk_path), channels=channels)\n rgb, cmyk = self.evaluate([rgb, cmyk])\n self.assertEqual(rgb.shape, shape)\n self.assertEqual(cmyk.shape, shape)\n error = self.averageError(rgb, cmyk)\n self.assertLess(error, 4)\n\n def testCropAndDecodeJpeg(self):\n with self.cached_session() as sess:\n # Encode it, then decode it, then encode it\n base = \"tensorflow/core/lib/jpeg/testdata\"\n jpeg0 = io_ops.read_file(os.path.join(base, \"jpeg_merge_test1.jpg\"))\n\n h, w, _ = 256, 128, 3\n crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],\n [h - 6, w - 
5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]\n for crop_window in crop_windows:\n # Explicit two stages: decode + crop.\n image1 = image_ops.decode_jpeg(jpeg0)\n y, x, h, w = crop_window\n image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)\n\n # Combined decode+crop.\n image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)\n\n # Combined decode+crop should have the same shape inference\n self.assertAllEqual(image1_crop.get_shape().as_list(),\n image2.get_shape().as_list())\n\n # CropAndDecode should be equal to DecodeJpeg+Crop.\n image1_crop, image2 = self.evaluate([image1_crop, image2])\n self.assertAllEqual(image1_crop, image2)\n\n @test_util.run_deprecated_v1\n def testCropAndDecodeJpegWithInvalidCropWindow(self):\n with self.cached_session() as sess:\n # Encode it, then decode it, then encode it\n base = \"tensorflow/core/lib/jpeg/testdata\"\n jpeg0 = io_ops.read_file(os.path.join(base, \"jpeg_merge_test1.jpg\"))\n\n h, w, _ = 256, 128, 3\n # Invalid crop windows.\n crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],\n [11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],\n [0, 0, h + 1, w], [0, 0, h, w + 1]]\n for crop_window in crop_windows:\n result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n lambda e: \"Invalid JPEG data or crop window\" in str(e)):\n self.evaluate(result)\n\n def testSynthetic(self):\n with self.cached_session(use_gpu=True) as sess:\n # Encode it, then decode it, then encode it\n image0 = constant_op.constant(simple_color_ramp())\n jpeg0 = image_ops.encode_jpeg(image0)\n image1 = image_ops.decode_jpeg(jpeg0, dct_method=\"INTEGER_ACCURATE\")\n image2 = image_ops.decode_jpeg(\n image_ops.encode_jpeg(image1), dct_method=\"INTEGER_ACCURATE\")\n jpeg0, image0, image1, image2 = self.evaluate(\n [jpeg0, image0, image1, image2])\n\n # The decoded-encoded image should be similar to the input\n self.assertLess(self.averageError(image0, image1), 0.6)\n\n # We should be very close to a fixpoint\n self.assertLess(self.averageError(image1, image2), 0.02)\n\n # Smooth ramps compress well (input size is 153600)\n self.assertGreaterEqual(len(jpeg0), 5000)\n self.assertLessEqual(len(jpeg0), 6000)\n\n def testSyntheticFasterAlgorithm(self):\n with self.cached_session(use_gpu=True) as sess:\n # Encode it, then decode it, then encode it\n image0 = constant_op.constant(simple_color_ramp())\n jpeg0 = image_ops.encode_jpeg(image0)\n image1 = image_ops.decode_jpeg(jpeg0, dct_method=\"INTEGER_FAST\")\n image2 = image_ops.decode_jpeg(\n image_ops.encode_jpeg(image1), dct_method=\"INTEGER_FAST\")\n jpeg0, image0, image1, image2 = self.evaluate(\n [jpeg0, image0, image1, image2])\n\n # The decoded-encoded image should be similar to the input, but\n # note this is worse than the slower algorithm because it is\n # less accurate.\n self.assertLess(self.averageError(image0, image1), 0.95)\n\n # Repeated compression / decompression will have a higher error\n # with a lossier algorithm.\n self.assertLess(self.averageError(image1, image2), 1.05)\n\n # Smooth ramps compress well (input size is 153600)\n self.assertGreaterEqual(len(jpeg0), 5000)\n self.assertLessEqual(len(jpeg0), 6000)\n\n def testDefaultDCTMethodIsIntegerFast(self):\n with self.cached_session(use_gpu=True) as sess:\n # Compare decoding with both dct_option=INTEGER_FAST and\n # default. 
They should be the same.\n image0 = constant_op.constant(simple_color_ramp())\n jpeg0 = image_ops.encode_jpeg(image0)\n image1 = image_ops.decode_jpeg(jpeg0, dct_method=\"INTEGER_FAST\")\n image2 = image_ops.decode_jpeg(jpeg0)\n image1, image2 = self.evaluate([image1, image2])\n\n # The images should be the same.\n self.assertAllClose(image1, image2)\n\n @test_util.run_deprecated_v1\n def testShape(self):\n with self.cached_session(use_gpu=True) as sess:\n jpeg = constant_op.constant(\"nonsense\")\n for channels in 0, 1, 3:\n image = image_ops.decode_jpeg(jpeg, channels=channels)\n self.assertEqual(image.get_shape().as_list(),\n [None, None, channels or None])\n\n @test_util.run_deprecated_v1\n def testExtractJpegShape(self):\n # Read a real jpeg and verify shape.\n path = (\"tensorflow/core/lib/jpeg/testdata/\"\n \"jpeg_merge_test1.jpg\")\n with self.cached_session(use_gpu=True) as sess:\n jpeg = io_ops.read_file(path)\n # Extract shape without decoding.\n [image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])\n self.assertEqual(image_shape.tolist(), [256, 128, 3])\n\n @test_util.run_deprecated_v1\n def testExtractJpegShapeforCmyk(self):\n # Read a cmyk jpeg image, and verify its shape.\n path = (\"tensorflow/core/lib/jpeg/testdata/\"\n \"jpeg_merge_test1_cmyk.jpg\")\n with self.cached_session(use_gpu=True) as sess:\n jpeg = io_ops.read_file(path)\n [image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])\n # Cmyk jpeg image has 4 channels.\n self.assertEqual(image_shape.tolist(), [256, 128, 4])\n\n def testRandomJpegQuality(self):\n # Previous implementation of random_jpeg_quality had a bug.\n # This unit test tests the fixed version, but due to forward compatibility\n # this test can only be done when fixed version is used.\n # Test jpeg quality dynamic randomization.\n with ops.Graph().as_default(), self.test_session():\n np.random.seed(7)\n path = (\"tensorflow/core/lib/jpeg/testdata/medium.jpg\")\n jpeg = io_ops.read_file(path)\n image = image_ops.decode_jpeg(jpeg)\n random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)\n with self.cached_session(use_gpu=True) as sess:\n # Test randomization.\n random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]\n are_images_equal = []\n for i in range(1, len(random_jpeg_images)):\n # Most of them should be different if randomization is occurring\n # correctly.\n are_images_equal.append(\n np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))\n self.assertFalse(all(are_images_equal))\n\n # TODO(b/162345082): stateless random op generates different random number\n # with xla_gpu. 
Update tests such that there is a single ground truth result\n # to test against.\n def testStatelessRandomJpegQuality(self):\n # Test deterministic randomness in jpeg quality by checking that the same\n # sequence of jpeg quality adjustments are returned each round given the\n # same seed.\n with test_util.use_gpu():\n path = (\"tensorflow/core/lib/jpeg/testdata/medium.jpg\")\n jpeg = io_ops.read_file(path)\n image = image_ops.decode_jpeg(jpeg)\n jpeg_quality = (40, 100)\n seeds_list = [(1, 2), (3, 4)]\n\n iterations = 2\n random_jpeg_images_all = [[] for _ in range(iterations)]\n for random_jpeg_images in random_jpeg_images_all:\n for seed in seeds_list:\n distorted_jpeg = image_ops.stateless_random_jpeg_quality(\n image, jpeg_quality[0], jpeg_quality[1], seed=seed)\n # Verify that the random jpeg image is different from the original\n # jpeg image.\n self.assertNotAllEqual(image, distorted_jpeg)\n random_jpeg_images.append(self.evaluate(distorted_jpeg))\n\n # Verify that the results are identical given the same seed.\n for i in range(1, iterations):\n self.assertAllEqual(random_jpeg_images_all[0],\n random_jpeg_images_all[i])\n\n def testAdjustJpegQuality(self):\n # Test if image_ops.adjust_jpeg_quality works when jpeq quality\n # is an int (not tensor) for backward compatibility.\n with ops.Graph().as_default(), self.test_session():\n np.random.seed(7)\n jpeg_quality = np.random.randint(40, 100)\n path = (\"tensorflow/core/lib/jpeg/testdata/medium.jpg\")\n jpeg = io_ops.read_file(path)\n image = image_ops.decode_jpeg(jpeg)\n adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(\n image, jpeg_quality)\n with self.cached_session(use_gpu=True) as sess:\n sess.run(adjust_jpeg_quality_image)\n\n @test_util.run_deprecated_v1\n def testAdjustJpegQualityShape(self):\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(\n np.arange(24, dtype=np.uint8).reshape([2, 4, 3]))\n adjusted_image = image_ops.adjust_jpeg_quality(image, 80)\n self.assertListEqual(adjusted_image.shape.as_list(),\n [None, None, 3])\n\n\nclass PngTest(test_util.TensorFlowTestCase):\n\n def testExisting(self):\n # Read some real PNGs, converting to different channel numbers\n prefix = \"tensorflow/core/lib/png/testdata/\"\n inputs = ((1, \"lena_gray.png\"), (4, \"lena_rgba.png\"),\n (3, \"lena_palette.png\"), (4, \"lena_palette_trns.png\"))\n for channels_in, filename in inputs:\n for channels in 0, 1, 3, 4:\n with self.cached_session(use_gpu=True) as sess:\n png0 = io_ops.read_file(prefix + filename)\n image0 = image_ops.decode_png(png0, channels=channels)\n png0, image0 = self.evaluate([png0, image0])\n self.assertEqual(image0.shape, (26, 51, channels or channels_in))\n if channels == channels_in:\n image1 = image_ops.decode_png(image_ops.encode_png(image0))\n self.assertAllEqual(image0, self.evaluate(image1))\n\n def testSynthetic(self):\n with self.cached_session(use_gpu=True) as sess:\n # Encode it, then decode it\n image0 = constant_op.constant(simple_color_ramp())\n png0 = image_ops.encode_png(image0, compression=7)\n image1 = image_ops.decode_png(png0)\n png0, image0, image1 = self.evaluate([png0, image0, image1])\n\n # PNG is lossless\n self.assertAllEqual(image0, image1)\n\n # Smooth ramps compress well, but not too well\n self.assertGreaterEqual(len(png0), 400)\n self.assertLessEqual(len(png0), 750)\n\n def testSyntheticUint16(self):\n with self.cached_session(use_gpu=True) as sess:\n # Encode it, then decode it\n image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16)\n 
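# Added note (best effort): encode_png and decode_png also accept uint16\n      # input, so the 16-bit ramp should round-trip losslessly like the\n      # uint8 case above.\n      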
png0 = image_ops.encode_png(image0, compression=7)\n image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)\n png0, image0, image1 = self.evaluate([png0, image0, image1])\n\n # PNG is lossless\n self.assertAllEqual(image0, image1)\n\n # Smooth ramps compress well, but not too well\n self.assertGreaterEqual(len(png0), 800)\n self.assertLessEqual(len(png0), 1500)\n\n def testSyntheticTwoChannel(self):\n with self.cached_session(use_gpu=True) as sess:\n # Strip the b channel from an rgb image to get a two-channel image.\n gray_alpha = simple_color_ramp()[:, :, 0:2]\n image0 = constant_op.constant(gray_alpha)\n png0 = image_ops.encode_png(image0, compression=7)\n image1 = image_ops.decode_png(png0)\n png0, image0, image1 = self.evaluate([png0, image0, image1])\n self.assertEqual(2, image0.shape[-1])\n self.assertAllEqual(image0, image1)\n\n def testSyntheticTwoChannelUint16(self):\n with self.cached_session(use_gpu=True) as sess:\n # Strip the b channel from an rgb image to get a two-channel image.\n gray_alpha = simple_color_ramp()[:, :, 0:2]\n image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)\n png0 = image_ops.encode_png(image0, compression=7)\n image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)\n png0, image0, image1 = self.evaluate([png0, image0, image1])\n self.assertEqual(2, image0.shape[-1])\n self.assertAllEqual(image0, image1)\n\n @test_util.run_deprecated_v1\n def testShape(self):\n with self.cached_session(use_gpu=True):\n png = constant_op.constant(\"nonsense\")\n for channels in 0, 1, 3:\n image = image_ops.decode_png(png, channels=channels)\n self.assertEqual(image.get_shape().as_list(),\n [None, None, channels or None])\n\n def testPaletteOnly(self):\n filename = \"tensorflow/core/lib/png/testdata/palette_only.png\"\n expected = np.zeros((20, 20, 1), np.uint8)\n expected[1, 1:19, :] = 1\n expected[3, 1:19, :] = 2\n with self.cached_session(use_gpu=True):\n channels = 1\n png = image_ops.decode_png(io_ops.read_file(filename), channels=channels)\n png = self.evaluate(png)\n self.assertAllEqual(expected, png)\n\n\nclass GifTest(test_util.TensorFlowTestCase):\n\n def _testValid(self, filename):\n # Read some real GIFs\n prefix = \"tensorflow/core/lib/gif/testdata/\"\n WIDTH = 20\n HEIGHT = 40\n STRIDE = 5\n shape = (12, HEIGHT, WIDTH, 3)\n\n with self.cached_session(use_gpu=True) as sess:\n gif0 = io_ops.read_file(prefix + filename)\n image0 = image_ops.decode_gif(gif0)\n gif0, image0 = self.evaluate([gif0, image0])\n\n self.assertEqual(image0.shape, shape)\n\n for frame_idx, frame in enumerate(image0):\n gt = np.zeros(shape[1:], dtype=np.uint8)\n start = frame_idx * STRIDE\n end = (frame_idx + 1) * STRIDE\n print(frame_idx)\n if end <= WIDTH:\n gt[:, start:end, :] = 255\n else:\n start -= WIDTH\n end -= WIDTH\n gt[start:end, :, :] = 255\n\n self.assertAllClose(frame, gt)\n\n def testValid(self):\n self._testValid(\"scan.gif\")\n self._testValid(\"optimized.gif\")\n\n @test_util.run_deprecated_v1\n def testShape(self):\n with self.cached_session(use_gpu=True) as sess:\n gif = constant_op.constant(\"nonsense\")\n image = image_ops.decode_gif(gif)\n self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])\n\n\nclass ConvertImageTest(test_util.TensorFlowTestCase):\n\n def _convert(self, original, original_dtype, output_dtype, expected):\n x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())\n y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())\n\n with self.cached_session(use_gpu=True):\n image = constant_op.constant(x_np)\n y = 
image_ops.convert_image_dtype(image, output_dtype)\n self.assertTrue(y.dtype == output_dtype)\n self.assertAllClose(y, y_np, atol=1e-5)\n if output_dtype in [\n dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64\n ]:\n y_saturate = image_ops.convert_image_dtype(\n image, output_dtype, saturate=True)\n self.assertTrue(y_saturate.dtype == output_dtype)\n self.assertAllClose(y_saturate, y_np, atol=1e-5)\n\n @test_util.run_deprecated_v1\n def testNoConvert(self):\n # Make sure converting to the same data type creates only an identity op\n with self.cached_session(use_gpu=True):\n image = constant_op.constant([1], dtype=dtypes.uint8)\n image_ops.convert_image_dtype(image, dtypes.uint8)\n y = image_ops.convert_image_dtype(image, dtypes.uint8)\n self.assertEqual(y.op.type, \"Identity\")\n self.assertEqual(y.op.inputs[0], image)\n\n @test_util.run_deprecated_v1\n def testConvertBetweenInteger(self):\n # Make sure converting to between integer types scales appropriately\n with self.cached_session(use_gpu=True):\n self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])\n self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])\n self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])\n self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])\n\n @test_util.run_deprecated_v1\n def testConvertBetweenFloat(self):\n # Make sure converting to between float types does nothing interesting\n with self.cached_session(use_gpu=True):\n self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,\n [-1.0, 0, 1.0, 200000])\n self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,\n [-1.0, 0, 1.0, 200000])\n\n @test_util.run_deprecated_v1\n def testConvertBetweenIntegerAndFloat(self):\n # Make sure converting from and to a float type scales appropriately\n with self.cached_session(use_gpu=True):\n self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,\n [0, 1.0 / 255.0, 1])\n self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,\n [0, 1, 255])\n\n @test_util.run_deprecated_v1\n def testConvertBetweenInt16AndInt8(self):\n with self.cached_session(use_gpu=True):\n # uint8, uint16\n self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])\n self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])\n # int8, uint16\n self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])\n self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])\n # int16, uint16\n self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])\n self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])\n\n\nclass TotalVariationTest(test_util.TensorFlowTestCase):\n \"\"\"Tests the function total_variation() in image_ops.\n\n We test a few small handmade examples, as well as\n some larger examples using an equivalent numpy\n implementation of the total_variation() function.\n\n We do NOT test for overflows and invalid / edge-case arguments.\n \"\"\"\n\n def _test(self, x_np, y_np):\n \"\"\"Test that the TensorFlow implementation of\n total_variation(x_np) calculates the values in y_np.\n\n Note that these may be float-numbers so we only test\n for approximate equality within some narrow error-bound.\n \"\"\"\n\n # Create a TensorFlow session.\n with self.cached_session(use_gpu=True):\n # Add a constant to the TensorFlow graph that holds the input.\n x_tf = constant_op.constant(x_np, shape=x_np.shape)\n\n # Add ops for calculating the total variation using TensorFlow.\n y = 
image_ops.total_variation(images=x_tf)\n\n # Run the TensorFlow session to calculate the result.\n y_tf = self.evaluate(y)\n\n # Assert that the results are as expected within\n # some small error-bound in case they are float-values.\n self.assertAllClose(y_tf, y_np)\n\n def _total_variation_np(self, x_np):\n \"\"\"Calculate the total variation of x_np using numpy.\n This implements the same function as TensorFlow but\n using numpy instead.\n\n Args:\n x_np: Numpy array with 3 or 4 dimensions.\n \"\"\"\n\n dim = len(x_np.shape)\n\n if dim == 3:\n # Calculate differences for neighboring pixel-values using slices.\n dif1 = x_np[1:, :, :] - x_np[:-1, :, :]\n dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]\n\n # Sum for all axis.\n sum_axis = None\n elif dim == 4:\n # Calculate differences for neighboring pixel-values using slices.\n dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]\n dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]\n\n # Only sum for the last 3 axis.\n sum_axis = (1, 2, 3)\n else:\n # This should not occur in this test-code.\n pass\n\n tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \\\n np.sum(np.abs(dif2), axis=sum_axis)\n\n return tot_var\n\n def _test_tensorflow_vs_numpy(self, x_np):\n \"\"\"Test the TensorFlow implementation against a numpy implementation.\n\n Args:\n x_np: Numpy array with 3 or 4 dimensions.\n \"\"\"\n\n # Calculate the y-values using the numpy implementation.\n y_np = self._total_variation_np(x_np)\n\n self._test(x_np, y_np)\n\n def _generateArray(self, shape):\n \"\"\"Generate an array of the given shape for use in testing.\n The numbers are calculated as the cumulative sum, which\n causes the difference between neighboring numbers to vary.\"\"\"\n\n # Flattened length of the array.\n flat_len = np.prod(shape)\n\n a = np.array(range(flat_len), dtype=int)\n a = np.cumsum(a)\n a = a.reshape(shape)\n\n return a\n\n # TODO(b/133851381): re-enable this test.\n def disabledtestTotalVariationNumpy(self):\n \"\"\"Test the TensorFlow implementation against a numpy implementation.\n The two implementations are very similar so it is possible that both\n have the same bug, which would not be detected by this test. It is\n therefore necessary to test with manually crafted data as well.\"\"\"\n\n # Generate a test-array.\n # This is an 'image' with 100x80 pixels and 3 color channels.\n a = self._generateArray(shape=(100, 80, 3))\n\n # Test the TensorFlow implementation vs. 
numpy implementation.\n    # We use a numpy implementation to check the results that are\n    # calculated using TensorFlow are correct.\n    self._test_tensorflow_vs_numpy(a)\n    self._test_tensorflow_vs_numpy(a + 1)\n    self._test_tensorflow_vs_numpy(-a)\n    self._test_tensorflow_vs_numpy(1.1 * a)\n\n    # Expand to a 4-dim array.\n    b = a[np.newaxis, :]\n\n    # Combine several variations of the image into a single 4-dim array.\n    multi = np.vstack((b, b + 1, -b, 1.1 * b))\n\n    # Test that the TensorFlow function can also handle 4-dim arrays.\n    self._test_tensorflow_vs_numpy(multi)\n\n  def testTotalVariationHandmade(self):\n    \"\"\"Test the total variation for a few handmade examples.\"\"\"\n\n    # We create an image that is 2x2 pixels with 3 color channels.\n    # The image is very small so we can check the result by hand.\n\n    # Red color channel.\n    # The following are the sum of absolute differences between the pixels.\n    # sum row dif = (4-1) + (7-2) = 3 + 5 = 8\n    # sum col dif = (2-1) + (7-4) = 1 + 3 = 4\n    r = [[1, 2], [4, 7]]\n\n    # Green color channel.\n    # sum row dif = 18 + 29 = 47\n    # sum col dif = 7 + 18 = 25\n    g = [[11, 18], [29, 47]]\n\n    # Blue color channel.\n    # sum row dif = 120 + 193 = 313\n    # sum col dif = 47 + 120 = 167\n    b = [[73, 120], [193, 313]]\n\n    # Combine the 3 color channels into a single 3-dim array.\n    # The shape is (2, 2, 3) corresponding to (height, width and color).\n    a = np.dstack((r, g, b))\n\n    # Total variation for this image.\n    # Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564\n    tot_var = 564\n\n    # Calculate the total variation using TensorFlow and assert it is correct.\n    self._test(a, tot_var)\n\n    # If we add 1 to all pixel-values then the total variation is unchanged.\n    self._test(a + 1, tot_var)\n\n    # If we negate all pixel-values then the total variation is unchanged.\n    self._test(-a, tot_var)\n\n    # Scale the pixel-values by a float. 
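(Added note: total variation is positively\n    # homogeneous, so TV(c * x) == c * TV(x) for any c >= 0.)\n    # 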
This scales the total variation as\n    # well.\n    b = 1.1 * a\n    self._test(b, 1.1 * tot_var)\n\n    # Scale by another float.\n    c = 1.2 * a\n    self._test(c, 1.2 * tot_var)\n\n    # Combine these 3 images into a single array of shape (3, 2, 2, 3)\n    # where the first dimension is for the image-number.\n    multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))\n\n    # Check that TensorFlow correctly calculates the total variation\n    # for each image individually and returns the correct array.\n    self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))\n\n\nclass FormatTest(test_util.TensorFlowTestCase):\n\n  @test_util.run_deprecated_v1\n  def testFormats(self):\n    prefix = \"tensorflow/core/lib\"\n    paths = (\"png/testdata/lena_gray.png\", \"jpeg/testdata/jpeg_merge_test1.jpg\",\n             \"gif/testdata/lena.gif\")\n    decoders = {\n        \"jpeg\": functools.partial(image_ops.decode_jpeg, channels=3),\n        \"png\": functools.partial(image_ops.decode_png, channels=3),\n        \"gif\": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),\n    }\n    with self.cached_session():\n      for path in paths:\n        contents = io_ops.read_file(os.path.join(prefix, path)).eval()\n        images = {}\n        for name, decode in decoders.items():\n          image = decode(contents).eval()\n          self.assertEqual(image.ndim, 3)\n          for prev_name, prev in images.items():\n            print(\"path %s, names %s %s, shapes %s %s\" %\n                  (path, name, prev_name, image.shape, prev.shape))\n            self.assertAllEqual(image, prev)\n          images[name] = image\n\n  def testError(self):\n    path = \"tensorflow/core/lib/gif/testdata/scan.gif\"\n    with self.cached_session():\n      for decode in image_ops.decode_jpeg, image_ops.decode_png:\n        with self.assertRaisesOpError(r\"Got 12 frames\"):\n          decode(io_ops.read_file(path)).eval()\n\n\nclass NonMaxSuppressionTest(test_util.TensorFlowTestCase):\n\n  @test_util.run_deprecated_v1\n  def testNonMaxSuppression(self):\n    boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],\n                [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]\n    scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]\n    max_output_size_np = 3\n    iou_threshold_np = 0.5\n    with self.cached_session():\n      boxes = constant_op.constant(boxes_np)\n      scores = constant_op.constant(scores_np)\n      max_output_size = constant_op.constant(max_output_size_np)\n      iou_threshold = constant_op.constant(iou_threshold_np)\n      selected_indices = image_ops.non_max_suppression(\n          boxes, scores, max_output_size, iou_threshold)\n      self.assertAllClose(selected_indices, [3, 0, 5])\n\n  @test_util.run_deprecated_v1\n  def testInvalidShape(self):\n    # The boxes should be 2D of shape [num_boxes, 4].\n    with self.assertRaisesRegex(ValueError,\n                                \"Shape must be rank 2 but is rank 1\"):\n      boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])\n      scores = constant_op.constant([0.9])\n      image_ops.non_max_suppression(boxes, scores, 3, 0.5)\n\n    with self.assertRaisesRegex(ValueError, \"Dimension must be 4 but is 3\"):\n      boxes = constant_op.constant([[0.0, 0.0, 1.0]])\n      scores = constant_op.constant([0.9])\n      image_ops.non_max_suppression(boxes, scores, 3, 0.5)\n\n    # The boxes are of shape [num_boxes, 4], and the scores are\n    # of shape [num_boxes]. 
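Below, one box is paired with two scores.\n    # 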
So an error will be thrown.\n with self.assertRaisesRegex(ValueError,\n \"Dimensions must be equal, but are 1 and 2\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])\n scores = constant_op.constant([0.9, 0.75])\n image_ops.non_max_suppression(boxes, scores, 3, 0.5)\n\n # The scores should be 1D of shape [num_boxes].\n with self.assertRaisesRegex(ValueError,\n \"Shape must be rank 1 but is rank 2\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])\n scores = constant_op.constant([[0.9]])\n image_ops.non_max_suppression(boxes, scores, 3, 0.5)\n\n # The max_output_size should be a scalar (0-D).\n with self.assertRaisesRegex(ValueError,\n \"Shape must be rank 0 but is rank 1\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])\n scores = constant_op.constant([0.9])\n image_ops.non_max_suppression(boxes, scores, [3], 0.5)\n\n # The iou_threshold should be a scalar (0-D).\n with self.assertRaisesRegex(ValueError,\n \"Shape must be rank 0 but is rank 2\"):\n boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])\n scores = constant_op.constant([0.9])\n image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])\n\n @test_util.run_deprecated_v1\n @test_util.xla_allow_fallback(\n \"non_max_suppression with dynamic output shape unsupported.\")\n def testDataTypes(self):\n # Test case for GitHub issue 20199.\n boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],\n [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]\n scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]\n max_output_size_np = 3\n iou_threshold_np = 0.5\n score_threshold_np = float(\"-inf\")\n # Note: There are multiple versions of non_max_suppression v2, v3, v4.\n # gen_image_ops.non_max_suppression_v2:\n for dtype in [np.float16, np.float32]:\n with self.cached_session():\n boxes = constant_op.constant(boxes_np, dtype=dtype)\n scores = constant_op.constant(scores_np, dtype=dtype)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)\n selected_indices = gen_image_ops.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold).eval()\n self.assertAllClose(selected_indices, [3, 0, 5])\n # gen_image_ops.non_max_suppression_v3\n for dtype in [np.float16, np.float32]:\n with self.cached_session():\n boxes = constant_op.constant(boxes_np, dtype=dtype)\n scores = constant_op.constant(scores_np, dtype=dtype)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)\n score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)\n selected_indices = gen_image_ops.non_max_suppression_v3(\n boxes, scores, max_output_size, iou_threshold, score_threshold)\n selected_indices = self.evaluate(selected_indices)\n self.assertAllClose(selected_indices, [3, 0, 5])\n # gen_image_ops.non_max_suppression_v4.\n for dtype in [np.float16, np.float32]:\n with self.cached_session():\n boxes = constant_op.constant(boxes_np, dtype=dtype)\n scores = constant_op.constant(scores_np, dtype=dtype)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)\n score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)\n selected_indices, _ = gen_image_ops.non_max_suppression_v4(\n boxes, scores, max_output_size, iou_threshold, score_threshold)\n selected_indices = self.evaluate(selected_indices)\n self.assertAllClose(selected_indices, [3, 0, 5])\n # gen_image_ops.non_max_suppression_v5.\n 
soft_nms_sigma_np = float(0.0)\n for dtype in [np.float16, np.float32]:\n with self.cached_session():\n boxes = constant_op.constant(boxes_np, dtype=dtype)\n scores = constant_op.constant(scores_np, dtype=dtype)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)\n score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)\n soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)\n selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(\n boxes, scores, max_output_size, iou_threshold, score_threshold,\n soft_nms_sigma)\n selected_indices = self.evaluate(selected_indices)\n self.assertAllClose(selected_indices, [3, 0, 5])\n\n\nclass NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):\n\n @test_util.run_deprecated_v1\n @test_util.xla_allow_fallback(\n \"non_max_suppression with dynamic output shape unsupported.\")\n def testSelectFromThreeClustersWithSoftNMS(self):\n boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],\n [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]\n scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]\n max_output_size_np = 6\n iou_threshold_np = 1.0\n score_threshold_np = 0.0\n soft_nms_sigma_np = 0.5\n boxes = constant_op.constant(boxes_np)\n scores = constant_op.constant(scores_np)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np)\n score_threshold = constant_op.constant(score_threshold_np)\n soft_nms_sigma = constant_op.constant(soft_nms_sigma_np)\n selected_indices, selected_scores = \\\n image_ops.non_max_suppression_with_scores(\n boxes,\n scores,\n max_output_size,\n iou_threshold,\n score_threshold,\n soft_nms_sigma)\n selected_indices, selected_scores = self.evaluate(\n [selected_indices, selected_scores])\n self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])\n self.assertAllClose(selected_scores,\n [0.95, 0.9, 0.384, 0.3, 0.256, 0.197],\n rtol=1e-2, atol=1e-2)\n\n\nclass NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):\n\n @test_util.run_deprecated_v1\n @test_util.disable_xla(\n \"b/141236442: \"\n \"non_max_suppression with dynamic output shape unsupported.\")\n def testSelectFromThreeClusters(self):\n boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],\n [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]\n scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]\n max_output_size_np = 5\n iou_threshold_np = 0.5\n boxes = constant_op.constant(boxes_np)\n scores = constant_op.constant(scores_np)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np)\n selected_indices_padded, num_valid_padded = \\\n image_ops.non_max_suppression_padded(\n boxes,\n scores,\n max_output_size,\n iou_threshold,\n pad_to_max_output_size=True)\n selected_indices, num_valid = image_ops.non_max_suppression_padded(\n boxes,\n scores,\n max_output_size,\n iou_threshold,\n pad_to_max_output_size=False)\n # The output shape of the padded operation must be fully defined.\n self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)\n self.assertEqual(selected_indices.shape.is_fully_defined(), False)\n with self.cached_session():\n self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])\n self.assertEqual(num_valid_padded.eval(), 3)\n self.assertAllClose(selected_indices, [3, 0, 5])\n self.assertEqual(num_valid.eval(), 3)\n\n @test_util.run_deprecated_v1\n @test_util.xla_allow_fallback(\n 
\"non_max_suppression with dynamic output shape unsupported.\")\n def testSelectFromContinuousOverLap(self):\n boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],\n [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]\n scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]\n max_output_size_np = 3\n iou_threshold_np = 0.5\n score_threshold_np = 0.1\n boxes = constant_op.constant(boxes_np)\n scores = constant_op.constant(scores_np)\n max_output_size = constant_op.constant(max_output_size_np)\n iou_threshold = constant_op.constant(iou_threshold_np)\n score_threshold = constant_op.constant(score_threshold_np)\n selected_indices, num_valid = image_ops.non_max_suppression_padded(\n boxes,\n scores,\n max_output_size,\n iou_threshold,\n score_threshold)\n # The output shape of the padded operation must be fully defined.\n self.assertEqual(selected_indices.shape.is_fully_defined(), False)\n with self.cached_session():\n self.assertAllClose(selected_indices, [0, 2, 4])\n self.assertEqual(num_valid.eval(), 3)\n\n\nclass NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):\n\n @test_util.run_deprecated_v1\n def testSelectOneFromThree(self):\n overlaps_np = [\n [1.0, 0.7, 0.2],\n [0.7, 1.0, 0.0],\n [0.2, 0.0, 1.0],\n ]\n scores_np = [0.7, 0.9, 0.1]\n max_output_size_np = 3\n\n overlaps = constant_op.constant(overlaps_np)\n scores = constant_op.constant(scores_np)\n max_output_size = constant_op.constant(max_output_size_np)\n overlap_threshold = 0.6\n score_threshold = 0.4\n\n selected_indices = image_ops.non_max_suppression_with_overlaps(\n overlaps, scores, max_output_size, overlap_threshold, score_threshold)\n\n with self.cached_session():\n self.assertAllClose(selected_indices, [1])\n\n\nclass VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):\n \"\"\"Tests utility function used by ssim() and psnr().\"\"\"\n\n @test_util.run_deprecated_v1\n def testWrongDims(self):\n img = array_ops.placeholder(dtype=dtypes.float32)\n img_np = np.array((2, 2))\n\n with self.cached_session(use_gpu=True) as sess:\n _, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)\n with self.assertRaises(errors.InvalidArgumentError):\n sess.run(checks, {img: img_np})\n\n @test_util.run_deprecated_v1\n def testShapeMismatch(self):\n img1 = array_ops.placeholder(dtype=dtypes.float32)\n img2 = array_ops.placeholder(dtype=dtypes.float32)\n\n img1_np = np.array([1, 2, 2, 1])\n img2_np = np.array([1, 3, 3, 1])\n\n with self.cached_session(use_gpu=True) as sess:\n _, _, checks = image_ops_impl._verify_compatible_image_shapes(img1, img2)\n with self.assertRaises(errors.InvalidArgumentError):\n sess.run(checks, {img1: img1_np, img2: img2_np})\n\n\nclass PSNRTest(test_util.TensorFlowTestCase):\n \"\"\"Tests for PSNR.\"\"\"\n\n def _LoadTestImage(self, sess, filename):\n content = io_ops.read_file(os.path.join(\n \"tensorflow/core/lib/psnr/testdata\", filename))\n im = image_ops.decode_jpeg(content, dct_method=\"INTEGER_ACCURATE\")\n im = image_ops.convert_image_dtype(im, dtypes.float32)\n im, = self.evaluate([im])\n return np.expand_dims(im, axis=0)\n\n def _LoadTestImages(self):\n with self.cached_session(use_gpu=True) as sess:\n q20 = self._LoadTestImage(sess, \"cat_q20.jpg\")\n q72 = self._LoadTestImage(sess, \"cat_q72.jpg\")\n q95 = self._LoadTestImage(sess, \"cat_q95.jpg\")\n return q20, q72, q95\n\n def _PSNR_NumPy(self, orig, target, max_value):\n \"\"\"Numpy implementation of PSNR.\"\"\"\n mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))\n return 20 * np.log10(max_value) - 10 * 
np.log10(mse)\n\n def _RandomImage(self, shape, max_val):\n \"\"\"Returns an image or image batch with given shape.\"\"\"\n return np.random.rand(*shape).astype(np.float32) * max_val\n\n @test_util.run_deprecated_v1\n def testPSNRSingleImage(self):\n image1 = self._RandomImage((8, 8, 1), 1)\n image2 = self._RandomImage((8, 8, 1), 1)\n psnr = self._PSNR_NumPy(image1, image2, 1)\n\n with self.cached_session(use_gpu=True):\n tf_image1 = constant_op.constant(image1, shape=image1.shape,\n dtype=dtypes.float32)\n tf_image2 = constant_op.constant(image2, shape=image2.shape,\n dtype=dtypes.float32)\n tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1.0, \"psnr\").eval()\n self.assertAllClose(psnr, tf_psnr, atol=0.001)\n\n @test_util.run_deprecated_v1\n def testPSNRMultiImage(self):\n image1 = self._RandomImage((10, 8, 8, 1), 1)\n image2 = self._RandomImage((10, 8, 8, 1), 1)\n psnr = self._PSNR_NumPy(image1, image2, 1)\n\n with self.cached_session(use_gpu=True):\n tf_image1 = constant_op.constant(image1, shape=image1.shape,\n dtype=dtypes.float32)\n tf_image2 = constant_op.constant(image2, shape=image2.shape,\n dtype=dtypes.float32)\n tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1, \"psnr\").eval()\n self.assertAllClose(psnr, tf_psnr, atol=0.001)\n\n @test_util.run_deprecated_v1\n def testGoldenPSNR(self):\n q20, q72, q95 = self._LoadTestImages()\n\n # Verify NumPy implementation first.\n # Golden values are generated using GNU Octave's psnr() function.\n psnr1 = self._PSNR_NumPy(q20, q72, 1)\n self.assertNear(30.321, psnr1, 0.001, msg=\"q20.dtype=\" + str(q20.dtype))\n psnr2 = self._PSNR_NumPy(q20, q95, 1)\n self.assertNear(29.994, psnr2, 0.001)\n psnr3 = self._PSNR_NumPy(q72, q95, 1)\n self.assertNear(35.302, psnr3, 0.001)\n\n # Test TensorFlow implementation.\n with self.cached_session(use_gpu=True):\n tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)\n tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)\n tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)\n tf_psnr1 = image_ops.psnr(tf_q20, tf_q72, 1, \"psnr1\").eval()\n tf_psnr2 = image_ops.psnr(tf_q20, tf_q95, 1, \"psnr2\").eval()\n tf_psnr3 = image_ops.psnr(tf_q72, tf_q95, 1, \"psnr3\").eval()\n self.assertAllClose(psnr1, tf_psnr1, atol=0.001)\n self.assertAllClose(psnr2, tf_psnr2, atol=0.001)\n self.assertAllClose(psnr3, tf_psnr3, atol=0.001)\n\n @test_util.run_deprecated_v1\n def testInfinity(self):\n q20, _, _ = self._LoadTestImages()\n psnr = self._PSNR_NumPy(q20, q20, 1)\n with self.cached_session(use_gpu=True):\n tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)\n tf_psnr = image_ops.psnr(tf_q20, tf_q20, 1, \"psnr\").eval()\n self.assertAllClose(psnr, tf_psnr, atol=0.001)\n\n @test_util.run_deprecated_v1\n def testInt(self):\n img1 = self._RandomImage((10, 8, 8, 1), 255)\n img2 = self._RandomImage((10, 8, 8, 1), 255)\n img1 = constant_op.constant(img1, dtypes.uint8)\n img2 = constant_op.constant(img2, dtypes.uint8)\n psnr_uint8 = image_ops.psnr(img1, img2, 255)\n img1 = image_ops.convert_image_dtype(img1, dtypes.float32)\n img2 = image_ops.convert_image_dtype(img2, dtypes.float32)\n psnr_float32 = image_ops.psnr(img1, img2, 1.0)\n with self.cached_session(use_gpu=True):\n self.assertAllClose(\n psnr_uint8.eval(), self.evaluate(psnr_float32), atol=0.001)\n\n\nclass SSIMTest(test_util.TensorFlowTestCase):\n \"\"\"Tests for SSIM.\"\"\"\n\n _filenames = [\"checkerboard1.png\",\n \"checkerboard2.png\",\n \"checkerboard3.png\",]\n\n _ssim = 
np.asarray([[1.000000, 0.230880, 0.231153],\n [0.230880, 1.000000, 0.996828],\n [0.231153, 0.996828, 1.000000]])\n\n def _LoadTestImage(self, sess, filename):\n content = io_ops.read_file(os.path.join(\n \"tensorflow/core/lib/ssim/testdata\", filename))\n im = image_ops.decode_png(content)\n im = image_ops.convert_image_dtype(im, dtypes.float32)\n im, = self.evaluate([im])\n return np.expand_dims(im, axis=0)\n\n def _LoadTestImages(self):\n with self.cached_session(use_gpu=True) as sess:\n return [self._LoadTestImage(sess, f) for f in self._filenames]\n\n def _RandomImage(self, shape, max_val):\n \"\"\"Returns an image or image batch with given shape.\"\"\"\n return np.random.rand(*shape).astype(np.float32) * max_val\n\n @test_util.run_deprecated_v1\n def testAgainstMatlab(self):\n \"\"\"Tests against values produced by Matlab.\"\"\"\n img = self._LoadTestImages()\n expected = self._ssim[np.triu_indices(3)]\n\n ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]\n ssim = image_ops.ssim(\n *ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n with self.cached_session(use_gpu=True):\n scores = [ssim.eval(dict(zip(ph, t)))\n for t in itertools.combinations_with_replacement(img, 2)]\n self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)\n\n def testBatch(self):\n img = self._LoadTestImages()\n expected = self._ssim[np.triu_indices(3, k=1)]\n\n img1, img2 = zip(*itertools.combinations(img, 2))\n img1 = np.concatenate(img1)\n img2 = np.concatenate(img2)\n\n ssim = image_ops.ssim(\n constant_op.constant(img1),\n constant_op.constant(img2),\n 1.0,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03)\n with self.cached_session(use_gpu=True):\n self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)\n\n def testBatchNumpyInputs(self):\n img = self._LoadTestImages()\n expected = self._ssim[np.triu_indices(3, k=1)]\n\n img1, img2 = zip(*itertools.combinations(img, 2))\n img1 = np.concatenate(img1)\n img2 = np.concatenate(img2)\n\n with self.cached_session(use_gpu=True):\n img1 = self.evaluate(constant_op.constant(img1))\n img2 = self.evaluate(constant_op.constant(img2))\n\n ssim = image_ops.ssim(\n img1,\n img2,\n 1.0,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03)\n with self.cached_session(use_gpu=True):\n self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)\n\n def testBroadcast(self):\n img = self._LoadTestImages()[:2]\n expected = self._ssim[:2, :2]\n\n img = constant_op.constant(np.concatenate(img))\n img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.\n img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.\n\n ssim = image_ops.ssim(\n img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n with self.cached_session(use_gpu=True):\n self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)\n\n @test_util.run_deprecated_v1\n def testNegative(self):\n \"\"\"Tests against negative SSIM index.\"\"\"\n step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)\n img1 = np.tile(step, (16, 1))\n img2 = np.fliplr(img1)\n\n img1 = img1.reshape((1, 16, 16, 1))\n img2 = img2.reshape((1, 16, 16, 1))\n\n ssim = image_ops.ssim(\n constant_op.constant(img1),\n constant_op.constant(img2),\n 255,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03)\n with self.cached_session(use_gpu=True):\n self.assertLess(ssim.eval(), 0)\n\n @test_util.run_deprecated_v1\n def testInt(self):\n img1 = self._RandomImage((1, 16, 16, 3), 255)\n img2 = self._RandomImage((1, 16, 16, 3), 255)\n img1 = 
constant_op.constant(img1, dtypes.uint8)\n img2 = constant_op.constant(img2, dtypes.uint8)\n ssim_uint8 = image_ops.ssim(\n img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n img1 = image_ops.convert_image_dtype(img1, dtypes.float32)\n img2 = image_ops.convert_image_dtype(img2, dtypes.float32)\n ssim_float32 = image_ops.ssim(\n img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n with self.cached_session(use_gpu=True):\n self.assertAllClose(\n ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)\n\n\nclass MultiscaleSSIMTest(test_util.TensorFlowTestCase):\n \"\"\"Tests for MS-SSIM.\"\"\"\n\n _filenames = [\"checkerboard1.png\",\n \"checkerboard2.png\",\n \"checkerboard3.png\",]\n\n _msssim = np.asarray([[1.000000, 0.091016, 0.091025],\n [0.091016, 1.000000, 0.999567],\n [0.091025, 0.999567, 1.000000]])\n\n def _LoadTestImage(self, sess, filename):\n content = io_ops.read_file(os.path.join(\n \"tensorflow/core/lib/ssim/testdata\", filename))\n im = image_ops.decode_png(content)\n im = image_ops.convert_image_dtype(im, dtypes.float32)\n im, = self.evaluate([im])\n return np.expand_dims(im, axis=0)\n\n def _LoadTestImages(self):\n with self.cached_session(use_gpu=True) as sess:\n return [self._LoadTestImage(sess, f) for f in self._filenames]\n\n def _RandomImage(self, shape, max_val):\n \"\"\"Returns an image or image batch with given shape.\"\"\"\n return np.random.rand(*shape).astype(np.float32) * max_val\n\n @test_util.run_deprecated_v1\n def testAgainstMatlab(self):\n \"\"\"Tests against MS-SSIM computed with Matlab implementation.\n\n For color images, MS-SSIM scores are averaged over color channels.\n \"\"\"\n img = self._LoadTestImages()\n expected = self._msssim[np.triu_indices(3)]\n\n ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]\n msssim = image_ops.ssim_multiscale(\n *ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n with self.cached_session(use_gpu=True):\n scores = [msssim.eval(dict(zip(ph, t)))\n for t in itertools.combinations_with_replacement(img, 2)]\n\n self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)\n\n @test_util.run_deprecated_v1\n def testUnweightedIsDifferentiable(self):\n img = self._LoadTestImages()\n ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]\n scalar = constant_op.constant(1.0, dtype=dtypes.float32)\n scaled_ph = [x * scalar for x in ph]\n msssim = image_ops.ssim_multiscale(\n *scaled_ph,\n max_val=1.0,\n power_factors=(1, 1, 1, 1, 1),\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03)\n grads = gradients.gradients(msssim, scalar)\n with self.cached_session(use_gpu=True) as sess:\n np_grads = sess.run(grads, feed_dict={ph[0]: img[0], ph[1]: img[1]})\n self.assertTrue(np.isfinite(np_grads).all())\n\n def testBatch(self):\n \"\"\"Tests MS-SSIM computed in batch.\"\"\"\n img = self._LoadTestImages()\n expected = self._msssim[np.triu_indices(3, k=1)]\n\n img1, img2 = zip(*itertools.combinations(img, 2))\n img1 = np.concatenate(img1)\n img2 = np.concatenate(img2)\n\n msssim = image_ops.ssim_multiscale(\n constant_op.constant(img1),\n constant_op.constant(img2),\n 1.0,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03)\n with self.cached_session(use_gpu=True):\n self.assertAllClose(expected, self.evaluate(msssim), 1e-4)\n\n def testBroadcast(self):\n \"\"\"Tests MS-SSIM broadcasting.\"\"\"\n img = self._LoadTestImages()[:2]\n expected = self._msssim[:2, :2]\n\n img = constant_op.constant(np.concatenate(img))\n img1 = 
array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.\n img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.\n\n score_tensor = image_ops.ssim_multiscale(\n img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n with self.cached_session(use_gpu=True):\n self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)\n\n def testRange(self):\n \"\"\"Tests against low MS-SSIM score.\n\n MS-SSIM is a geometric mean of SSIM and CS scores of various scales.\n If any of the value is negative so that the geometric mean is not\n well-defined, then treat the MS-SSIM score as zero.\n \"\"\"\n with self.cached_session(use_gpu=True) as sess:\n img1 = self._LoadTestImage(sess, \"checkerboard1.png\")\n img2 = self._LoadTestImage(sess, \"checkerboard3.png\")\n images = [img1, img2, np.zeros_like(img1),\n np.full_like(img1, fill_value=255)]\n\n images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]\n msssim_ops = [\n image_ops.ssim_multiscale(\n x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n for x, y in itertools.combinations(images, 2)\n ]\n msssim = self.evaluate(msssim_ops)\n msssim = np.squeeze(msssim)\n\n self.assertTrue(np.all(msssim >= 0.0))\n self.assertTrue(np.all(msssim <= 1.0))\n\n @test_util.run_deprecated_v1\n def testInt(self):\n img1 = self._RandomImage((1, 180, 240, 3), 255)\n img2 = self._RandomImage((1, 180, 240, 3), 255)\n img1 = constant_op.constant(img1, dtypes.uint8)\n img2 = constant_op.constant(img2, dtypes.uint8)\n ssim_uint8 = image_ops.ssim_multiscale(\n img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n img1 = image_ops.convert_image_dtype(img1, dtypes.float32)\n img2 = image_ops.convert_image_dtype(img2, dtypes.float32)\n ssim_float32 = image_ops.ssim_multiscale(\n img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)\n with self.cached_session(use_gpu=True):\n self.assertAllClose(\n ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)\n\n def testNumpyInput(self):\n \"\"\"Test case for GitHub issue 28241.\"\"\"\n image = np.random.random([512, 512, 1])\n score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)\n with self.cached_session(use_gpu=True):\n _ = self.evaluate(score_tensor)\n\n\nclass ImageGradientsTest(test_util.TensorFlowTestCase):\n\n def testImageGradients(self):\n shape = [1, 2, 4, 1]\n img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])\n img = array_ops.reshape(img, shape)\n\n expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)\n expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)\n\n dy, dx = image_ops.image_gradients(img)\n with self.cached_session():\n actual_dy = self.evaluate(dy)\n actual_dx = self.evaluate(dx)\n self.assertAllClose(expected_dy, actual_dy)\n self.assertAllClose(expected_dx, actual_dx)\n\n def testImageGradientsMultiChannelBatch(self):\n batch = [[[[1, 2], [2, 5], [3, 3]],\n [[8, 4], [5, 1], [9, 8]]],\n [[[5, 3], [7, 9], [1, 6]],\n [[1, 2], [6, 3], [6, 3]]]]\n\n expected_dy = [[[[7, 2], [3, -4], [6, 5]],\n [[0, 0], [0, 0], [0, 0]]],\n [[[-4, -1], [-1, -6], [5, -3]],\n [[0, 0], [0, 0], [0, 0]]]]\n\n expected_dx = [[[[1, 3], [1, -2], [0, 0]],\n [[-3, -3], [4, 7], [0, 0]]],\n [[[2, 6], [-6, -3], [0, 0]],\n [[5, 1], [0, 0], [0, 0]]]]\n\n batch = constant_op.constant(batch)\n assert batch.get_shape().as_list() == [2, 2, 3, 2]\n dy, dx = image_ops.image_gradients(batch)\n with self.cached_session(use_gpu=True):\n actual_dy = self.evaluate(dy)\n actual_dx = self.evaluate(dx)\n 
self.assertAllClose(expected_dy, actual_dy)\n self.assertAllClose(expected_dx, actual_dx)\n\n def testImageGradientsBadShape(self):\n # [2 x 4] image but missing batch and depth dimensions.\n img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])\n with self.assertRaises(ValueError):\n image_ops.image_gradients(img)\n\n\nclass SobelEdgesTest(test_util.TensorFlowTestCase):\n\n def disabled_testSobelEdges1x2x3x1(self):\n img = constant_op.constant([[1, 3, 6], [4, 1, 5]],\n dtype=dtypes.float32, shape=[1, 2, 3, 1])\n expected = np.reshape([[[0, 0], [0, 12], [0, 0]],\n [[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])\n sobel = image_ops.sobel_edges(img)\n with self.cached_session(use_gpu=True):\n actual_sobel = self.evaluate(sobel)\n self.assertAllClose(expected, actual_sobel)\n\n def testSobelEdges5x3x4x2(self):\n batch_size = 5\n plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],\n [1, 3, 4, 1])\n two_channel = np.concatenate([plane, plane], axis=3)\n batch = np.concatenate([two_channel] * batch_size, axis=0)\n img = constant_op.constant(batch, dtype=dtypes.float32,\n shape=[batch_size, 3, 4, 2])\n\n expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],\n [[6, 0], [0, 6], [-6, 10], [-6, 0]],\n [[0, 0], [0, 0], [0, 10], [0, 0]]],\n [1, 3, 4, 1, 2])\n expected_two_channel = np.concatenate(\n [expected_plane, expected_plane], axis=3)\n expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)\n\n sobel = image_ops.sobel_edges(img)\n with self.cached_session(use_gpu=True):\n actual_sobel = self.evaluate(sobel)\n self.assertAllClose(expected_batch, actual_sobel)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass DecodeImageTest(test_util.TensorFlowTestCase):\n\n _FORWARD_COMPATIBILITY_HORIZONS = [\n (2020, 1, 1),\n (2020, 7, 14),\n (2525, 1, 1), # future behavior\n ]\n\n def testBmpChannels(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with test_util.use_gpu():\n base = \"tensorflow/core/lib/bmp/testdata\"\n # `rgba_transparent.bmp` has 4 channels with transparent pixels.\n # Test consistency between `decode_image` and `decode_bmp` functions.\n bmp0 = io_ops.read_file(os.path.join(base, \"rgba_small.bmp\"))\n image0 = image_ops.decode_image(bmp0, channels=4)\n image1 = image_ops.decode_bmp(bmp0, channels=4)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n # Test that 3 channels is returned with user request of `channels=3`\n # even though image has 4 channels.\n # Note that this operation simply drops 4th channel information. This\n # is the same behavior as `decode_png`.\n # e.g. pixel values [25, 25, 25, 100] becomes [25, 25, 25].\n bmp1 = io_ops.read_file(os.path.join(base, \"rgb_small.bmp\"))\n image2 = image_ops.decode_bmp(bmp0, channels=3)\n image3 = image_ops.decode_bmp(bmp1)\n image2, image3 = self.evaluate([image2, image3])\n self.assertAllEqual(image2, image3)\n\n # Test that 4 channels is returned with user request of `channels=4`\n # even though image has 3 channels. 
Alpha channel should be set to\n # UINT8_MAX.\n bmp3 = io_ops.read_file(os.path.join(base, \"rgb_small_255.bmp\"))\n bmp4 = io_ops.read_file(os.path.join(base, \"rgba_small_255.bmp\"))\n image4 = image_ops.decode_bmp(bmp3, channels=4)\n image5 = image_ops.decode_bmp(bmp4)\n image4, image5 = self.evaluate([image4, image5])\n self.assertAllEqual(image4, image5)\n\n # Test that 3 channels is returned with user request of `channels=3`\n # even though image has 1 channel (grayscale).\n bmp6 = io_ops.read_file(os.path.join(base, \"grayscale_small.bmp\"))\n bmp7 = io_ops.read_file(\n os.path.join(base, \"grayscale_small_3channels.bmp\"))\n image6 = image_ops.decode_bmp(bmp6, channels=3)\n image7 = image_ops.decode_bmp(bmp7)\n image6, image7 = self.evaluate([image6, image7])\n self.assertAllEqual(image6, image7)\n\n # Test that 4 channels is returned with user request of `channels=4`\n # even though image has 1 channel (grayscale). Alpha channel should be\n # set to UINT8_MAX.\n bmp9 = io_ops.read_file(\n os.path.join(base, \"grayscale_small_4channels.bmp\"))\n image8 = image_ops.decode_bmp(bmp6, channels=4)\n image9 = image_ops.decode_bmp(bmp9)\n image8, image9 = self.evaluate([image8, image9])\n self.assertAllEqual(image8, image9)\n\n def testJpegUint16(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/jpeg/testdata\"\n jpeg0 = io_ops.read_file(os.path.join(base, \"jpeg_merge_test1.jpg\"))\n image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)\n image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),\n dtypes.uint16)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testPngUint16(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/png/testdata\"\n png0 = io_ops.read_file(os.path.join(base, \"lena_rgba.png\"))\n image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)\n image1 = image_ops.convert_image_dtype(\n image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n # NumPy conversions should happen before\n x = np.random.randint(256, size=(4, 4, 3), dtype=np.uint16)\n x_str = image_ops_impl.encode_png(x)\n x_dec = image_ops_impl.decode_image(\n x_str, channels=3, dtype=dtypes.uint16)\n self.assertAllEqual(x, x_dec)\n\n def testGifUint16(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/gif/testdata\"\n gif0 = io_ops.read_file(os.path.join(base, \"scan.gif\"))\n image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)\n image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),\n dtypes.uint16)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testBmpUint16(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/bmp/testdata\"\n bmp0 = io_ops.read_file(os.path.join(base, \"lena.bmp\"))\n image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)\n image1 = 
image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),\n dtypes.uint16)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testJpegFloat32(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/jpeg/testdata\"\n jpeg0 = io_ops.read_file(os.path.join(base, \"jpeg_merge_test1.jpg\"))\n image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)\n image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),\n dtypes.float32)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testPngFloat32(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/png/testdata\"\n png0 = io_ops.read_file(os.path.join(base, \"lena_rgba.png\"))\n image0 = image_ops.decode_image(png0, dtype=dtypes.float32)\n image1 = image_ops.convert_image_dtype(\n image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testGifFloat32(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/gif/testdata\"\n gif0 = io_ops.read_file(os.path.join(base, \"scan.gif\"))\n image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)\n image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),\n dtypes.float32)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testBmpFloat32(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/bmp/testdata\"\n bmp0 = io_ops.read_file(os.path.join(base, \"lena.bmp\"))\n image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)\n image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),\n dtypes.float32)\n image0, image1 = self.evaluate([image0, image1])\n self.assertAllEqual(image0, image1)\n\n def testExpandAnimations(self):\n for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:\n with compat.forward_compatibility_horizon(*horizon):\n with self.cached_session(use_gpu=True) as sess:\n base = \"tensorflow/core/lib/gif/testdata\"\n gif0 = io_ops.read_file(os.path.join(base, \"scan.gif\"))\n\n # Test `expand_animations=False` case.\n image0 = image_ops.decode_image(\n gif0, dtype=dtypes.float32, expand_animations=False)\n # image_ops.decode_png() handles GIFs and returns 3D tensors\n animation = image_ops.decode_gif(gif0)\n first_frame = array_ops.gather(animation, 0)\n image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)\n image0, image1 = self.evaluate([image0, image1])\n self.assertEqual(len(image0.shape), 3)\n self.assertAllEqual(list(image0.shape), [40, 20, 3])\n self.assertAllEqual(image0, image1)\n\n # Test `expand_animations=True` case.\n image2 = image_ops.decode_image(gif0, dtype=dtypes.float32)\n image3 = image_ops.convert_image_dtype(animation, dtypes.float32)\n image2, image3 = self.evaluate([image2, image3])\n self.assertEqual(len(image2.shape), 4)\n self.assertAllEqual(list(image2.shape), [12, 40, 20, 3])\n self.assertAllEqual(image2, image3)\n\n\nif 
__name__ == \"__main__\":\n googletest.main()\n" ]
[ [ "numpy.sqrt", "tensorflow.python.ops.image_ops.adjust_saturation", "numpy.cumsum", "numpy.all", "tensorflow.python.ops.gen_image_ops.non_max_suppression_v2", "tensorflow.python.ops.image_ops.flip_up_down", "tensorflow.python.ops.image_ops.non_max_suppression", "tensorflow.python.ops.image_ops.resize_image_with_pad_v1", "tensorflow.python.ops.image_ops.extract_jpeg_shape", "tensorflow.python.ops.image_ops.encode_png", "numpy.zeros", "tensorflow.python.ops.image_ops.hsv_to_rgb", "tensorflow.python.ops.image_ops_impl._verify_compatible_image_shapes", "numpy.power", "tensorflow.python.platform.test.is_gpu_available", "numpy.full_like", "tensorflow.python.ops.image_ops_impl.decode_image", "numpy.log10", "tensorflow.python.ops.image_ops.adjust_hue", "numpy.array", "numpy.sum", "tensorflow.python.ops.image_ops.non_max_suppression_with_scores", "tensorflow.python.ops.image_ops.yuv_to_rgb", "tensorflow.python.ops.gradients.gradients", "tensorflow.python.ops.image_ops.pad_to_bounding_box", "tensorflow.python.ops.image_ops.decode_png", "tensorflow.python.ops.image_ops.non_max_suppression_padded", "tensorflow.python.ops.image_ops.ssim_multiscale", "tensorflow.python.ops.image_ops.image_gradients", "tensorflow.python.platform.test.gpu_device_name", "numpy.random.uniform", "tensorflow.core.protobuf.config_pb2.ConfigProto", "numpy.vstack", "tensorflow.python.ops.image_ops.rgb_to_yuv", "numpy.expand_dims", "tensorflow.python.framework.test_util.run_without_tensor_float_32", "numpy.asarray", "tensorflow.python.ops.image_ops.psnr", "numpy.ndarray", "numpy.concatenate", "tensorflow.python.ops.image_ops.resize_image_with_crop_or_pad", "tensorflow.python.framework.test_util.disable_xla", "tensorflow.python.ops.image_ops.encode_jpeg", "numpy.reshape", "tensorflow.python.ops.image_ops.stateless_sample_distorted_bounding_box", "tensorflow.python.ops.image_ops.random_flip_left_right", "numpy.std", "tensorflow.python.ops.math_ops.add", "tensorflow.python.ops.image_ops.sample_distorted_bounding_box", "tensorflow.python.ops.image_ops.resize_bilinear", "tensorflow.python.ops.image_ops.decode_bmp", "tensorflow.python.ops.image_ops.per_image_standardization", "tensorflow.python.ops.image_ops.random_jpeg_quality", "numpy.random.rand", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.image_ops.rgb_to_grayscale", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.image_ops.resize_image_with_pad_v2", "tensorflow.python.ops.image_ops.transpose", "numpy.array_equal", "tensorflow.python.ops.image_ops.adjust_brightness", "numpy.ones", "tensorflow.python.ops.image_ops.convert_image_dtype", "tensorflow.python.ops.image_ops.decode_image", "tensorflow.python.ops.image_ops.decode_and_crop_jpeg", "tensorflow.python.framework.test_util.for_all_test_methods", "tensorflow.python.framework.test_util.use_gpu", "numpy.empty", "tensorflow.python.ops.image_ops.random_flip_up_down", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.strided_slice", "tensorflow.python.ops.array_ops.placeholder", "numpy.mean", "numpy.zeros_like", "numpy.histogram", "tensorflow.python.ops.image_ops.resize_images_v2", "numpy.random.randint", "tensorflow.python.ops.image_ops.stateless_random_jpeg_quality", "tensorflow.python.ops.image_ops.yiq_to_rgb", "numpy.clip", "numpy.fliplr", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.image_ops.decode_jpeg", "tensorflow.python.ops.gen_image_ops.non_max_suppression_v5", "numpy.rot90", 
"numpy.isnan", "tensorflow.python.ops.image_ops.flip_left_right", "tensorflow.python.ops.image_ops.rgb_to_hsv", "tensorflow.python.ops.image_ops.grayscale_to_rgb", "tensorflow.python.ops.image_ops.resize_bicubic", "tensorflow.python.ops.image_ops.adjust_contrast", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.gen_image_ops.non_max_suppression_v3", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.image_ops.resize_area", "tensorflow.python.ops.image_ops.central_crop", "numpy.dstack", "numpy.tile", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.image_ops.rgb_to_yiq", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.image_ops.decode_gif", "tensorflow.python.framework.tensor_shape.TensorShape", "numpy.squeeze", "tensorflow.python.ops.image_ops.total_variation", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.image_ops.resize_images", "tensorflow.python.ops.image_ops.rot90", "numpy.arange", "tensorflow.python.platform.googletest.main", "tensorflow.python.ops.io_ops.read_file", "tensorflow.python.ops.image_ops.crop_to_bounding_box", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.image_ops.sobel_edges", "tensorflow.python.framework.test_util.xla_allow_fallback", "tensorflow.python.ops.image_ops.adjust_gamma", "tensorflow.python.ops.image_ops_impl.encode_png", "tensorflow.python.ops.image_ops.ssim", "tensorflow.python.ops.gen_image_ops.non_max_suppression_v4", "tensorflow.python.compat.compat.forward_compatibility_horizon", "tensorflow.python.ops.image_ops.adjust_jpeg_quality", "tensorflow.python.ops.image_ops.non_max_suppression_with_overlaps", "numpy.random.random", "numpy.abs", "numpy.random.seed", "tensorflow.python.ops.stateless_random_ops.split", "numpy.triu_indices", "numpy.isfinite", "numpy.prod", "tensorflow.python.ops.random_ops.random_normal", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.4", "2.9", "2.5", "2.6", "2.10" ] } ]
aaronzguan/RadimoicDeepFeatureExtraction
[ "86867356e1af1b2a473afe563cb4a5ba63a70494" ]
[ "main.py" ]
[ "import torch\nimport torchvision.models as models\nfrom torchsummary import summary\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision import transforms\nfrom PIL import Image\nimport os\nfrom torch.optim.lr_scheduler import StepLR\nimport argparse\nimport random\nimport math\n\n\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import precision_recall_curve\n\nimport logging\n\nimport matplotlib.pyplot as plt\n\n\nlogging.basicConfig(filename='alexnet.log', level=logging.INFO)\n\nparser = argparse.ArgumentParser(description='AlexNet')\nparser.add_argument('--lr', default=0.0001, type=float, help='learning rate')\nparser.add_argument('--batch_size', default=10, type=int, help='')\nparser.add_argument('--num_epochs', default=200, type=int, help='')\nparser.add_argument('--model_root', default='/home/aaron/VIPCUP/Radiomic/model/alexnet_18_09_09.pkl')\nargs = parser.parse_args()\n\ndef delete_ds_store(fpath):\n for root, dirs, files in os.walk(fpath):\n for file in files:\n if file.endswith('.DS_Store'):\n path = os.path.join(root, file)\n os.remove(path)\n\n\n# torch.cuda.set_device(0)\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\n\ndef read_samples(positive_data_path, positive_label, negative_data_path, negative_label):\n positive_data = []\n negative_data = []\n\n delete_ds_store(positive_data_path)\n patient_list = os.listdir(positive_data_path)\n for patient_folder in patient_list:\n patient_path = os.path.join(positive_data_path, patient_folder)\n # print('reading samples from: ' + str(patient_path))\n delete_ds_store(patient_path)\n img_list = os.listdir(patient_path)\n for img in img_list:\n positive_data.append(os.path.join(patient_path, img))\n\n delete_ds_store(negative_data_path)\n patient_list = os.listdir(negative_data_path)\n for patient_folder in patient_list:\n patient_path = os.path.join(negative_data_path, patient_folder)\n # print('reading samples from: ' + str(patient_path))\n delete_ds_store(patient_path)\n img_list = os.listdir(patient_path)\n for img in img_list:\n negative_data.append(os.path.join(patient_path, img))\n\n # ------------------------\n # Create Training Dataset\n # Positive_training: 3331\n # Negative_training: 3331 * 3 = 9993\n # ------------------------\n positive_training_num = len(positive_data) - 1000\n positive_data_train = random.sample(positive_data, positive_training_num)\n\n negative_training_num = len(positive_data)*3\n negative_data_train = random.sample(negative_data, negative_training_num)\n\n data_train = positive_data_train + negative_data_train\n label_train = [positive_label]*len(positive_data_train) + [negative_label]*len(negative_data_train)\n # ------------------------\n # Create Validation Dataset\n # Positive_val: 1000\n # Negative_val: 3000\n # ------------------------\n positive_data_val = [i for i in positive_data if i not in positive_data_train]\n negative_data_val = random.sample([i for i in negative_data if i not in negative_data_train], len(positive_data_val)*3)\n\n data_val = positive_data_val + negative_data_val\n label_val = [positive_label]*len(positive_data_val) + [negative_label]*len(negative_data_val)\n\n return data_train, label_train, data_val, label_val\n\n\nclass load_data(Dataset):\n def __init__(self, positive_data_path, positive_label, negative_data_path, negative_label, is_train=True):\n # Transforms\n 
self.transform = transforms.Compose([transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n # Read the image files & label for train or validation\n if is_train:\n self.data, self.label, _, _ = read_samples(positive_data_path, positive_label, negative_data_path, negative_label)\n else:\n _, _, self.data, self.label = read_samples(positive_data_path, positive_label, negative_data_path, negative_label)\n\n # Calculate len\n self.data_len = len(self.data)\n\n def __getitem__(self, index):\n # Get label(class) of the image\n label = [self.label[index]]\n label = np.asarray(label).reshape(1, 1)\n # Get image path\n img_path = self.data[index]\n # Open image\n img = Image.open(img_path)\n # Transform image\n img = self.transform(img)\n\n return img, torch.LongTensor(label)\n\n def __len__(self):\n return self.data_len\n\n# def performace_evalution(outputs, labels):\n\n\ndef train(net, loader, optimizer, criterion, epoch):\n\n net.train()\n\n print('Epoch {}/{}'.format(epoch+1, args.num_epochs))\n logging.info('Epoch {}/{}'.format(epoch + 1, args.num_epochs))\n print('-' * 10)\n logging.info('-' * 10)\n\n running_batch = 0\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for i, (image, label) in enumerate(loader):\n\n image = image.to(device)\n label = label.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n output = net(image)\n _, label_pred = torch.max(output, 1)\n\n loss = criterion(output, label.view(-1))\n loss.backward()\n optimizer.step()\n\n running_batch += label.size(0)\n running_loss += loss.item() * image.size(0)\n running_corrects += torch.sum(label_pred == label.view(-1)).item()\n\n if (i+1) % 50 == 0: # print every 50 mini-batches\n print('[%d, %5d] loss: %.3f correct: %.3f' %\n (epoch + 1, i + 1, running_loss/running_batch, running_corrects/running_batch))\n logging.info('[%d, %5d] loss: %.3f correct: %.3f' %\n (epoch + 1, i + 1, running_loss/running_batch, running_corrects/running_batch))\n\n\ndef validate(net, loader, criterion, epoch):\n\n net.eval()\n\n running_batch = 0\n running_loss = 0.0\n running_corrects = 0\n\n label_truth = []\n label_output = []\n\n with torch.no_grad():\n for i, (image, label) in enumerate(loader):\n image = image.to(device)\n label = label.to(device)\n output = net(image)\n\n label_pred = torch.nn.functional.softmax(output)[:, 1]\n # _, label_pred = torch.max(output, 1)\n loss = criterion(output, label.view(-1))\n running_batch += label.size(0)\n running_loss += loss.item() * image.size(0)\n # running_corrects += torch.sum(label_pred == label.view(-1)).item()\n\n # if (i+1) % 50 == 0: # print every 50 mini-batches\n # print('[%d, %5d] loss_val: %.3f correct_val: %.3f' %\n # (epoch + 1, i + 1, running_loss/running_batch, running_corrects/running_batch))\n # logging.info('[%d, %5d] loss_val: %.3f correct_val: %.3f' %\n # (epoch + 1, i + 1, running_loss/running_batch, running_corrects/running_batch))\n\n label_truth.extend(label.view(-1).cpu().numpy())\n label_output.extend(label_pred.cpu().numpy())\n\n precision, recall, thresholds = precision_recall_curve(label_truth, label_output)\n average_precision = average_precision_score(label_truth, label_output)\n\n plt.figure()\n plt.plot(recall, precision)\n # plt.step(recall, precision, color='b', alpha=0.2, where='post')\n # plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n 
plt.xlim([0.0, 1.0])\n plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(average_precision))\n # plt.show()\n plt.savefig('%d.png' % epoch)\n\n print('[%d, %5d] average_precision_val: %.3f' %\n (epoch + 1, i + 1, average_precision))\n logging.info('[%d, %5d] average_precision_val: %.3f' %\n (epoch + 1, i + 1, average_precision))\n\n # # Precision\n # running_precision = precision_score(label_truth, label_output)\n # # Recall\n # running_recall = recall_score(label_truth, label_output)\n # # Average Precision\n # running_ap = average_precision_score(label_truth, label_output)\n\n # print('[%d, %5d] precision_val: %.3f recall_val: %.3f average_precision_val: %.3f' %\n # (epoch + 1, i + 1, running_precision, running_recall, running_ap))\n # logging.info('[%d, %5d] precision_val: %.3f recall_val: %.3f average_precision_val: %.3f' %\n # (epoch + 1, i + 1, running_precision, running_recall, running_ap))\n\n\n\n\nif __name__ == '__main__':\n positive_data_path = '/home/aaron/VIPCUP_Data/Positive_Data_Set'\n positive_label = 1\n negative_data_path = '/home/aaron/VIPCUP_Data/Negative_Data_Set'\n negative_label = 0\n\n train_data = load_data(positive_data_path, positive_label, negative_data_path, negative_label, is_train=True)\n train_loader = DataLoader(dataset=train_data, num_workers=4, batch_size=args.batch_size, pin_memory=False, shuffle=True)\n\n val_data = load_data(positive_data_path, positive_label, negative_data_path, negative_label, is_train=False)\n val_loader = DataLoader(dataset=val_data, num_workers=4, batch_size=args.batch_size, pin_memory=False, shuffle=False)\n\n alexnet = models.alexnet(pretrained=True)\n # print(alexnet)\n alexnet.classifier[6] = nn.Linear(4096, 2)\n # Extract features from the first fc\n # new_classifier = nn.Sequential(*list(alexnet.classifier.children())[:-5])\n # alexnet.classifier = new_classifier\n # print(alexnet)\n alexnet.to(device)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(alexnet.parameters(),\n lr=args.lr,\n weight_decay=0.0005,\n betas=(0.9, 0.999),\n amsgrad=True)\n scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n\n # ------------------------\n # Start Training and Validating\n # ------------------------\n save_step = 50\n for epoch in range(args.num_epochs):\n scheduler.step()\n train(alexnet, train_loader, optimizer, criterion, epoch)\n print('Validation')\n logging.info('Validation')\n validate(alexnet, val_loader, criterion, epoch)\n if epoch % save_step == 0:\n torch.save(alexnet.state_dict(), args.model_root)" ]
[ [ "torch.nn.functional.softmax", "torch.max", "numpy.asarray", "torch.utils.data.DataLoader", "matplotlib.pyplot.plot", "torch.no_grad", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss", "sklearn.metrics.precision_recall_curve", "torch.optim.lr_scheduler.StepLR", "matplotlib.pyplot.figure", "torch.LongTensor", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "torch.nn.Linear", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlim", "sklearn.metrics.average_precision_score", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
braunmagrin/pandas-coalesce
[ "4fef04b833e9193c2194805a44a8a0d9b93df71c" ]
[ "coalesce.py" ]
[ "import pandas as pd\n\n\ndef coalesce(self, columns, name='coalesced'):\n \"\"\"\n Coalesce the DataFrame's columns in order\n\n Parameters\n ----------\n columns : list-like\n\n Returns\n -------\n coalesced : Series\n \"\"\"\n from functools import reduce\n return reduce(lambda series, col: series.fillna(self[col]),\n columns[1:],\n pd.Series(self[columns[0]], name=name))\n\n\npd.DataFrame.coalesce = coalesce\n" ]
[ [ "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
revsic/tf-speech-dataset
[ "93a3ce2574616a3a52d2ac99af9826af680dda8f" ]
[ "utils/normalizer.py" ]
[ "from typing import List\n\nimport tensorflow as tf\n\n\nclass TextNormalizer:\n \"\"\"Normalize text in to fixed graphme set.\n WARNING: It does not accept digits, please use normalized text in LJ Speech.\n \"\"\"\n GRAPHEMES = 'abcdefghijklmnopqrstuvwxyz !?,.'\n REPLACER = {\n '\"\\'()-:;[]’“”': '', # punct\n 'àâ': 'a', 'èéê': 'e', 'ü': 'u' # special character in ljspeech.\n }\n\n def __init__(self):\n \"\"\"Initializer.\n \"\"\"\n replacer = {}\n for rep, out in TextNormalizer.REPLACER.items():\n for r in rep:\n replacer[r] = out\n self.replacer = replacer\n\n def grapheme_fn(self, grapheme: str) -> str:\n \"\"\"Map grapheme into fixed set `TextNormalizer.GRAPHEMES`.\n Args:\n grapheme: single grapheme.\n Returns:\n normalized form.\n \"\"\"\n if grapheme in self.replacer:\n grapheme = self.replacer[grapheme]\n assert grapheme in TextNormalizer.GRAPHEMES, \\\n f'invalid grapheme: {grapheme}'\n return grapheme\n\n def normalize(self, text: str) -> str:\n \"\"\"Normalize text.\n Args:\n text: input text.\n Returns:\n normalized.\n \"\"\"\n return ''.join(self.grapheme_fn(t.lower()) for t in text)\n\n def labeling(self, text: str) -> List[int]:\n \"\"\"Normalize text and make to integer label.\n Padding token for 0, TextNormalizer.GRAPHEMES for afters.\n\n Args:\n text: input text.\n Returns:\n integer labels.\n \"\"\"\n return [\n TextNormalizer.GRAPHEMES.index(t) + 1\n for t in self.normalize(text)]\n\n def recover(self, labels: List[int]) -> str:\n \"\"\"Convert label to normalized text.\n Ars:\n labels: integer label.\n Returns:\n converted text.\n \"\"\"\n return ''.join(\n TextNormalizer.GRAPHEMES[i - 1] \n for i in labels if i != 0)\n\n def tf_labeler(self, text: tf.Tensor) -> tf.Tensor:\n \"\"\"Tensorflow level text labeler.\n Args:\n text: tf.string, text.\n Returns:\n labels: [tf.int32; S], labels.\n \"\"\"\n def labeler(text: tf.Tensor) -> tf.Tensor:\n text = text.numpy().decode('utf-8')\n labels = self.labeling(text)\n return tf.convert_to_tensor(labels, dtype=tf.int32)\n return tf.py_function(labeler, [text], tf.int32)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.py_function" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shahlab/nnPUlearning
[ "0458f78510ee2383f6988733ccb26298f62cd7bb" ]
[ "model.py" ]
[ "import warnings\n\nwarnings.filterwarnings('ignore')\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport numpy as np\nfrom chainer import Chain\nfrom chainer.backends import cuda\nfrom sklearn.metrics import recall_score\nfrom functools import partial\n\n\nclass MyClassifier(Chain):\n prior = 0\n it_position = None\n\n def __call__(self, x, t, loss_func):\n self.clear()\n h = self.calculate(x)\n self.loss = loss_func(h, t)\n chainer.reporter.report({'loss': self.loss}, self)\n return self.loss\n\n def clear(self):\n self.loss = None\n\n def calculate(self, x):\n return None\n\n def call_reporter(self, dictionary):\n chainer.reporter.report(dictionary, self)\n\n def error(self, x, t):\n warnings.filterwarnings(\"ignore\")\n\n xp = cuda.get_array_module(x, False)\n size = len(t)\n with chainer.no_backprop_mode():\n with chainer.using_config(\"train\", False):\n h = xp.reshape(xp.sign(self.calculate(x).data), size)\n if isinstance(h, chainer.Variable):\n h = h.data\n if isinstance(t, chainer.Variable):\n t = t.data\n result = (h != t).sum() / size\n\n t, h = t.get(), h.get()\n\n h_separated = ','.join([str(x) for x in h]) + '\\n'\n h_separated = str(self.it_position) + ',' + h_separated\n\n with open('result/preds.csv', 'a') as f:\n f.write(h_separated)\n\n assert h.shape[0] == t.shape[0]\n\n # Calculate partial recall\n recall = recall_score(t, h)\n\n # Calculate perc pos and perc pos non fake\n h_pos_idx = np.where(h == 1)[0]\n perc_pos = h_pos_idx.shape[0]/h.shape[0] if h.shape[0] > 0 else 0.\n\n if len(h_pos_idx) > 0:\n perc_pos_nf = np.unique(t[h_pos_idx], return_counts=True)[1]/h_pos_idx.shape[0]\n if len(perc_pos_nf) > 0:\n perc_pos_nf = perc_pos_nf[-1]\n else:\n perc_pos_nf = 0.\n else:\n perc_pos_nf = 0.\n\n chainer.reporter.report({'error': result}, self)\n chainer.reporter.report({'recall': recall}, self)\n chainer.reporter.report({'percPos': perc_pos}, self)\n chainer.reporter.report({'percPosNF': perc_pos_nf}, self)\n return cuda.to_cpu(result) if xp != np else result\n\n def compute_prediction_summary(self, x, t):\n xp = cuda.get_array_module(x, False)\n if isinstance(t, chainer.Variable):\n t = t.data\n n_p = (t == 1).sum()\n n_n = (t == -1).sum()\n with chainer.no_backprop_mode():\n with chainer.using_config(\"train\", False):\n h = xp.ravel(xp.sign(self.calculate(x).data))\n if isinstance(h, chainer.Variable):\n h = h.data\n t_p = ((h == 1) * (t == 1)).sum()\n t_n = ((h == -1) * (t == -1)).sum()\n f_p = n_n - t_n\n f_n = n_p - t_p\n return int(t_p), int(t_n), int(f_p), int(f_n)\n\n\nclass LinearClassifier(MyClassifier, Chain):\n def __init__(self, prior, dim):\n super(LinearClassifier, self).__init__(\n l=L.Linear(dim, 1)\n )\n self.prior = prior\n\n def calculate(self, x):\n h = self.l(x)\n return h\n\n\nclass ThreeLayerPerceptron(MyClassifier, Chain):\n def __init__(self, prior, dim):\n super(ThreeLayerPerceptron, self).__init__(l1=L.Linear(dim, 100),\n l2=L.Linear(100, 1))\n self.af = F.relu\n self.prior = prior\n\n def calculate(self, x):\n h = self.l1(x)\n h = self.af(h)\n h = self.l2(h)\n return h\n\n#\n# class MultiLayerPerceptron(MyClassifier, Chain):\n# def __init__(self, prior, dim):\n# super(MultiLayerPerceptron, self).__init__(l1=L.Linear(dim, 300, nobias=True),\n# b1=L.BatchNormalization(300),\n# l2=L.Linear(300, 300, nobias=True),\n# b2=L.BatchNormalization(300),\n# l3=L.Linear(300, 300, nobias=True),\n# b3=L.BatchNormalization(300),\n# l4=L.Linear(300, 300, nobias=True),\n# b4=L.BatchNormalization(300),\n# l5=L.Linear(300, 
1))\n# self.af = F.relu\n# self.prior = prior\n#\n# def calculate(self, x):\n# h = self.l1(x)\n# h = self.b1(h)\n# h = self.af(h)\n# h = self.l2(h)\n# h = self.b2(h)\n# h = self.af(h)\n# h = self.l3(h)\n# h = self.b3(h)\n# h = self.af(h)\n# h = self.l4(h)\n# h = self.b4(h)\n# h = self.af(h)\n# h = self.l5(h)\n# return h\n\n\nclass MultiLayerPerceptron(MyClassifier, Chain):\n def __init__(self, prior, dim):\n super(MultiLayerPerceptron, self).__init__(l1=L.Linear(dim, 300, nobias=True),\n b1=L.BatchNormalization(300),\n l2=L.Linear(300, 300, nobias=True),\n b2=L.BatchNormalization(300),\n l3=L.Linear(300, 300, nobias=True),\n b3=L.BatchNormalization(300),\n l4=L.Linear(300, 300, nobias=True),\n b4=L.BatchNormalization(300),\n l5=L.Linear(300, 300, nobias=True),\n b5=L.BatchNormalization(300),\n l6=L.Linear(300, 300, nobias=True),\n b6=L.BatchNormalization(300),\n l7=L.Linear(300, 300, nobias=True),\n b7=L.BatchNormalization(300),\n l8=L.Linear(300, 300, nobias=True),\n b8=L.BatchNormalization(300),\n l9=L.Linear(300, 1))\n self.af = F.relu\n self.dr = F.dropout\n self.prior = prior\n\n def calculate(self, x):\n h = self.l1(x)\n h = self.b1(h)\n h = self.af(h)\n h = self.l2(h)\n h = self.b2(h)\n h = self.af(h)\n h = self.l3(h)\n h = self.b3(h)\n h = self.af(h)\n h = self.l4(h)\n h = self.b4(h)\n h = self.af(h)\n h = self.l5(h)\n h = self.b5(h)\n h = self.af(h)\n h = self.l6(h)\n # h = self.dr(h)\n h = self.b6(h)\n h = self.af(h)\n h = self.l7(h)\n # h = self.dr(h)\n h = self.b7(h)\n h = self.af(h)\n h = self.l8(h)\n # h = self.dr(h)\n h = self.b8(h)\n h = self.af(h)\n h = self.l9(h)\n return h\n\n\nclass CNN(MyClassifier, Chain):\n def __init__(self, prior, dim):\n super(CNN, self).__init__(\n conv1=L.Convolution2D(None, 16, 3, pad=1),\n conv2=L.Convolution2D(None, 16, 3, pad=1),\n conv3=L.Convolution2D(None, 16, 3, pad=1),\n conv4=L.Convolution2D(None, 16, 3, pad=1),\n b1=L.BatchNormalization(16),\n b2=L.BatchNormalization(16),\n fc1=L.Linear(None, 128),\n fc2=L.Linear(128, 128),\n fc3=L.Linear(128, 1),\n )\n self.mpool=partial(F.max_pooling_2d, ksize=2, stride=2)\n self.af = F.relu\n self.prior = prior\n\n def calculate(self, x):\n h = self.conv1(x)\n h = self.af(h)\n h = self.mpool(h)\n # h = self.b1(h)\n h = self.conv2(h)\n h = self.af(h)\n h = self.mpool(h)\n h = self.conv3(h)\n h = self.af(h)\n h = self.mpool(h)\n h = self.conv4(h)\n h = self.af(h)\n h = self.mpool(h)\n # h = self.b2(h)\n # h = self.af(h)\n h = self.fc1(h)\n h = self.af(h)\n # h = self.fc2(h)\n # h = self.af(h)\n h = self.fc3(h)\n return h\n\n\n# class CNN(MyClassifier, Chain):\n# def __init__(self, prior, dim):\n# super(CNN, self).__init__(\n# conv1=L.Convolution2D(None, 96, 3, pad=1),\n# conv2=L.Convolution2D(96, 96, 3, pad=1),\n# conv3=L.Convolution2D(96, 96, 3, pad=1, stride=2),\n# conv4=L.Convolution2D(96, 192, 3, pad=1),\n# conv5=L.Convolution2D(192, 192, 3, pad=1),\n# conv6=L.Convolution2D(192, 192, 3, pad=1, stride=2),\n# conv7=L.Convolution2D(192, 192, 3, pad=1),\n# conv8=L.Convolution2D(192, 192, 1),\n# conv9=L.Convolution2D(192, 10, 1),\n# b1=L.BatchNormalization(96),\n# b2=L.BatchNormalization(96),\n# b3=L.BatchNormalization(96),\n# b4=L.BatchNormalization(192),\n# b5=L.BatchNormalization(192),\n# b6=L.BatchNormalization(192),\n# b7=L.BatchNormalization(192),\n# b8=L.BatchNormalization(192),\n# b9=L.BatchNormalization(10),\n# fc1=L.Linear(None, 1000),\n# fc2=L.Linear(1000, 1000),\n# fc3=L.Linear(1000, 1),\n# )\n# self.af = F.relu\n# self.prior = prior\n#\n# def calculate(self, x):\n# h = self.conv1(x)\n# h 
= self.b1(h)\n# h = self.af(h)\n# h = self.conv2(h)\n# h = self.b2(h)\n# h = self.af(h)\n# h = self.conv3(h)\n# h = self.b3(h)\n# h = self.af(h)\n# h = self.conv4(h)\n# h = self.b4(h)\n# h = self.af(h)\n# h = self.conv5(h)\n# h = self.b5(h)\n# h = self.af(h)\n# h = self.conv6(h)\n# h = self.b6(h)\n# h = self.af(h)\n# h = self.conv7(h)\n# h = self.b7(h)\n# h = self.af(h)\n# h = self.conv8(h)\n# h = self.b8(h)\n# h = self.af(h)\n# h = self.conv9(h)\n# h = self.b9(h)\n# h = self.af(h)\n# h = self.fc1(h)\n# h = self.af(h)\n# h = self.fc2(h)\n# h = self.af(h)\n# h = self.fc3(h)\n# return h\n" ]
[ [ "sklearn.metrics.recall_score", "numpy.where", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mimipaskova/MasterThesis
[ "9288b7cb566839e91cb0fb7cee4f28a930f20807" ]
[ "MUSE/unsupervised.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport os\nimport time\nimport json\nimport argparse\nfrom collections import OrderedDict\nimport numpy as np\nimport torch\n\nfrom src.utils import bool_flag, initialize_exp\nfrom src.models import build_model\nfrom src.trainer import Trainer\nfrom src.evaluation import Evaluator\n\n\nVALIDATION_METRIC = 'mean_cosine-csls_knn_10-S2T-10000'\n\n\n# main\nparser = argparse.ArgumentParser(description='Unsupervised training')\nparser.add_argument(\"--seed\", type=int, default=-1, help=\"Initialization seed\")\nparser.add_argument(\"--verbose\", type=int, default=2, help=\"Verbose level (2:debug, 1:info, 0:warning)\")\nparser.add_argument(\"--exp_path\", type=str, default=\"\", help=\"Where to store experiment logs and models\")\nparser.add_argument(\"--exp_name\", type=str, default=\"debug\", help=\"Experiment name\")\nparser.add_argument(\"--exp_id\", type=str, default=\"\", help=\"Experiment ID\")\nparser.add_argument(\"--cuda\", type=bool_flag, default=True, help=\"Run on GPU\")\nparser.add_argument(\"--export\", type=str, default=\"txt\", help=\"Export embeddings after training (txt / pth)\")\n# data\nparser.add_argument(\"--src_lang\", type=str, default='en', help=\"Source language\")\nparser.add_argument(\"--tgt_lang\", type=str, default='es', help=\"Target language\")\nparser.add_argument(\"--emb_dim\", type=int, default=300, help=\"Embedding dimension\")\nparser.add_argument(\"--max_vocab\", type=int, default=200000, help=\"Maximum vocabulary size (-1 to disable)\")\n# mapping\nparser.add_argument(\"--map_id_init\", type=bool_flag, default=True, help=\"Initialize the mapping as an identity matrix\")\nparser.add_argument(\"--map_beta\", type=float, default=0.001, help=\"Beta for orthogonalization\")\n# discriminator\nparser.add_argument(\"--dis_layers\", type=int, default=2, help=\"Discriminator layers\")\nparser.add_argument(\"--dis_hid_dim\", type=int, default=2048, help=\"Discriminator hidden layer dimensions\")\nparser.add_argument(\"--dis_dropout\", type=float, default=0., help=\"Discriminator dropout\")\nparser.add_argument(\"--dis_input_dropout\", type=float, default=0.1, help=\"Discriminator input dropout\")\nparser.add_argument(\"--dis_steps\", type=int, default=5, help=\"Discriminator steps\")\nparser.add_argument(\"--dis_lambda\", type=float, default=1, help=\"Discriminator loss feedback coefficient\")\nparser.add_argument(\"--dis_most_frequent\", type=int, default=75000, help=\"Select embeddings of the k most frequent words for discrimination (0 to disable)\")\nparser.add_argument(\"--dis_smooth\", type=float, default=0.1, help=\"Discriminator smooth predictions\")\nparser.add_argument(\"--dis_clip_weights\", type=float, default=0, help=\"Clip discriminator weights (0 to disable)\")\n# training adversarial\nparser.add_argument(\"--adversarial\", type=bool_flag, default=True, help=\"Use adversarial training\")\nparser.add_argument(\"--n_epochs\", type=int, default=5, help=\"Number of epochs\")\nparser.add_argument(\"--epoch_size\", type=int, default=1000000, help=\"Iterations per epoch\")\nparser.add_argument(\"--batch_size\", type=int, default=32, help=\"Batch size\")\nparser.add_argument(\"--map_optimizer\", type=str, default=\"sgd,lr=0.1\", help=\"Mapping optimizer\")\nparser.add_argument(\"--dis_optimizer\", type=str, default=\"sgd,lr=0.1\", help=\"Discriminator 
optimizer\")\nparser.add_argument(\"--lr_decay\", type=float, default=0.98, help=\"Learning rate decay (SGD only)\")\nparser.add_argument(\"--min_lr\", type=float, default=1e-6, help=\"Minimum learning rate (SGD only)\")\nparser.add_argument(\"--lr_shrink\", type=float, default=0.5, help=\"Shrink the learning rate if the validation metric decreases (1 to disable)\")\n# training refinement\nparser.add_argument(\"--n_refinement\", type=int, default=5, help=\"Number of refinement iterations (0 to disable the refinement procedure)\")\n# dictionary creation parameters (for refinement)\nparser.add_argument(\"--dico_eval\", type=str, default=\"default\", help=\"Path to evaluation dictionary\")\nparser.add_argument(\"--dico_method\", type=str, default='csls_knn_10', help=\"Method used for dictionary generation (nn/invsm_beta_30/csls_knn_10)\")\nparser.add_argument(\"--dico_build\", type=str, default='S2T', help=\"S2T,T2S,S2T|T2S,S2T&T2S\")\nparser.add_argument(\"--dico_threshold\", type=float, default=0, help=\"Threshold confidence for dictionary generation\")\nparser.add_argument(\"--dico_max_rank\", type=int, default=15000, help=\"Maximum dictionary words rank (0 to disable)\")\nparser.add_argument(\"--dico_min_size\", type=int, default=0, help=\"Minimum generated dictionary size (0 to disable)\")\nparser.add_argument(\"--dico_max_size\", type=int, default=0, help=\"Maximum generated dictionary size (0 to disable)\")\n# reload pre-trained embeddings\nparser.add_argument(\"--src_emb\", type=str, default=\"\", help=\"Reload source embeddings\")\nparser.add_argument(\"--tgt_emb\", type=str, default=\"\", help=\"Reload target embeddings\")\nparser.add_argument(\"--normalize_embeddings\", type=str, default=\"\", help=\"Normalize embeddings before training\")\n\n\n# parse parameters\nparams = parser.parse_args()\n\n# check parameters\nassert not params.cuda or torch.cuda.is_available()\nassert 0 <= params.dis_dropout < 1\nassert 0 <= params.dis_input_dropout < 1\nassert 0 <= params.dis_smooth < 0.5\nassert params.dis_lambda > 0 and params.dis_steps > 0\nassert 0 < params.lr_shrink <= 1\nassert os.path.isfile(params.src_emb)\nassert os.path.isfile(params.tgt_emb)\nassert params.dico_eval == 'default' or os.path.isfile(params.dico_eval)\nassert params.export in [\"\", \"txt\", \"pth\"]\n\n# build model / trainer / evaluator\nlogger = initialize_exp(params)\nsrc_emb, tgt_emb, mapping, discriminator = build_model(params, True)\ntrainer = Trainer(src_emb, tgt_emb, mapping, discriminator, params)\nevaluator = Evaluator(trainer)\n\n\n\"\"\"\nLearning loop for Adversarial Training\n\"\"\"\nif params.adversarial:\n logger.info('----> ADVERSARIAL TRAINING <----\\n\\n')\n\n # training loop\n for n_epoch in range(params.n_epochs):\n\n logger.info('Starting adversarial training epoch %i...' 
% n_epoch)\n tic = time.time()\n n_words_proc = 0\n stats = {'DIS_COSTS': []}\n\n for n_iter in range(0, params.epoch_size, params.batch_size):\n\n # discriminator training\n for _ in range(params.dis_steps):\n trainer.dis_step(stats)\n\n # mapping training (discriminator fooling)\n n_words_proc += trainer.mapping_step(stats)\n\n # log stats\n if n_iter % 500 == 0:\n stats_str = [('DIS_COSTS', 'Discriminator loss')]\n stats_log = ['%s: %.4f' % (v, np.mean(stats[k]))\n for k, v in stats_str if len(stats[k]) > 0]\n stats_log.append('%i samples/s' % int(n_words_proc / (time.time() - tic)))\n logger.info(('%06i - ' % n_iter) + ' - '.join(stats_log))\n\n # reset\n tic = time.time()\n n_words_proc = 0\n for k, _ in stats_str:\n del stats[k][:]\n\n # embeddings / discriminator evaluation\n to_log = OrderedDict({'n_epoch': n_epoch})\n evaluator.all_eval(to_log)\n evaluator.eval_dis(to_log)\n\n # JSON log / save best model / end of epoch\n logger.info(\"__log__:%s\" % json.dumps(to_log))\n trainer.save_best(to_log, VALIDATION_METRIC)\n logger.info('End of epoch %i.\\n\\n' % n_epoch)\n\n # update the learning rate (stop if too small)\n trainer.update_lr(to_log, VALIDATION_METRIC)\n if trainer.map_optimizer.param_groups[0]['lr'] < params.min_lr:\n logger.info('Learning rate < 1e-6. BREAK.')\n break\n\n\n\"\"\"\nLearning loop for Procrustes Iterative Refinement\n\"\"\"\nif params.n_refinement > 0:\n # Get the best mapping according to VALIDATION_METRIC\n logger.info('----> ITERATIVE PROCRUSTES REFINEMENT <----\\n\\n')\n trainer.reload_best()\n\n # training loop\n for n_iter in range(params.n_refinement):\n\n logger.info('Starting refinement iteration %i...' % n_iter)\n\n # build a dictionary from aligned embeddings\n trainer.build_dictionary()\n\n # apply the Procrustes solution\n trainer.procrustes()\n\n # embeddings evaluation\n to_log = OrderedDict({'n_iter': n_iter})\n evaluator.all_eval(to_log)\n\n # JSON log / save best model / end of epoch\n logger.info(\"__log__:%s\" % json.dumps(to_log))\n trainer.save_best(to_log, VALIDATION_METRIC)\n logger.info('End of refinement iteration %i.\\n\\n' % n_iter)\n\n\n# export embeddings\nif params.export:\n trainer.reload_best()\n trainer.export()\n" ]
[ [ "numpy.mean", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]