repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---|
cnk113/TREX | [
"add83d8108f3602c5bbe7b37f60ff19f89b2236d"
] | [
"src/trex/writers.py"
] | [
"from pathlib import Path\nfrom typing import List\nfrom .cell import Cell\nimport operator\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"Conversion of the second argument of issubdtype\")\n import loompy\nimport numpy as np\n\n\ndef write_count_matrix(path: Path, cells: List[Cell]):\n \"\"\"Create a Read-count matrix with cells as columns and cloneIDs as rows\"\"\"\n clone_ids = set()\n for cell in cells:\n clone_ids.update(clone_id for clone_id in cell.counts)\n clone_ids = sorted(clone_ids)\n all_counts = [cell.counts for cell in cells]\n with open(path, \"w\") as f:\n f.write(\",\")\n f.write(\",\".join(cell.cell_id for cell in cells))\n f.write(\"\\n\")\n for clone_id in clone_ids:\n f.write(clone_id)\n f.write(\",\")\n values = [lic.get(clone_id, 0) for lic in all_counts]\n f.write(\",\".join(str(v) for v in values))\n f.write(\"\\n\")\n\n\ndef write_cells(path: Path, cells: List[Cell]) -> None:\n \"\"\"Write cells to a tab-separated file\"\"\"\n with open(path, \"w\") as f:\n print(\n \"#cell_id\",\n \":\",\n \"clone_id1\",\n \"count1\",\n \"clone_id2\",\n \"count2\",\n \"...\",\n sep=\"\\t\",\n file=f,\n )\n for cell in cells:\n row = [cell.cell_id, \":\"]\n sorted_clone_ids = sorted(\n cell.counts, key=lambda x: cell.counts[x], reverse=True\n )\n if not sorted_clone_ids:\n continue\n for clone_id in sorted_clone_ids:\n row.extend([clone_id, cell.counts[clone_id]])\n print(*row, sep=\"\\t\", file=f)\n\n\ndef write_reads_or_molecules(path, mols_or_reads, require_umis=True, sort=True):\n with open(path, \"w\") as f:\n if require_umis:\n if sort:\n mols_or_reads = sorted(\n mols_or_reads,\n key=lambda mol_or_read: (\n mol_or_read.umi,\n mol_or_read.cell_id,\n mol_or_read.clone_id,\n ),\n )\n print(\"#cell_id\", \"umi\", \"clone_id\", sep=\"\\t\", file=f)\n for mol_or_read in mols_or_reads:\n print(\n mol_or_read.cell_id,\n mol_or_read.umi,\n mol_or_read.clone_id,\n sep=\"\\t\",\n file=f,\n )\n else:\n if sort:\n mols_or_reads = sorted(\n mols_or_reads,\n key=lambda mol_or_read: (mol_or_read.clone_id, mol_or_read.cell_id),\n )\n print(\"#cell_id\", \"clone_id\", sep=\"\\t\", file=f)\n for mol_or_read in mols_or_reads:\n print(mol_or_read.cell_id, mol_or_read.clone_id, sep=\"\\t\", file=f)\n\n\ndef write_loom(cells: List[Cell], cellranger, output_dir, clone_id_length, top_n=6):\n \"\"\"\n Create a loom file from a Cell Ranger result directory and augment it with information about\n the most abundant cloneIDs and their counts.\n \"\"\"\n # For each cell, collect the most abundant cloneIDs and their counts\n # Maps cell_id to a list of (clone_id, count) pairs that represent the most abundant cloneIDs.\n most_abundant = dict()\n for cell in cells:\n if not cell.counts:\n continue\n counts = sorted(cell.counts.items(), key=operator.itemgetter(1))\n counts.reverse()\n counts = counts[:top_n]\n most_abundant[cell.cell_id] = counts\n\n loompy.create_from_cellranger(cellranger.sample_dir, outdir=output_dir)\n # create_from_cellranger() does not tell us the name of the created file,\n # so we need to re-derive it from the sample name.\n sample_name = cellranger.sample_dir.name\n loom_path = output_dir / (sample_name + \".loom\")\n\n with loompy.connect(loom_path) as ds:\n # Cell ids in the loom file are prefixed by the sample name and a ':'. 
Remove that prefix.\n loom_cell_ids = [cell_id[len(sample_name) + 1 :] for cell_id in ds.ca.CellID]\n\n # Transform cloneIDs and count data\n # brings cloneID data into correct format for loom file.\n # Array must have same shape as all_cellIDs\n clone_id_lists = [[] for _ in range(top_n)]\n count_lists = [[] for _ in range(top_n)]\n for cell_id in loom_cell_ids:\n clone_id_counts = most_abundant.get(cell_id, [])\n # Fill up to a constant length\n while len(clone_id_counts) < top_n:\n clone_id_counts.append((\"-\", 0))\n\n for i, (clone_id, count) in enumerate(clone_id_counts):\n clone_id_lists[i].append(clone_id)\n count_lists[i].append(count)\n\n # Add cloneID and count information to loom file\n for i in range(top_n):\n ds.ca[f\"cloneid_{i+1}\"] = np.array(\n clone_id_lists[i], dtype=\"S%r\" % clone_id_length\n )\n ds.ca[f\"cloneid_count_{i+1}\"] = np.array(count_lists[i], dtype=int)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
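For orientation, each row of this table pairs one repository with index-aligned per-file sequences. The following is a minimal sketch, assuming a row is handed to you as a plain Python dict keyed by the column names above; the values are hand-abridged from the first row, with long fields elided as `...`:

```python
# Illustrative sketch only (not part of the dataset): a hand-abridged copy of
# the first row above, assuming each row is exposed as a Python dict keyed by
# the column names. Long field values are elided with "...".
row = {
    "repo_name": "cnk113/TREX",
    "hexsha": ["add83d8108f3602c5bbe7b37f60ff19f89b2236d"],
    "file_path": ["src/trex/writers.py"],
    "code": ["from pathlib import Path\nfrom typing import List\n..."],
    "apis": [["numpy.array"]],
    "possible_versions": [
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []}
    ],
}

# hexsha, file_path, code, apis, and possible_versions are index-aligned:
# entry i of each sequence describes the same file of the repository.
for sha, path, apis in zip(row["hexsha"], row["file_path"], row["apis"]):
    print(f"{row['repo_name']}:{path} @ {sha[:8]} uses {apis}")
```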
18621579069/PaddleHub-yu | [
"47741382cf15eda852fefdada6ce83ef86350af6",
"15e8bcef2addf239081e235bdcfd039de12330e0",
"15e8bcef2addf239081e235bdcfd039de12330e0",
"15e8bcef2addf239081e235bdcfd039de12330e0",
"15e8bcef2addf239081e235bdcfd039de12330e0",
"15e8bcef2addf239081e235bdcfd039de12330e0",
"15e8bcef2addf239081e235bdcfd039de12330e0",
"15e8bcef2addf239081e235bdcfd039de12330e0",
"15e8bcef2addf239081e235bdcfd039de12330e0"
] | [
"paddlehub/contrib/ppdet/data/source/simple_source.py",
"hub_module/modules/text/semantic_model/slda_news/util.py",
"hub_module/modules/image/object_detection/retinanet_resnet50_fpn_coco2017/retina_head.py",
"hub_module/modules/image/humanseg/humanseg_lite/data_feed.py",
"hub_module/modules/image/classification/fix_resnext101_32x48d_wsl_imagenet/module.py",
"hub_module/modules/image/classification/vgg16_imagenet/data_feed.py",
"hub_module/modules/image/face_detection/ultra_light_fast_generic_face_detector_1mb_320/data_feed.py",
"hub_module/tests/unittests/test_mobilenet_v1.py",
"hub_module/modules/image/super_resolution/dcscn/processor.py"
] | [
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# function:\n# interface to load data from txt file.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport copy\nfrom ..dataset import Dataset\n\n\nclass SimpleSource(Dataset):\n \"\"\"\n Load image files for testing purpose\n\n Args:\n images (list): list of path of images\n samples (int): number of samples to load, -1 means all\n load_img (bool): should images be loaded\n \"\"\"\n\n def __init__(self, images=[], samples=-1, load_img=True, **kwargs):\n super(SimpleSource, self).__init__()\n self._epoch = -1\n for image in images:\n assert image != '' and os.path.isfile(image), \\\n \"Image {} not found\".format(image)\n self._images = images\n self._fname = None\n self._simple = None\n self._pos = -1\n self._drained = False\n self._samples = samples\n self._load_img = load_img\n self._imid2path = {}\n\n def next(self):\n if self._epoch < 0:\n self.reset()\n\n if self._pos >= self.size():\n self._drained = True\n raise StopIteration(\"no more data in \" + str(self))\n else:\n sample = copy.deepcopy(self._simple[self._pos])\n if self._load_img:\n sample['image'] = self._load_image(sample['im_file'])\n\n self._pos += 1\n return sample\n\n def _load(self):\n ct = 0\n records = []\n for image in self._images:\n if self._samples > 0 and ct >= self._samples:\n break\n rec = {'im_id': np.array([ct]), 'im_file': image}\n self._imid2path[ct] = image\n ct += 1\n records.append(rec)\n assert len(records) > 0, \"no image file found\"\n return records\n\n def _load_image(self, where):\n with open(where, 'rb') as f:\n return f.read()\n\n def reset(self):\n if self._simple is None:\n self._simple = self._load()\n\n if self._epoch < 0:\n self._epoch = 0\n else:\n self._epoch += 1\n\n self._pos = 0\n self._drained = False\n\n def size(self):\n return len(self._simple)\n\n def drained(self):\n assert self._epoch >= 0, \"the first epoch has not started yet\"\n return self._pos >= self.size()\n\n def epoch_id(self):\n return self._epoch\n\n def get_imid2path(self):\n \"\"\"return image id to image path map\"\"\"\n return self._imid2path\n",
"import time\nimport yaml\n\nimport numpy as np\nfrom paddlehub.common.logger import logger\n\nfrom slda_news.config import ModelType\n\n\ndef load_prototxt(config_file, config):\n \"\"\"\n Args:\n config_file: model configuration file.\n config: ModelConfig class\n \"\"\"\n logger.info(\"Loading SLDA config.\")\n with open(config_file, 'r') as f:\n yaml_dict = yaml.load(f, Loader=yaml.FullLoader)\n\n # Assignment.\n if yaml_dict[\"type\"] == \"LDA\":\n config.type = ModelType.LDA\n else:\n config.type = ModelType.SLDA\n config.num_topics = yaml_dict[\"num_topics\"]\n config.alpha = yaml_dict[\"alpha\"]\n config.beta = yaml_dict[\"beta\"]\n config.word_topic_file = yaml_dict[\"word_topic_file\"]\n config.vocab_file = yaml_dict[\"vocab_file\"]\n\n\ndef fix_random_seed(seed=2147483647):\n np.random.seed(seed)\n\n\ndef rand(min_=0, max_=1):\n return np.random.uniform(low=min_, high=max_)\n\n\ndef rand_k(k):\n \"\"\"Returns an integer float number between [0, k - 1].\n \"\"\"\n return int(rand() * k)\n\n\ndef timeit(f):\n \"\"\"Return time cost of function f.\n \"\"\"\n\n def timed(*args, **kwargs):\n start_time = time.time()\n result = f(*args, **kwargs)\n end_time = time.time()\n print(\" [-] %s : %2.5f sec\" % (f.__name__, end_time - start_time))\n return result\n\n return timed\n",
"# coding=utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport paddle.fluid as fluid\nfrom paddle.fluid.param_attr import ParamAttr\nfrom paddle.fluid.initializer import Normal, Constant\nfrom paddle.fluid.regularizer import L2Decay\n\n__all__ = [\n 'AnchorGenerator', 'RetinaTargetAssign', 'RetinaOutputDecoder', 'RetinaHead'\n]\n\n\nclass AnchorGenerator(object):\n # __op__ = fluid.layers.anchor_generator\n def __init__(self,\n stride=[16.0, 16.0],\n anchor_sizes=[32, 64, 128, 256, 512],\n aspect_ratios=[0.5, 1., 2.],\n variance=[1., 1., 1., 1.]):\n self.anchor_sizes = anchor_sizes\n self.aspect_ratios = aspect_ratios\n self.variance = variance\n self.stride = stride\n\n\nclass RetinaTargetAssign(object):\n # __op__ = fluid.layers.retinanet_target_assign\n def __init__(self, positive_overlap=0.5, negative_overlap=0.4):\n self.positive_overlap = positive_overlap\n self.negative_overlap = negative_overlap\n\n\nclass RetinaOutputDecoder(object):\n # __op__ = fluid.layers.retinanet_detection_output\n def __init__(self,\n score_thresh=0.05,\n nms_thresh=0.3,\n pre_nms_top_n=1000,\n detections_per_im=100,\n nms_eta=1.0):\n super(RetinaOutputDecoder, self).__init__()\n self.score_threshold = score_thresh\n self.nms_threshold = nms_thresh\n self.nms_top_k = pre_nms_top_n\n self.keep_top_k = detections_per_im\n self.nms_eta = nms_eta\n\n\nclass RetinaHead(object):\n \"\"\"\n Retina Head\n\n Args:\n anchor_generator (object): `AnchorGenerator` instance\n target_assign (object): `RetinaTargetAssign` instance\n output_decoder (object): `RetinaOutputDecoder` instance\n num_convs_per_octave (int): Number of convolution layers in each octave\n num_chan (int): Number of octave output channels\n max_level (int): Highest level of FPN output\n min_level (int): Lowest level of FPN output\n prior_prob (float): Used to set the bias init for the class prediction layer\n base_scale (int): Anchors are generated based on this scale\n num_scales_per_octave (int): Number of anchor scales per octave\n num_classes (int): Number of classes\n gamma (float): The parameter in focal loss\n alpha (float): The parameter in focal loss\n sigma (float): The parameter in smooth l1 loss\n \"\"\"\n __inject__ = ['anchor_generator', 'target_assign', 'output_decoder']\n __shared__ = ['num_classes']\n\n def __init__(self,\n anchor_generator=AnchorGenerator(),\n target_assign=RetinaTargetAssign(),\n output_decoder=RetinaOutputDecoder(),\n num_convs_per_octave=4,\n num_chan=256,\n max_level=7,\n min_level=3,\n prior_prob=0.01,\n base_scale=4,\n num_scales_per_octave=3,\n num_classes=81,\n gamma=2.0,\n alpha=0.25,\n sigma=3.0151134457776365):\n self.anchor_generator = anchor_generator\n self.target_assign = target_assign\n self.output_decoder = output_decoder\n self.num_convs_per_octave = num_convs_per_octave\n self.num_chan = num_chan\n self.max_level = max_level\n self.min_level = min_level\n self.prior_prob = prior_prob\n self.base_scale = base_scale\n self.num_scales_per_octave = num_scales_per_octave\n self.num_classes = num_classes\n self.gamma = gamma\n self.alpha = alpha\n self.sigma = sigma\n\n def _class_subnet(self, body_feats, spatial_scale):\n \"\"\"\n Get class predictions of all level FPN level.\n\n Args:\n fpn_dict(dict): A dictionary represents the output of FPN with\n their name.\n spatial_scale(list): A list of multiplicative spatial scale factor.\n\n Returns:\n cls_pred_input(list): Class prediction of all input fpn 
levels.\n \"\"\"\n assert len(body_feats) == self.max_level - self.min_level + 1\n fpn_name_list = list(body_feats.keys())\n cls_pred_list = []\n for lvl in range(self.min_level, self.max_level + 1):\n fpn_name = fpn_name_list[self.max_level - lvl]\n subnet_blob = body_feats[fpn_name]\n for i in range(self.num_convs_per_octave):\n conv_name = 'retnet_cls_conv_n{}_fpn{}'.format(i, lvl)\n conv_share_name = 'retnet_cls_conv_n{}_fpn{}'.format(\n i, self.min_level)\n subnet_blob_in = subnet_blob\n subnet_blob = fluid.layers.conv2d(\n input=subnet_blob_in,\n num_filters=self.num_chan,\n filter_size=3,\n stride=1,\n padding=1,\n act='relu',\n name=conv_name,\n param_attr=ParamAttr(\n name=conv_share_name + '_w',\n initializer=Normal(loc=0., scale=0.01)),\n bias_attr=ParamAttr(\n name=conv_share_name + '_b',\n learning_rate=2.,\n regularizer=L2Decay(0.)))\n\n # class prediction\n cls_name = 'retnet_cls_pred_fpn{}'.format(lvl)\n cls_share_name = 'retnet_cls_pred_fpn{}'.format(self.min_level)\n num_anchors = self.num_scales_per_octave * len(\n self.anchor_generator.aspect_ratios)\n cls_dim = num_anchors * (self.num_classes - 1)\n # bias initialization: b = -log((1 - pai) / pai)\n bias_init = float(-np.log((1 - self.prior_prob) / self.prior_prob))\n out_cls = fluid.layers.conv2d(\n input=subnet_blob,\n num_filters=cls_dim,\n filter_size=3,\n stride=1,\n padding=1,\n act=None,\n name=cls_name,\n param_attr=ParamAttr(\n name=cls_share_name + '_w',\n initializer=Normal(loc=0., scale=0.01)),\n bias_attr=ParamAttr(\n name=cls_share_name + '_b',\n initializer=Constant(value=bias_init),\n learning_rate=2.,\n regularizer=L2Decay(0.)))\n cls_pred_list.append(out_cls)\n\n return cls_pred_list\n\n def _bbox_subnet(self, body_feats, spatial_scale):\n \"\"\"\n Get bounding box predictions of all level FPN level.\n\n Args:\n fpn_dict(dict): A dictionary represents the output of FPN with\n their name.\n spatial_scale(list): A list of multiplicative spatial scale factor.\n\n Returns:\n bbox_pred_input(list): Bounding box prediction of all input fpn\n levels.\n \"\"\"\n assert len(body_feats) == self.max_level - self.min_level + 1\n fpn_name_list = list(body_feats.keys())\n bbox_pred_list = []\n for lvl in range(self.min_level, self.max_level + 1):\n fpn_name = fpn_name_list[self.max_level - lvl]\n subnet_blob = body_feats[fpn_name]\n for i in range(self.num_convs_per_octave):\n conv_name = 'retnet_bbox_conv_n{}_fpn{}'.format(i, lvl)\n conv_share_name = 'retnet_bbox_conv_n{}_fpn{}'.format(\n i, self.min_level)\n subnet_blob_in = subnet_blob\n subnet_blob = fluid.layers.conv2d(\n input=subnet_blob_in,\n num_filters=self.num_chan,\n filter_size=3,\n stride=1,\n padding=1,\n act='relu',\n name=conv_name,\n param_attr=ParamAttr(\n name=conv_share_name + '_w',\n initializer=Normal(loc=0., scale=0.01)),\n bias_attr=ParamAttr(\n name=conv_share_name + '_b',\n learning_rate=2.,\n regularizer=L2Decay(0.)))\n\n # bbox prediction\n bbox_name = 'retnet_bbox_pred_fpn{}'.format(lvl)\n bbox_share_name = 'retnet_bbox_pred_fpn{}'.format(self.min_level)\n num_anchors = self.num_scales_per_octave * len(\n self.anchor_generator.aspect_ratios)\n bbox_dim = num_anchors * 4\n out_bbox = fluid.layers.conv2d(\n input=subnet_blob,\n num_filters=bbox_dim,\n filter_size=3,\n stride=1,\n padding=1,\n act=None,\n name=bbox_name,\n param_attr=ParamAttr(\n name=bbox_share_name + '_w',\n initializer=Normal(loc=0., scale=0.01)),\n bias_attr=ParamAttr(\n name=bbox_share_name + '_b',\n learning_rate=2.,\n regularizer=L2Decay(0.)))\n 
bbox_pred_list.append(out_bbox)\n return bbox_pred_list\n\n def _anchor_generate(self, body_feats, spatial_scale):\n \"\"\"\n Get anchor boxes of all level FPN level.\n\n Args:\n fpn_dict(dict): A dictionary represents the output of FPN with their name.\n spatial_scale(list): A list of multiplicative spatial scale factor.\n\n Return:\n anchor_input(list): Anchors of all input fpn levels with shape of.\n anchor_var_input(list): Anchor variance of all input fpn levels with shape.\n \"\"\"\n assert len(body_feats) == self.max_level - self.min_level + 1\n fpn_name_list = list(body_feats.keys())\n anchor_list = []\n anchor_var_list = []\n for lvl in range(self.min_level, self.max_level + 1):\n anchor_sizes = []\n stride = int(1 / spatial_scale[self.max_level - lvl])\n for octave in range(self.num_scales_per_octave):\n anchor_size = stride * (2**(float(octave) / float(\n self.num_scales_per_octave))) * self.base_scale\n anchor_sizes.append(anchor_size)\n fpn_name = fpn_name_list[self.max_level - lvl]\n anchor, anchor_var = fluid.layers.anchor_generator(\n input=body_feats[fpn_name],\n anchor_sizes=anchor_sizes,\n aspect_ratios=self.anchor_generator.aspect_ratios,\n stride=[stride, stride],\n variance=self.anchor_generator.variance)\n anchor_list.append(anchor)\n anchor_var_list.append(anchor_var)\n return anchor_list, anchor_var_list\n\n def _get_output(self, body_feats, spatial_scale):\n \"\"\"\n Get class, bounding box predictions and anchor boxes of all level FPN level.\n\n Args:\n fpn_dict(dict): A dictionary represents the output of FPN with\n their name.\n spatial_scale(list): A list of multiplicative spatial scale factor.\n\n Returns:\n cls_pred_input(list): Class prediction of all input fpn levels.\n bbox_pred_input(list): Bounding box prediction of all input fpn\n levels.\n anchor_input(list): Anchors of all input fpn levels with shape of.\n anchor_var_input(list): Anchor variance of all input fpn levels with\n shape.\n \"\"\"\n assert len(body_feats) == self.max_level - self.min_level + 1\n # class subnet\n cls_pred_list = self._class_subnet(body_feats, spatial_scale)\n # bbox subnet\n bbox_pred_list = self._bbox_subnet(body_feats, spatial_scale)\n #generate anchors\n anchor_list, anchor_var_list = self._anchor_generate(\n body_feats, spatial_scale)\n cls_pred_reshape_list = []\n bbox_pred_reshape_list = []\n anchor_reshape_list = []\n anchor_var_reshape_list = []\n for i in range(self.max_level - self.min_level + 1):\n cls_pred_transpose = fluid.layers.transpose(\n cls_pred_list[i], perm=[0, 2, 3, 1])\n cls_pred_reshape = fluid.layers.reshape(\n cls_pred_transpose, shape=(0, -1, self.num_classes - 1))\n bbox_pred_transpose = fluid.layers.transpose(\n bbox_pred_list[i], perm=[0, 2, 3, 1])\n bbox_pred_reshape = fluid.layers.reshape(\n bbox_pred_transpose, shape=(0, -1, 4))\n anchor_reshape = fluid.layers.reshape(anchor_list[i], shape=(-1, 4))\n anchor_var_reshape = fluid.layers.reshape(\n anchor_var_list[i], shape=(-1, 4))\n cls_pred_reshape_list.append(cls_pred_reshape)\n bbox_pred_reshape_list.append(bbox_pred_reshape)\n anchor_reshape_list.append(anchor_reshape)\n anchor_var_reshape_list.append(anchor_var_reshape)\n output = {}\n output['cls_pred'] = cls_pred_reshape_list\n output['bbox_pred'] = bbox_pred_reshape_list\n output['anchor'] = anchor_reshape_list\n output['anchor_var'] = anchor_var_reshape_list\n return output\n\n def get_prediction(self, body_feats, spatial_scale, im_info):\n \"\"\"\n Get prediction bounding box in test stage.\n\n Args:\n fpn_dict(dict): A dictionary 
represents the output of FPN with\n their name.\n spatial_scale(list): A list of multiplicative spatial scale factor.\n im_info (Variable): A 2-D LoDTensor with shape [B, 3]. B is the\n number of input images, each element consists of im_height,\n im_width, im_scale.\n\n Returns:\n pred_result(Variable): Prediction result with shape [N, 6]. Each\n row has 6 values: [label, confidence, xmin, ymin, xmax, ymax].\n N is the total number of prediction.\n \"\"\"\n output = self._get_output(body_feats, spatial_scale)\n cls_pred_reshape_list = output['cls_pred']\n bbox_pred_reshape_list = output['bbox_pred']\n anchor_reshape_list = output['anchor']\n for i in range(self.max_level - self.min_level + 1):\n cls_pred_reshape_list[i] = fluid.layers.sigmoid(\n cls_pred_reshape_list[i])\n pred_result = fluid.layers.retinanet_detection_output(\n bboxes=bbox_pred_reshape_list,\n scores=cls_pred_reshape_list,\n anchors=anchor_reshape_list,\n im_info=im_info,\n score_threshold=self.output_decoder.score_threshold,\n nms_threshold=self.output_decoder.nms_threshold,\n nms_top_k=self.output_decoder.nms_top_k,\n keep_top_k=self.output_decoder.keep_top_k,\n nms_eta=self.output_decoder.nms_eta)\n return pred_result\n\n def get_loss(self, body_feats, spatial_scale, im_info, gt_box, gt_label,\n is_crowd):\n \"\"\"\n Calculate the loss of retinanet.\n Args:\n fpn_dict(dict): A dictionary represents the output of FPN with\n their name.\n spatial_scale(list): A list of multiplicative spatial scale factor.\n im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the\n number of input images, each element consists of im_height,\n im_width, im_scale.\n gt_box(Variable): The ground-truth bounding boxes with shape [M, 4].\n M is the number of groundtruth.\n gt_label(Variable): The ground-truth labels with shape [M, 1].\n M is the number of groundtruth.\n is_crowd(Variable): Indicates groud-truth is crowd or not with\n shape [M, 1]. 
M is the number of groundtruth.\n\n Returns:\n Type: dict\n loss_cls(Variable): focal loss.\n loss_bbox(Variable): smooth l1 loss.\n \"\"\"\n output = self._get_output(body_feats, spatial_scale)\n cls_pred_reshape_list = output['cls_pred']\n bbox_pred_reshape_list = output['bbox_pred']\n anchor_reshape_list = output['anchor']\n anchor_var_reshape_list = output['anchor_var']\n\n cls_pred_input = fluid.layers.concat(cls_pred_reshape_list, axis=1)\n bbox_pred_input = fluid.layers.concat(bbox_pred_reshape_list, axis=1)\n anchor_input = fluid.layers.concat(anchor_reshape_list, axis=0)\n anchor_var_input = fluid.layers.concat(anchor_var_reshape_list, axis=0)\n score_pred, loc_pred, score_tgt, loc_tgt, bbox_weight, fg_num = \\\n fluid.layers.rpn_target_assign(\n bbox_pred=bbox_pred_input,\n cls_logits=cls_pred_input,\n anchor_box=anchor_input,\n anchor_var=anchor_var_input,\n gt_boxes=gt_box,\n gt_labels=gt_label,\n is_crowd=is_crowd,\n im_info=im_info,\n num_classes=self.num_classes - 1,\n rpn_batch_size_per_im=self.target_assign.rpn_batch_size_per_im,\n rpn_straddle_thresh=self.target_assign.rpn_straddle_thresh,\n rpn_fg_fraction=self.target_assign.rpn_fg_fraction,\n rpn_positive_overlap=self.target_assign.rpn_positive_overlap,\n rpn_negative_overlap=self.target_assign.rpn_negative_overlap,\n use_random=self.target_assign.use_random)\n fg_num = fluid.layers.reduce_sum(fg_num, name='fg_num')\n score_tgt = fluid.layers.cast(score_tgt, 'int32')\n loss_cls = fluid.layers.sigmoid_focal_loss(\n x=score_pred,\n label=score_tgt,\n fg_num=fg_num,\n gamma=self.gamma,\n alpha=self.alpha)\n loss_cls = fluid.layers.reduce_sum(loss_cls, name='loss_cls')\n loss_bbox = fluid.layers.smooth_l1(\n x=loc_pred,\n y=loc_tgt,\n sigma=self.sigma,\n inside_weight=bbox_weight,\n outside_weight=bbox_weight)\n loss_bbox = fluid.layers.reduce_sum(loss_bbox, name='loss_bbox')\n loss_bbox = loss_bbox / fg_num\n return {'loss_cls': loss_cls, 'loss_bbox': loss_bbox}\n",
"# -*- coding:utf-8 -*-\nimport os\nimport time\nfrom collections import OrderedDict\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\n__all__ = ['reader', 'preprocess_v']\n\n\ndef preprocess_v(img, w, h):\n img = cv2.resize(img, (w, h), cv2.INTER_LINEAR).astype(np.float32)\n img_mean = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))\n img_std = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))\n img = img.transpose((2, 0, 1)) / 255\n img -= img_mean\n img /= img_std\n return img\n\n\ndef reader(images=None, paths=None):\n \"\"\"\n Preprocess to yield image.\n\n Args:\n images (list(numpy.ndarray)): images data, shape of each is [H, W, C]\n paths (list[str]): paths to images.\n\n Yield:\n each (collections.OrderedDict): info of original image, preprocessed image.\n \"\"\"\n component = list()\n if paths:\n for im_path in paths:\n each = OrderedDict()\n assert os.path.isfile(\n im_path), \"The {} isn't a valid file path.\".format(im_path)\n #print(im_path)\n im = cv2.imread(im_path).astype('float32')\n each['org_im'] = im\n each['org_im_path'] = im_path\n each['org_im_shape'] = im.shape\n component.append(each)\n if images is not None:\n assert type(images) is list, \"images should be a list.\"\n for im in images:\n each = OrderedDict()\n each['org_im'] = im\n each['org_im_path'] = 'ndarray_time={}'.format(\n round(time.time(), 6) * 1e6)\n each['org_im_shape'] = im.shape\n component.append(each)\n\n for element in component:\n img = element['org_im'].copy()\n img = cv2.resize(img, (192, 192)).astype(np.float32)\n img_mean = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))\n img_std = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))\n img = img.transpose((2, 0, 1)) / 255\n img -= img_mean\n img /= img_std\n element['image'] = img\n yield element\n",
"# coding=utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport ast\nimport argparse\nimport os\n\nimport numpy as np\nimport paddle.fluid as fluid\nimport paddlehub as hub\nfrom paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor\nfrom paddlehub.module.module import moduleinfo, runnable, serving\nfrom paddlehub.common.paddle_helper import add_vars_prefix\n\nfrom fix_resnext101_32x48d_wsl_imagenet.processor import postprocess, base64_to_cv2\nfrom fix_resnext101_32x48d_wsl_imagenet.data_feed import reader\nfrom fix_resnext101_32x48d_wsl_imagenet.resnext101_wsl import Fix_ResNeXt101_32x48d_wsl\n\n\n@moduleinfo(\n name=\"fix_resnext101_32x48d_wsl_imagenet\",\n type=\"CV/image_classification\",\n author=\"paddlepaddle\",\n author_email=\"[email protected]\",\n summary=\n \"fix_resnext101_32x48d_wsl is a image classfication model, this module is trained with imagenet datasets.\",\n version=\"1.0.0\")\nclass FixResnext10132x48dwslImagenet(hub.Module):\n def _initialize(self):\n self.default_pretrained_model_path = os.path.join(\n self.directory, \"model\")\n label_file = os.path.join(self.directory, \"label_list.txt\")\n with open(label_file, 'r', encoding='utf-8') as file:\n self.label_list = file.read().split(\"\\n\")[:-1]\n self.predictor_set = False\n\n def get_expected_image_width(self):\n return 224\n\n def get_expected_image_height(self):\n return 224\n\n def get_pretrained_images_mean(self):\n im_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3)\n return im_mean\n\n def get_pretrained_images_std(self):\n im_std = np.array([0.229, 0.224, 0.225]).reshape(1, 3)\n return im_std\n\n def _set_config(self):\n \"\"\"\n predictor config setting\n \"\"\"\n cpu_config = AnalysisConfig(self.default_pretrained_model_path)\n cpu_config.disable_glog_info()\n cpu_config.disable_gpu()\n self.cpu_predictor = create_paddle_predictor(cpu_config)\n\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n use_gpu = True\n except:\n use_gpu = False\n if use_gpu:\n gpu_config = AnalysisConfig(self.default_pretrained_model_path)\n gpu_config.disable_glog_info()\n gpu_config.enable_use_gpu(\n memory_pool_init_size_mb=1000, device_id=0)\n self.gpu_predictor = create_paddle_predictor(gpu_config)\n\n def context(self, trainable=True, pretrained=True):\n \"\"\"context for transfer learning.\n\n Args:\n trainable (bool): Set parameters in program to be trainable.\n pretrained (bool) : Whether to load pretrained model.\n\n Returns:\n inputs (dict): key is 'image', corresponding vaule is image tensor.\n outputs (dict): key is :\n 'classification', corresponding value is the result of classification.\n 'feature_map', corresponding value is the result of the layer before the fully connected layer.\n context_prog (fluid.Program): program for transfer learning.\n \"\"\"\n context_prog = fluid.Program()\n startup_prog = fluid.Program()\n with fluid.program_guard(context_prog, startup_prog):\n with fluid.unique_name.guard():\n image = fluid.layers.data(\n name=\"image\", shape=[3, 224, 224], dtype=\"float32\")\n resnet_vd = Fix_ResNeXt101_32x48d_wsl()\n output, feature_map = resnet_vd.net(\n input=image, class_dim=len(self.label_list))\n\n name_prefix = '@HUB_{}@'.format(self.name)\n inputs = {'image': name_prefix + image.name}\n outputs = {\n 'classification': name_prefix + output.name,\n 'feature_map': name_prefix + feature_map.name\n }\n add_vars_prefix(context_prog, name_prefix)\n add_vars_prefix(startup_prog, name_prefix)\n global_vars = 
context_prog.global_block().vars\n inputs = {\n key: global_vars[value]\n for key, value in inputs.items()\n }\n outputs = {\n key: global_vars[value]\n for key, value in outputs.items()\n }\n\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n # pretrained\n if pretrained:\n\n def _if_exist(var):\n b = os.path.exists(\n os.path.join(self.default_pretrained_model_path,\n var.name))\n return b\n\n fluid.io.load_vars(\n exe,\n self.default_pretrained_model_path,\n context_prog,\n predicate=_if_exist)\n else:\n exe.run(startup_prog)\n # trainable\n for param in context_prog.global_block().iter_parameters():\n param.trainable = trainable\n return inputs, outputs, context_prog\n\n def classification(self,\n images=None,\n paths=None,\n batch_size=1,\n use_gpu=False,\n top_k=1):\n \"\"\"\n API for image classification.\n\n Args:\n images (list[numpy.ndarray]): data of images, shape of each is [H, W, C], color space must be BGR.\n paths (list[str]): The paths of images.\n batch_size (int): batch size.\n use_gpu (bool): Whether to use gpu.\n top_k (int): Return top k results.\n\n Returns:\n res (list[dict]): The classfication results.\n \"\"\"\n if use_gpu:\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n except:\n raise RuntimeError(\n \"Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id.\"\n )\n\n if not self.predictor_set:\n self._set_config()\n self.predictor_set = True\n\n all_data = list()\n for yield_data in reader(images, paths):\n all_data.append(yield_data)\n\n total_num = len(all_data)\n loop_num = int(np.ceil(total_num / batch_size))\n\n res = list()\n for iter_id in range(loop_num):\n batch_data = list()\n handle_id = iter_id * batch_size\n for image_id in range(batch_size):\n try:\n batch_data.append(all_data[handle_id + image_id])\n except:\n pass\n # feed batch image\n batch_image = np.array([data['image'] for data in batch_data])\n batch_image = PaddleTensor(batch_image.copy())\n predictor_output = self.gpu_predictor.run([\n batch_image\n ]) if use_gpu else self.cpu_predictor.run([batch_image])\n out = postprocess(\n data_out=predictor_output[0].as_ndarray(),\n label_list=self.label_list,\n top_k=top_k)\n res += out\n return res\n\n def save_inference_model(self,\n dirname,\n model_filename=None,\n params_filename=None,\n combined=True):\n if combined:\n model_filename = \"__model__\" if not model_filename else model_filename\n params_filename = \"__params__\" if not params_filename else params_filename\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n program, feeded_var_names, target_vars = fluid.io.load_inference_model(\n dirname=self.default_pretrained_model_path, executor=exe)\n\n fluid.io.save_inference_model(\n dirname=dirname,\n main_program=program,\n executor=exe,\n feeded_var_names=feeded_var_names,\n target_vars=target_vars,\n model_filename=model_filename,\n params_filename=params_filename)\n\n @serving\n def serving_method(self, images, **kwargs):\n \"\"\"\n Run as a service.\n \"\"\"\n images_decode = [base64_to_cv2(image) for image in images]\n results = self.classification(images=images_decode, **kwargs)\n return results\n\n @runnable\n def run_cmd(self, argvs):\n \"\"\"\n Run as a command.\n \"\"\"\n self.parser = argparse.ArgumentParser(\n description=\"Run the {} module.\".format(self.name),\n prog='hub run {}'.format(self.name),\n usage='%(prog)s',\n add_help=True)\n self.arg_input_group = self.parser.add_argument_group(\n 
title=\"Input options\", description=\"Input data. Required\")\n self.arg_config_group = self.parser.add_argument_group(\n title=\"Config options\",\n description=\n \"Run configuration for controlling module behavior, not required.\")\n self.add_module_config_arg()\n self.add_module_input_arg()\n args = self.parser.parse_args(argvs)\n results = self.classification(\n paths=[args.input_path],\n batch_size=args.batch_size,\n use_gpu=args.use_gpu)\n return results\n\n def add_module_config_arg(self):\n \"\"\"\n Add the command config options.\n \"\"\"\n self.arg_config_group.add_argument(\n '--use_gpu',\n type=ast.literal_eval,\n default=False,\n help=\"whether use GPU or not.\")\n self.arg_config_group.add_argument(\n '--batch_size',\n type=ast.literal_eval,\n default=1,\n help=\"batch size.\")\n self.arg_config_group.add_argument(\n '--top_k',\n type=ast.literal_eval,\n default=1,\n help=\"Return top k results.\")\n\n def add_module_input_arg(self):\n \"\"\"\n Add the command input options.\n \"\"\"\n self.arg_input_group.add_argument(\n '--input_path', type=str, help=\"path to image.\")\n",
"# coding=utf-8\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nfrom collections import OrderedDict\n\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageEnhance\nfrom paddle import fluid\n\nDATA_DIM = 224\nimg_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\nimg_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\n\n\ndef resize_short(img, target_size):\n percent = float(target_size) / min(img.size[0], img.size[1])\n resized_width = int(round(img.size[0] * percent))\n resized_height = int(round(img.size[1] * percent))\n img = img.resize((resized_width, resized_height), Image.LANCZOS)\n return img\n\n\ndef crop_image(img, target_size, center):\n width, height = img.size\n size = target_size\n if center == True:\n w_start = (width - size) / 2\n h_start = (height - size) / 2\n else:\n w_start = np.random.randint(0, width - size + 1)\n h_start = np.random.randint(0, height - size + 1)\n w_end = w_start + size\n h_end = h_start + size\n img = img.crop((w_start, h_start, w_end, h_end))\n return img\n\n\ndef process_image(img):\n img = resize_short(img, target_size=256)\n img = crop_image(img, target_size=DATA_DIM, center=True)\n if img.mode != 'RGB':\n img = img.convert('RGB')\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255\n img -= img_mean\n img /= img_std\n return img\n\n\ndef test_reader(paths=None, images=None):\n \"\"\"data generator\n :param paths: path to images.\n :type paths: list, each element is a str\n :param images: data of images, [N, H, W, C]\n :type images: numpy.ndarray\n \"\"\"\n img_list = []\n if paths:\n for img_path in paths:\n assert os.path.isfile(\n img_path), \"The {} isn't a valid file path.\".format(img_path)\n img = Image.open(img_path)\n #img = cv2.imread(img_path)\n img_list.append(img)\n if images is not None:\n for img in images:\n img_list.append(Image.fromarray(np.uint8(img)))\n for im in img_list:\n im = process_image(im)\n yield im\n",
"# coding=utf-8\nimport os\nfrom collections import OrderedDict\n\nimport cv2\nimport numpy as np\n\n__all__ = ['reader']\n\n\ndef preprocess(orig_image):\n image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (320, 240))\n image_mean = np.array([127, 127, 127])\n image = (image - image_mean) / 128.0\n image = np.transpose(image, [2, 0, 1])\n return image\n\n\ndef reader(images=None, paths=None):\n \"\"\"\n Preprocess to yield image.\n\n Args:\n images (list(numpy.ndarray)): images data, shape of each is [H, W, C]\n paths (list[str]): paths to images.\n\n Yield:\n each (collections.OrderedDict): info of original image, preprocessed image.\n \"\"\"\n component = list()\n if paths:\n for im_path in paths:\n each = OrderedDict()\n assert os.path.isfile(\n im_path), \"The {} isn't a valid file path.\".format(im_path)\n im = cv2.imread(im_path)\n each['orig_im'] = im\n each['orig_im_shape'] = im.shape # height, width, channel\n each['orig_im_path'] = im_path\n component.append(each)\n if images is not None:\n assert type(images) is list, \"images should be a list.\"\n for im in images:\n each = OrderedDict()\n each['orig_im'] = im\n each['orig_im_path'] = None\n each['orig_im_shape'] = im.shape # height, width, channel\n component.append(each)\n\n for element in component:\n element['image'] = preprocess(element['orig_im'])\n yield element\n",
"# coding=utf-8\nimport os\nimport unittest\n\nimport cv2\nimport numpy as np\nimport paddle.fluid as fluid\nimport paddlehub as hub\n\n\nclass TestMobileNetV1(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n \"\"\"Prepare the environment once before execution of all tests.\"\"\"\n # self.mobilenet_v1 = hub.Module(name=\"mobilenet_v1\")\n self.mobilenet_v1 = hub.Module(name='mobilenet_v1_imagenet')\n\n @classmethod\n def tearDownClass(self):\n \"\"\"clean up the environment after the execution of all tests.\"\"\"\n self.ssd = None\n\n def setUp(self):\n \"Call setUp() to prepare environment\\n\"\n self.test_prog = fluid.Program()\n\n def tearDown(self):\n \"Call tearDown to restore environment.\\n\"\n self.test_prog = None\n\n def test_context(self):\n with fluid.program_guard(self.test_prog):\n image = fluid.layers.data(\n name='image', shape=[3, 224, 224], dtype='float32')\n inputs, outputs, program = self.mobilenet_v1.context(\n input_image=image,\n pretrained=False,\n trainable=True,\n param_prefix='BaiDu')\n image = inputs[\"image\"]\n body_feats = outputs['body_feats']\n\n def test_classification(self):\n with fluid.program_guard(self.test_prog):\n image_dir = \"../image_dataset/pascal_voc\"\n airplane = cv2.imread(os.path.join(\n image_dir, 'airplane.jpg')).astype('float32')\n airplanes = np.array([airplane, airplane])\n classification_results = self.mobilenet_v1.classification(\n paths=[\n os.path.join(image_dir, 'bird.jpg'),\n os.path.join(image_dir, 'bike.jpg'),\n os.path.join(image_dir, 'cowboy.jpg'),\n os.path.join(image_dir, 'sheep.jpg'),\n os.path.join(image_dir, 'train.jpg')\n ],\n images=airplanes,\n batch_size=2)\n print(classification_results)\n\n\nif __name__ == \"__main__\":\n suite = unittest.TestSuite()\n suite.addTest(TestMobileNetV1('test_context'))\n suite.addTest(TestMobileNetV1('test_classification'))\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n",
"# -*- coding:utf-8 -*-\nimport os\nimport time\nimport base64\n\nimport cv2\nimport numpy as np\n\n__all__ = ['cv2_to_base64', 'base64_to_cv2', 'postprocess']\n\n\ndef cv2_to_base64(image):\n data = cv2.imencode('.jpg', image)[1]\n return base64.b64encode(data.tostring()).decode('utf8')\n\n\ndef base64_to_cv2(b64str):\n data = base64.b64decode(b64str.encode('utf8'))\n data = np.fromstring(data, np.uint8)\n data = cv2.imdecode(data, cv2.IMREAD_COLOR)\n return data\n\n\ndef postprocess(data_out, org_im, org_im_shape, org_im_path, output_dir,\n visualization):\n \"\"\"\n Postprocess output of network. one image at a time.\n\n Args:\n data_out (numpy.ndarray): output of network.\n org_im (numpy.ndarray): original image.\n org_im_shape (list): shape pf original image.\n org_im_path (list): path of riginal image.\n output_dir (str): output directory to store image.\n visualization (bool): whether to save image or not.\n\n Returns:\n result (dict): The data of processed image.\n \"\"\"\n result = dict()\n for sr in data_out:\n sr = np.squeeze(sr, 0)\n sr = np.clip(sr * 255, 0, 255)\n sr = sr.astype(np.uint8)\n shape = sr.shape\n if visualization:\n org_im = cv2.cvtColor(org_im, cv2.COLOR_BGR2YUV)\n uv = cv2.resize(\n org_im[..., 1:], (shape[1], shape[0]),\n interpolation=cv2.INTER_CUBIC)\n combine_im = cv2.cvtColor(\n np.concatenate((sr, uv), axis=2), cv2.COLOR_YUV2BGR)\n check_dir(output_dir)\n save_im_path = get_save_image_name(org_im, org_im_path, output_dir)\n cv2.imwrite(save_im_path, combine_im)\n print(\"save image at: \", save_im_path)\n result['save_path'] = save_im_path\n result['data'] = sr\n else:\n result['data'] = sr\n\n return result\n\n\ndef check_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n elif os.path.isfile(dir_path):\n os.remove(dir_path)\n os.makedirs(dir_path)\n\n\ndef get_save_image_name(org_im, org_im_path, output_dir):\n \"\"\"\n Get save image name from source image path.\n \"\"\"\n # name prefix of orginal image\n org_im_name = os.path.split(org_im_path)[-1]\n im_prefix = os.path.splitext(org_im_name)[0]\n ext = '.png'\n # save image path\n save_im_path = os.path.join(output_dir, im_prefix + ext)\n if os.path.exists(save_im_path):\n save_im_path = os.path.join(\n output_dir, im_prefix + 'time={}'.format(int(time.time())) + ext)\n\n return save_im_path\n"
] | [
[
"numpy.array"
],
[
"numpy.random.uniform",
"numpy.random.seed"
],
[
"numpy.log"
],
[
"numpy.array"
],
[
"numpy.ceil",
"numpy.array"
],
[
"numpy.uint8",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.array",
"numpy.transpose"
],
[
"numpy.array"
],
[
"numpy.concatenate",
"numpy.squeeze",
"numpy.fromstring",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mcoughlin/PypeIt | [
"9aa1d10633faf3d73135e1a1c94b1cd18c7058e0",
"9aa1d10633faf3d73135e1a1c94b1cd18c7058e0"
] | [
"pypeit/core/gui/identify.py",
"pypeit/datamodel.py"
] | [
"import os\nimport copy\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.colors import LinearSegmentedColormap, Normalize\nfrom matplotlib.cm import ScalarMappable\nimport matplotlib.transforms as mtransforms\nfrom matplotlib.widgets import Button, Slider\n\nfrom IPython import embed\n\nfrom pypeit.par import pypeitpar\nfrom pypeit.core.wavecal import wv_fitting, waveio, wvutils\nfrom pypeit import utils, msgs\nfrom astropy.io import ascii as ascii_io\nfrom astropy.table import Table\n\noperations = dict({'cursor': \"Select lines (LMB click)\\n\" +\n \" Select regions (LMB drag = add, RMB drag = remove)\\n\" +\n \" Navigate (LMB drag = pan, RMB drag = zoom)\",\n 'left' : \"Advance the line list slider to the left by one\",\n 'right' : \"Advance the line list slider to the right by one\",\n 'p' : \"Toggle pan/zoom with the cursor\",\n 'q' : \"Close Identify window and continue PypeIt reduction\",\n 'a' : \"Automatically identify lines using current solution\",\n 'c' : \"Clear automatically identified lines\",\n 'd' : \"Delete all line identifications (start from scratch)\",\n 'f' : \"Fit the wavelength solution\",\n 'g' : \"Toggle ghost solution (show predicted line positions when wavelength is on the x-axis)\",\n 'h' : \"Reset ghost parameters\",\n 'l' : \"Load saved line IDs from file (waveids.ascii in local directory)\",\n 'm' : \"Select a line\",\n 'r' : \"Refit a line\",\n 's' : \"Save current line IDs to a file\",\n 'w' : \"Toggle wavelength/pixels on the x-axis of the main panel\",\n 'z' : \"Delete a single line identification\",\n '+/-' : \"Raise/Lower the order of the fitting polynomial\"\n })\n\n\nclass Identify(object):\n \"\"\"\n GUI to interactively identify arc lines. The GUI can be run within\n PypeIt during data reduction, or as a standalone script outside of\n PypeIt. 
To initialise the GUI, call the initialise() function in this\n file.\n \"\"\"\n\n def __init__(self, canvas, axes, spec, specres, detns, line_lists, par, lflag_color,\n slit=0, spatid='0', wv_calib=None, pxtoler=None):\n \"\"\"Controls for the Identify task in PypeIt.\n\n The main goal of this routine is to interactively identify arc lines\n to be used for wavelength calibration.\n\n Parameters\n ----------\n canvas : Matploltib figure canvas\n The canvas on which all axes are contained\n axes : dict\n Dictionary of four Matplotlib axes instances (Main spectrum panel, two for residuals, one for information)\n spec : Matplotlib.Line2D\n Matplotlib Line2D instance which contains plotting information of the plotted arc spectrum\n specres : dict\n Three element list of Matplotlib Line2D/path instances, used for residuals plotting\n detns : ndarray\n Detections from the arc spectrum\n line_lists : astropy.Table\n Contains information about the line list to be used for wavelength calibration\n par : class\n WavelengthSolutionPar Calibration parameters\n lflag_color : list\n List of colors used for plotting\n slit : int\n The slit to be used for wavelength calibration\n spatid : str\n Spatial ID corresponding to slit\n wv_calib : :obj:`dict`, None, optional\n If a best-fitting solution exists, and you wish to load it, provide the wv_calib dictionary.\n pxtoler : float, optional\n Tolerance in pixels for adding lines with the auto option\n \"\"\"\n # Store the axes\n self.axes = axes\n # Initialise the spectrum properties\n self.spec = spec\n self.specres = specres # Residual information\n self.specdata = spec.get_ydata()\n self.specx = np.arange(self.specdata.size)\n self.plotx = self.specx.copy()\n # Detections, linelist, line IDs, and fitting params\n self._slit = slit\n self._spatid = spatid\n self._detns = detns\n self._detnsy = self.get_ann_ypos() # Get the y locations of the annotations\n self._line_lists = line_lists\n self._lines = np.sort(line_lists['wave'].data) # Remove mask (if any) and then sort\n self._lineids = np.zeros(self._detns.size, dtype=np.float)\n self._lineflg = np.zeros(self._detns.size, dtype=np.int) # Flags: 0=no ID, 1=user ID, 2=auto ID, 3=flag reject\n self._lflag_color = lflag_color\n self.par = par\n # Auto ID\n self.pxtoler = 0.1 if pxtoler is None else pxtoler\n # Fitting properties\n self._fitdict = dict(polyorder=1,\n scale=self.specdata.size-1,\n coeff=None,\n fitc=None,\n full_fit=None,\n res_stats=[]\n )\n # Initialise the residuals colormap\n residcmap = LinearSegmentedColormap.from_list(\"my_list\", ['grey', 'blue', 'orange', 'red'], N=4)\n self.residmap = ScalarMappable(norm=Normalize(vmin=0, vmax=3), cmap=residcmap)\n # Initialise the annotations\n self.annlines = []\n self.anntexts = []\n\n # Unset some of the matplotlib keymaps\n matplotlib.pyplot.rcParams['keymap.fullscreen'] = '' # toggling fullscreen (Default: f, ctrl+f)\n matplotlib.pyplot.rcParams['keymap.home'] = '' # home or reset mnemonic (Default: h, r, home)\n matplotlib.pyplot.rcParams['keymap.back'] = '' # forward / backward keys to enable (Default: left, c, backspace)\n matplotlib.pyplot.rcParams['keymap.forward'] = '' # left handed quick navigation (Default: right, v)\n #matplotlib.pyplot.rcParams['keymap.pan'] = '' # pan mnemonic (Default: p)\n matplotlib.pyplot.rcParams['keymap.zoom'] = '' # zoom mnemonic (Default: o)\n matplotlib.pyplot.rcParams['keymap.save'] = '' # saving current figure (Default: s)\n matplotlib.pyplot.rcParams['keymap.quit'] = '' # close the current figure (Default: 
ctrl+w, cmd+w)\n matplotlib.pyplot.rcParams['keymap.grid'] = '' # switching on/off a grid in current axes (Default: g)\n matplotlib.pyplot.rcParams['keymap.yscale'] = '' # toggle scaling of y-axes ('log'/'linear') (Default: l)\n matplotlib.pyplot.rcParams['keymap.xscale'] = '' # toggle scaling of x-axes ('log'/'linear') (Default: L, k)\n matplotlib.pyplot.rcParams['keymap.all_axes'] = '' # enable all axes (Default: a)\n\n # Initialise the main canvas tools\n canvas.mpl_connect('draw_event', self.draw_callback)\n canvas.mpl_connect('button_press_event', self.button_press_callback)\n canvas.mpl_connect('key_press_event', self.key_press_callback)\n canvas.mpl_connect('button_release_event', self.button_release_callback)\n canvas.mpl_connect('motion_notify_event', self.motion_notify_event)\n self.canvas = canvas\n self.background = self.canvas.copy_from_bbox(self.axes['main'].bbox)\n\n # Interaction variables\n self._detns_idx = -1\n self._fitr = None # Matplotlib shaded fit region (for refitting lines)\n self._fitregions = np.zeros(self.specdata.size, dtype=np.int) # Mask of the pixels to be included in a fit\n self._addsub = 0 # Adding a region (1) or removing (0)\n self._msedown = False # Is the mouse button being held down (i.e. dragged)\n self._respreq = [False, None] # Does the user need to provide a response before any other operation will be permitted? Once the user responds, the second element of this array provides the action to be performed.\n self._qconf = False # Confirm quit message\n self._changes = False\n self._wavepix = 1 # Show wavelength (0) or pixels (1) on the x-axis of the main panel\n # Setup ghost properties\n # The ghost params correspond to the central wavelength and dispersion, as measured at the middle pixel of the display\n self._ghosttrans = mtransforms.blended_transform_factory(self.axes['main'].transData, self.axes['main'].transAxes)\n self._ghostmode = False # Display a ghost wavelength solution\n self._ghostdown = False\n self._ghostparam = [0.0, 1.0] # Ghost params [shift, scale] = [wavecen, disp]\n self.gstlines = []\n self.gsttexts = []\n\n # Setup slider for the linelist\n self._slideval = 0 # Default starting point for the linelist slider\n self.linelist_init()\n\n\n # If an initial solution is available, load it\n if wv_calib is not None:\n self.load_IDs(wv_calib=wv_calib)\n self.fitsol_fit()\n\n # Draw the spectrum\n self.replot()\n\n @classmethod\n def initialise(cls, arccen, slits, slit=0, par=None, wv_calib_all=None,\n wavelim=None, nonlinear_counts=None, test=False,\n pxtoler=0.1, fwhm=4.):\n \"\"\"Initialise the 'Identify' window for real-time wavelength calibration\n\n .. 
todo::\n\n * Implement multislit functionality\n\n Parameters\n ----------\n arccen : ndarray\n Arc spectrum\n slits : :class:`SlitTraceSet`\n Data container with slit trace information\n slit : int, optional\n The slit to be used for wavelength calibration\n par : :obj:`int`, optional\n The slit to be used for wavelength calibration\n wv_calib_all : :obj:`dict`, None, optional\n If a best-fitting solution exists, and you wish to load it, provide the wv_calib dictionary.\n wavelim : :obj:`list`, None, optional\n A two element list containing the desired minimum and maximum wavelength of the linelist\n test : bool, optional\n If True, this is a unit test\n nonlinear_counts : float, optional\n Counts where the arc is presumed to go non-linear\n Passed to arc_lines_from_spec()\n fwhm : float, optional\n FWHM of arc lines in pixels\n pxtoler : float, optional\n Tolerance in pixels for adding lines with the auto option\n\n\n Returns\n -------\n object : :class:`Identify`\n Returns an instance of the :class:`Identify` class, which contains the results of the fit\n \"\"\"\n\n # Double check that a WavelengthSolutionPar was input\n par = pypeitpar.WavelengthSolutionPar() if par is None else par\n\n # If a wavelength calibration has been performed already, load it:\n msgs.info(\"Slit ID = {0:d} (SPAT ID = {1:d})\".format(slit, slits.spat_id[slit]))\n wv_calib = wv_calib_all[str(slits.spat_id[slit])] if wv_calib_all is not None else None\n\n # Extract the lines that are detected in arccen\n thisarc = arccen[:, slit]\n tdetns, _, _, icut, _ = wvutils.arc_lines_from_spec(thisarc,\n fwhm=fwhm,\n sigdetect=par['sigdetect'],\n nonlinear_counts=nonlinear_counts)\n detns = tdetns[icut]\n\n # Load line lists\n if 'ThAr' in par['lamps']:\n line_lists_all = waveio.load_line_lists(par['lamps'])\n line_lists = line_lists_all[np.where(line_lists_all['ion'] != 'UNKNWN')]\n else:\n line_lists = waveio.load_line_lists(par['lamps'])\n\n # Trim the wavelength scale if requested\n if wavelim is not None:\n ww = np.ones(len(line_lists), dtype=bool)\n if wavelim[0] is not None:\n ww &= line_lists['wave'] > wavelim[0]\n if wavelim[1] is not None:\n ww &= line_lists['wave'] < wavelim[1]\n line_lists = line_lists[ww]\n\n # Create a Line2D instance for the arc spectrum\n spec = Line2D(np.arange(thisarc.size), thisarc,\n linewidth=1, linestyle='solid', color='k',\n drawstyle='steps-mid', animated=True)\n\n # Add the main figure axis\n fig, ax = plt.subplots(figsize=(16, 9), facecolor=\"white\")\n plt.subplots_adjust(bottom=0.05, top=0.85, left=0.05, right=0.65)\n ax.add_line(spec)\n ax.set_ylim((0.0, 1.1 * spec.get_ydata().max()))\n\n # Add two residual fitting axes\n axfit = fig.add_axes([0.7, .5, .28, 0.35])\n axres = fig.add_axes([0.7, .1, .28, 0.35])\n # Residuals\n lflag_color = ['grey', 'blue', 'yellow', 'red']\n residcmap = LinearSegmentedColormap.from_list(\"my_list\", lflag_color, N=len(lflag_color))\n resres = axres.scatter(detns, np.zeros(detns.size), marker='x',\n c=np.zeros(detns.size), cmap=residcmap, norm=Normalize(vmin=0.0, vmax=3.0))\n axres.axhspan(-1*pxtoler, pxtoler, alpha=0.5, color='grey')\n axres.axhline(0.0, color='r', linestyle='-') # Zero level\n axres.set_xlim((0, thisarc.size - 1))\n axres.set_ylim((-0.3, 0.3))\n axres.set_xlabel('Pixel')\n axres.set_ylabel('Residuals (Pix)')\n\n # pixel vs wavelength\n respts = axfit.scatter(detns, np.zeros(detns.size), marker='x',\n c=np.zeros(detns.size), cmap=residcmap, norm=Normalize(vmin=0.0, vmax=3.0))\n resfit = Line2D(np.arange(thisarc.size), 
np.zeros(thisarc.size), linewidth=1, linestyle='-', color='r')\n axfit.add_line(resfit)\n axfit.set_xlim((0, thisarc.size - 1))\n axfit.set_ylim((-0.3, 0.3)) # This will get updated as lines are identified\n axfit.set_xlabel('Pixel')\n axfit.set_ylabel('Wavelength')\n\n # Add an information GUI axis\n axinfo = fig.add_axes([0.15, .92, .7, 0.07])\n axinfo.get_xaxis().set_visible(False)\n axinfo.get_yaxis().set_visible(False)\n axinfo.text(0.5, 0.5, \"Press '?' to list the available options\", transform=axinfo.transAxes,\n horizontalalignment='center', verticalalignment='center')\n axinfo.set_xlim((0, 1))\n axinfo.set_ylim((0, 1))\n specres = dict(pixels=respts, model=resfit, resid=resres)\n\n axes = dict(main=ax, fit=axfit, resid=axres, info=axinfo)\n # Initialise the identify window and display to screen\n fig.canvas.set_window_title('PypeIt - Identify')\n ident = Identify(fig.canvas, axes, spec, specres, detns, line_lists, par, lflag_color, slit=slit,\n spatid=str(slits.spat_id[slit]), wv_calib=wv_calib, pxtoler=pxtoler)\n\n if not test:\n plt.show()\n\n # Now return the results\n return ident\n\n def print_help(self):\n \"\"\"Print the keys and descriptions that can be used for Identification\n \"\"\"\n keys = operations.keys()\n print(\"===============================================================\")\n print(\" Colored lines in main panels:\")\n print(\" gray : wavelength has not been assigned to this detection\")\n print(\" red : currently selected line\")\n print(\" blue : user has assigned wavelength to this detection\")\n print(\" yellow : detection has been automatically assigned\")\n print(\" Colored symbols in residual panels:\")\n print(\" gray : wavelength has not been assigned to this detection\")\n print(\" blue : user has assigned wavelength to this detection\")\n print(\" yellow : detection has been automatically assigned\")\n print(\" red : automatically assigned wavelength was rejected\")\n print(\"---------------------------------------------------------------\")\n print(\" IDENTIFY OPERATIONS\")\n for key in keys:\n print(\"{0:6s} : {1:s}\".format(key, operations[key]))\n print(\"---------------------------------------------------------------\")\n\n def replot(self):\n \"\"\"Redraw the entire canvas\n \"\"\"\n # First set the xdata to be shown\n self.canvas.restore_region(self.background)\n self.toggle_wavepix()\n self.draw_residuals()\n self.draw_lines()\n self.draw_ghost()\n self.canvas.draw()\n\n def linelist_update(self, val):\n \"\"\"For a given detection, set the linelist value to be the best guess based on the wavelength solution\n\n When a user selects a detection, reset the current value of the linelist\n to reflect the best candidate wavelength for that detection (given the current\n wavelength solution)\n\n Args:\n val (int): The index corresponding to the closest match\n \"\"\"\n val = int(val)\n self._slidell.label.set_text(\"{0:.4f}\".format(self._lines[val]))\n self._slideval = val\n\n def linelist_select(self, event):\n \"\"\"Assign a wavelength to a detection\n\n Note, only the LMB works.\n\n Args:\n event (Event): A matplotlib event instance\n \"\"\"\n if event.button == 1:\n self.update_line_id()\n self._detns_idx = -1\n # Try to perform a fit\n self.fitsol_fit()\n # Now replot everything\n self.replot()\n\n def linelist_init(self):\n \"\"\"Initialise the linelist Slider (used to assign a line to a detection)\n \"\"\"\n axcolor = 'lightgoldenrodyellow'\n # Slider\n self.axl = plt.axes([0.15, 0.87, 0.7, 0.04], facecolor=axcolor)\n self._slidell = 
Slider(self.axl, \"{0:.4f}\".format(self._lines[self._slideval]), self._slideval,\n self._lines.size-1, valinit=0, valstep=1)\n self._slidell.valtext.set_visible(False)\n self._slidell.on_changed(self.linelist_update)\n # Select button\n selax = plt.axes([0.86, 0.87, 0.1, 0.04])\n self._select = Button(selax, 'Assign Line', color=axcolor, hovercolor='y')\n self._select.on_clicked(self.linelist_select)\n\n def toggle_wavepix(self, toggled=False):\n if toggled:\n self._wavepix = 1 - self._wavepix\n self.plotx = self.specx.copy() # Plot pixels on the x-axis\n if self._wavepix == 0:\n # Check that a wavelength solution exists\n if self._fitdict['coeff'] is None:\n self.update_infobox(message=\"Unable to show wavelength until a guess at the solution is available\",\n yesno=False)\n else:\n self.plotx = self._fitdict['wave_soln'].copy()\n # Update the x-axis data and axis range\n self.spec.set_xdata(self.plotx)\n if toggled:\n self.axes['main'].set_xlim([self.plotx.min(), self.plotx.max()])\n\n def draw_ghost(self):\n \"\"\"Draw tick marks at the location of the ghost\n \"\"\"\n for i in self.gstlines:\n try:\n i.remove()\n except TypeError:\n i[0].remove()\n for i in self.gsttexts:\n i.remove()\n self.gstlines = []\n self.gsttexts = []\n # Must have ghost mode on, plotting in wavelength, and have an estimated wavelength solution\n if not self._ghostmode or self._wavepix != 0 or self._fitdict['fitc'] is None:\n return\n\n xmn, xmx = self.axes['main'].get_xlim()\n cent = 0.5*(xmn+xmx)\n plotx = cent + (self._lines + self._ghostparam[0] - cent)*self._ghostparam[1]\n\n # Plot the lines\n w = np.where((plotx > xmn) & (plotx < xmx))[0]\n for i in range(w.size):\n self.gstlines.append(self.axes['main'].plot([plotx[w[i]], plotx[w[i]]], [0.45, 0.55],\n color='g', transform=self._ghosttrans))\n txt = \"{0:.2f}\".format(self._lines[w[i]])\n self.gsttexts.append(\n self.axes['main'].annotate(txt, (plotx[w[i]], 0.6), rotation=90.0, alpha=0.5,\n color='g', ha='center', xycoords=self._ghosttrans))\n\n def draw_lines(self):\n \"\"\"Draw the lines and annotate with their IDs\n \"\"\"\n for i in self.annlines:\n i.remove()\n for i in self.anntexts:\n i.remove()\n self.annlines = []\n self.anntexts = []\n # Decide if pixels or wavelength is being plotted\n plotx = self._detns\n if self._wavepix == 0 and self._fitdict['fitc'] is not None:\n # Plot wavelength\n pixel_fit = self._detns\n xnorm = self._fitdict['xnorm']\n\n # Calculate the estimated wavelength of the detections\n plotx = self._fitdict['full_fit'].eval(pixel_fit / xnorm)\n #plotx = utils.func_val(self._fitdict['fitc'],\n # pixel_fit / xnorm,\n # self._fitdict[\"function\"],\n # minx=self._fitdict['fmin'],\n # maxx=self._fitdict['fmax'])\n # Plot the lines\n xmn, xmx = self.axes['main'].get_xlim()\n w = np.where((plotx > xmn) & (plotx < xmx))[0]\n for i in range(w.size):\n if self._lineflg[w[i]] in [0, 3]:\n if w[i] == self._detns_idx:\n self.annlines.append(self.axes['main'].axvline(plotx[w[i]], color='r'))\n else:\n self.annlines.append(self.axes['main'].axvline(plotx[w[i]], color='grey', alpha=0.5))\n continue\n else:\n if w[i] == self._detns_idx:\n self.annlines.append(self.axes['main'].axvline(plotx[w[i]], color='r'))\n else:\n self.annlines.append(self.axes['main'].axvline(plotx[w[i]],\n color=self._lflag_color[self._lineflg[w[i]]]))\n txt = \"{0:.2f}\".format(self._lineids[w[i]])\n self.anntexts.append(\n self.axes['main'].annotate(txt, (plotx[w[i]], self._detnsy[w[i]]), rotation=90.0,\n color='b', ha='right', va='bottom'))\n\n def 
draw_residuals(self):\n \"\"\"Update the subplots that show the residuals\n \"\"\"\n if self._fitdict[\"coeff\"] is None:\n nid = np.where((self._lineflg == 1) | (self._lineflg == 2))[0].size\n msg = \"Cannot plot residuals until more lines have been identified\\n\" +\\\n \"Polynomial order = {0:d}, Number of line IDs = {1:d}\".format(self._fitdict[\"polyorder\"], nid)\n self.update_infobox(message=msg, yesno=False)\n else:\n # Remove the annotated residual statistics\n for i in self._fitdict[\"res_stats\"]:\n i.remove()\n self._fitdict[\"res_stats\"] = []\n\n # Update the line IDs\n for ii in range(self._fitdict['pixel_fit'].size):\n idx = np.argmin(np.abs(self._detns-self._fitdict['pixel_fit'][ii]))\n self._lineids[idx] = self._fitdict['wave_fit'][ii]\n\n # Extract the fitting info\n wave_soln = self._fitdict['wave_soln']\n pixel_fit = self._detns\n wave_fit = self._lineids\n xnorm = self._fitdict['xnorm']\n ymin, ymax = np.min(wave_soln[wave_soln != 0.0]) * .95, np.max(wave_soln) * 1.05\n\n # Calculate some stats\n wave_soln_fit = self._fitdict['full_fit'].eval(pixel_fit / xnorm)\n dwv_pix = np.median(np.abs(wave_soln - np.roll(wave_soln, 1)))\n resvals = (wave_fit - wave_soln_fit) / dwv_pix\n\n # Pixel vs wavelength\n self.specres['pixels'].set_offsets(np.c_[pixel_fit, wave_fit])\n self.specres['model'].set_ydata(wave_soln)\n self.axes['fit'].set_ylim((ymin, ymax))\n self.specres['pixels'].set_color(self.residmap.to_rgba(self._lineflg))\n\n # Pixel residuals\n self.specres['resid'].set_offsets(np.c_[pixel_fit, resvals])\n self.axes['resid'].set_ylim((-1.0, 1.0))\n self.specres['resid'].set_color(self.residmap.to_rgba(self._lineflg))\n\n # Write some statistics on the plot\n disptxt = r'$\\Delta\\lambda$={:.3f}$\\AA$ (per pix)'.format(dwv_pix)\n rmstxt = 'RMS={:.3f} (pixels)'.format(self._fitdict['rms'])\n self._fitdict[\"res_stats\"].append(self.axes['fit'].text(0.1 * self.specdata.size,\n ymin + 0.90 * (ymax - ymin),\n disptxt, size='small'))\n self._fitdict[\"res_stats\"].append(self.axes['fit'].text(0.1 * self.specdata.size,\n ymin + 0.80 * (ymax - ymin),\n rmstxt, size='small'))\n\n def draw_callback(self, event):\n \"\"\"Draw the lines and annotate with their IDs\n\n Args:\n event (Event): A matplotlib event instance\n \"\"\"\n # Get the background\n self.background = self.canvas.copy_from_bbox(self.axes['main'].bbox)\n # Set the axis transform\n trans = mtransforms.blended_transform_factory(self.axes['main'].transData, self.axes['main'].transAxes)\n self.draw_fitregions(trans)\n self.axes['main'].draw_artist(self.spec)\n self.draw_lines()\n self.draw_ghost()\n\n def draw_fitregions(self, trans):\n \"\"\"Refresh the fit regions\n\n Args:\n trans (AxisTransform): A matplotlib axis transform from data to axes coordinates\n \"\"\"\n if self._fitr is not None:\n self._fitr.remove()\n # Find all regions\n regwhr = np.copy(self._fitregions == 1)\n # Fudge to get the leftmost pixel shaded in too\n regwhr[np.where((self._fitregions[:-1] == 0) & (self._fitregions[1:] == 1))] = True\n self._fitr = self.axes['main'].fill_between(self.plotx, 0, 1, where=regwhr, facecolor='green',\n alpha=0.5, transform=trans)\n\n def get_ann_ypos(self, scale=1.02):\n \"\"\"Calculate the y locations of the annotated IDs\n\n Args:\n scale (float): Scale the location relative to the maximum value of the spectrum\n\n Returns:\n ypos (ndarray): y locations of the annotations\n \"\"\"\n ypos = np.zeros(self._detns.size)\n for xx in range(self._detns.size):\n wmin = np.argmin(np.abs(self.specx-self._detns[xx]))\n 
ypos[xx] = scale * np.max(self.specdata[wmin-1:wmin+2])\n return ypos\n\n def get_detns(self):\n \"\"\"Get the index of the detection closest to the cursor\n \"\"\"\n return np.argmin(np.abs(self._detns-self.specx[self._end]))\n\n def get_ind_under_point(self, event):\n \"\"\"Get the index of the line closest to the cursor\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n\n Returns:\n ind (int): Index of the spectrum where the event occurred\n \"\"\"\n ind = np.argmin(np.abs(self.plotx - event.xdata))\n return ind\n\n def get_axisID(self, event):\n \"\"\"Get the ID of the axis where an event has occurred\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n\n Returns:\n axisID (int, None): Axis where the event has occurred\n \"\"\"\n if event.inaxes == self.axes['main']:\n return 0\n elif event.inaxes == self.axes['resid']:\n return 1\n elif event.inaxes == self.axes['fit']:\n return 2\n elif event.inaxes == self.axes['info']:\n return 3\n return None\n\n def get_results(self):\n \"\"\"Perform the final wavelength calibration\n\n Using the line IDs perform the final fit according\n to the wavelength calibration parameters set by the\n user. This routine must be called after the user has\n manually identified all lines.\n\n Returns:\n wvcalib (dict): Dict of wavelength calibration solutions\n \"\"\"\n wvcalib = {}\n # Check that a result exists:\n if self._fitdict['coeff'] is None:\n wvcalib[str(self._slit)] = None\n else:\n # Perform an initial fit to the user IDs\n self.fitsol_fit()\n # Now perform a detailed fit\n gd_det = np.where((self._lineflg == 1) | (self._lineflg == 2))[0]\n bdisp = self.fitsol_deriv(self.specdata.size/2) # Angstroms/pixel at the centre of the spectrum\n try:\n #n_final = wvutils.parse_param(self.par, 'n_final', self._slit)\n final_fit = wv_fitting.iterative_fitting(self.specdata, self._detns, gd_det,\n self._lineids[gd_det], self._line_lists, bdisp,\n verbose=False, n_first=self._fitdict[\"polyorder\"],\n match_toler=self.par['match_toler'],\n func=self.par['func'],\n n_final=self._fitdict[\"polyorder\"], input_only=True,\n sigrej_first=self.par['sigrej_first'],\n sigrej_final=self.par['sigrej_final'])\n except TypeError:\n wvcalib = None\n else:\n wvcalib = copy.deepcopy(final_fit)\n return wvcalib\n\n def store_solution(self, final_fit, master_dir, binspec, rmstol=0.15,\n force_save=False, wvcalib=None):\n \"\"\"Check if the user wants to store this solution in the reid arxiv\n\n Parameters\n ----------\n\n final_fit : dict\n Dict of wavelength calibration solutions (see self.get_results())\n master_dir : str\n Master directory -- NOT USED\n binspec : int\n Spectral binning\n rmstol : float\n RMS tolerance allowed for the wavelength solution to be stored in the archive\n force_save : bool\n Force save\n wvcalib : :class:`pypeit.wavecalib.WaveCalib`\n Wavelength solution\n\n \"\"\"\n # Line IDs\n ans = ''\n if not force_save:\n while ans != 'y' and ans != 'n':\n ans = input(\"Would you like to store the line IDs? (y/n): \")\n else:\n ans = 'y'\n if ans == 'y':\n self.save_IDs()\n # Solution\n if 'rms' not in final_fit.keys():\n msgs.warn(\"No wavelength solution available\")\n return\n elif final_fit['rms'] < rmstol:\n ans = ''\n if not force_save:\n while ans != 'y' and ans != 'n':\n ans = input(\"Would you like to write this wavelength solution to disk? 
(y/n): \")\n else:\n ans = 'y'\n if ans == 'y':\n # Arxiv solution\n #outroot = templates.pypeit_identify_record(final_fit, binspec, specname, gratname, dispangl, outdir=master_dir)\n wavelengths = self._fitdict['full_fit'].eval(np.arange(self.specdata.size) /\n (self.specdata.size - 1))\n wvutils.write_template(wavelengths, self.specdata, binspec,\n './', 'wvarxiv.fits')\n msgs.info(\"\\nYour arxiv solution has been written to wvarxiv.fits\")\n #msgs.info(\"\\nYour wavelength solution has been stored here:\" + msgs.newline() +\n # os.path.join(master_dir, outroot) + msgs.newline() + msgs.newline() +\n # \"If you would like to move this to the PypeIt database, please move this file into the directory:\" +\n # msgs.newline() + templates.outpath + msgs.newline() + msgs.newline() +\n # \"Please consider sending your solution to the PypeIt team!\" + msgs.newline())\n #\n if wvcalib is not None:\n wvcalib.to_file('wvcalib.fits')\n msgs.info(\"\\nA WaveCalib container was written to wvcalib.fits\")\n msgs.info(\"\\nPlease consider sending your solution to the PypeIt team!\" + msgs.newline())\n else:\n print(\"Final fit RMS: {0:0.3f} is larger than the allowed tolerance: {1:0.3f}\".format(final_fit['rms'], rmstol))\n print(\"Set the variable --rmstol on the command line to allow a more flexible RMS tolerance\")\n ans = ''\n\n def button_press_callback(self, event):\n \"\"\"What to do when the mouse button is pressed\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n \"\"\"\n if event.inaxes is None:\n return\n if self.canvas.toolbar.mode != \"\":\n return\n if event.button == 1:\n self._addsub = 1\n elif event.button == 3:\n self._addsub = 0\n if event.inaxes == self.axes[\"main\"]:\n self._msedown = True\n axisID = self.get_axisID(event)\n self._start = self.get_ind_under_point(event)\n self._startdata = event.xdata\n self._oldghostscl = self._ghostparam[1]\n\n def motion_notify_event(self, event):\n if event.inaxes is None:\n return\n self._middata = event.xdata\n if self._ghostmode and self._msedown:\n self.update_ghosts()\n # Now plot\n trans = mtransforms.blended_transform_factory(self.axes['main'].transData, self.axes['main'].transAxes)\n self.canvas.restore_region(self.background)\n self.draw_fitregions(trans)\n # Now replot everything\n self.replot()\n\n def button_release_callback(self, event):\n \"\"\"What to do when the mouse button is released\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n\n Returns:\n None\n \"\"\"\n self._msedown = False\n if event.inaxes is None:\n return\n if event.inaxes == self.axes['info']:\n if (event.xdata > 0.8) and (event.xdata < 0.9):\n answer = \"y\"\n elif event.xdata >= 0.9:\n answer = \"n\"\n else:\n return\n self.operations(answer, -1)\n self.update_infobox(default=True)\n return\n elif self._respreq[0]:\n # The user is trying to do something before they have responded to a question\n return\n if self.canvas.toolbar.mode != \"\":\n return\n # Draw an actor\n axisID = self.get_axisID(event)\n if axisID is not None:\n if axisID <= 2:\n self._end = self.get_ind_under_point(event)\n if self._end == self._start:\n # The mouse button was pressed (not dragged)\n self.operations('m', axisID, event)\n elif self._end != self._start:\n # The mouse button was dragged\n if axisID == 0:\n if not self._ghostmode:\n if self._start > self._end:\n tmp = self._start\n self._start = self._end\n self._end = tmp\n self.update_regions()\n # Now plot\n trans = 
mtransforms.blended_transform_factory(self.axes['main'].transData, self.axes['main'].transAxes)\n self.canvas.restore_region(self.background)\n self.draw_fitregions(trans)\n # Now replot everything\n self.replot()\n\n def key_press_callback(self, event):\n \"\"\"What to do when a key is pressed\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n\n Returns:\n None\n \"\"\"\n # Check that the event is in an axis...\n if not event.inaxes:\n return\n # ... but not the information box!\n if event.inaxes == self.axes['info']:\n return\n axisID = self.get_axisID(event)\n self.operations(event.key, axisID, event)\n\n def operations(self, key, axisID, event):\n \"\"\"Canvas operations\n\n Args:\n key (str): Which key has been pressed\n axisID (int): The index of the axis where the key has been pressed (see get_axisID)\n \"\"\"\n # Check if the user really wants to quit\n if key == 'q' and self._qconf:\n if self._changes:\n self.update_infobox(message=\"WARNING: There are unsaved changes!!\\nPress q again to exit\", yesno=False)\n self._qconf = True\n else:\n msgs.bug(\"Need to change this to kill and return the results to PypeIt\")\n plt.close()\n elif self._qconf:\n self.update_infobox(default=True)\n self._qconf = False\n\n # Manage responses from questions posed to the user.\n if self._respreq[0]:\n if key != \"y\" and key != \"n\":\n return\n else:\n # Switch off the required response\n self._respreq[0] = False\n # Deal with the response\n if self._respreq[1] == \"write\":\n # First remove the old file, and save the new one\n msgs.work(\"Not implemented yet!\")\n self.write()\n else:\n return\n # Reset the info box\n self.update_infobox(default=True)\n return\n\n if key == '?':\n self.print_help()\n elif key == 'left':\n widx = self._slideval - 1\n if widx < 0:\n widx = self._lines.size-1\n self.linelist_update(widx)\n elif key == 'right':\n widx = self._slideval + 1\n if widx >= self._lines.size:\n widx = 0\n self.linelist_update(widx)\n elif key == 'a':\n if self._fitdict['coeff'] is not None:\n self.auto_id()\n else:\n msgs.info(\"You must identify a few lines first\")\n elif key == 'c':\n wclr = np.where((self._lineflg == 2) | (self._lineflg == 3))\n self._lineflg[wclr] = 0\n self.replot()\n elif key == 'd':\n self._lineflg *= 0\n self._lineids *= 0.0\n self._fitdict['coeff'] = None\n self.replot()\n elif key == 'f':\n self.fitsol_fit()\n self.replot()\n elif key == 'l':\n self.load_IDs()\n elif key == 'm':\n self._end = self.get_ind_under_point(event)\n self._detns_idx = self.get_detns()\n # Estimate the wavelength, if a solution is available\n if self._fitdict['coeff'] is not None:\n # Find closest line\n waveest = self.fitsol_value(idx=self._detns_idx)\n widx = np.argmin(np.abs(waveest - self._lines))\n self.linelist_update(widx)\n self._slidell.set_val(self._slideval)\n # Print to the information panel\n self.update_infobox(message=\"Pixel position = {0:.1f} Estimated wavelength = {1:.3f}\".format(\n self._detns[self._detns_idx], waveest), yesno=False)\n self.replot()\n elif key == 'q':\n if self._changes:\n self.update_infobox(message=\"WARNING: There are unsaved changes!!\\nPress q again to exit\", yesno=False)\n self._qconf = True\n else:\n plt.close()\n elif key == 'r':\n if self._detns_idx == -1:\n msgs.info(\"You must select a line first\")\n elif self._fitr is None:\n msgs.info(\"You must select a fitting region first\")\n else:\n msgs.work(\"Feature not yet implemented\")\n elif key == 's':\n self.save_IDs()\n elif key == 'w':\n 
self.toggle_wavepix(toggled=True)\n self.replot()\n elif key == 'z':\n self.delete_line_id()\n self.operations('f', axisID, event)\n elif key == '+':\n if self._fitdict[\"polyorder\"] < 10:\n self._fitdict[\"polyorder\"] += 1\n self.update_infobox(message=\"Polynomial order = {0:d}\".format(self._fitdict[\"polyorder\"]), yesno=False)\n self.fitsol_fit()\n self.replot()\n else:\n self.update_infobox(message=\"Polynomial order must be <= 10\", yesno=False)\n elif key == '-':\n if self._fitdict[\"polyorder\"] > 1:\n self._fitdict[\"polyorder\"] -= 1\n self.update_infobox(message=\"Polynomial order = {0:d}\".format(self._fitdict[\"polyorder\"]), yesno=False)\n self.fitsol_fit()\n self.replot()\n else:\n self.update_infobox(message=\"Polynomial order must be >= 1\", yesno=False)\n elif key == 'g':\n if self._wavepix == 0:\n self._ghostmode = not self._ghostmode\n self.replot()\n else:\n self.update_infobox(message=\"To enable ghost mode, you need to identify some lines.\\nYou also need to set wavelength as the x-axis scale\", yesno=False)\n elif key == 'h':\n self._ghostparam = [0.0, 1.0]\n self.replot()\n self.canvas.draw()\n\n def auto_id(self):\n \"\"\"Automatically assign lines based on a few lines identified by the user\n\n Using the current line IDs and approximate wavelength solution,\n automatically assign a wavelength to all line detections.\n \"\"\"\n\n # If the IDs are within an acceptable tolerance, flag them as such\n wave_est = self._fitdict['full_fit'].eval(self._detns / self._fitdict['xnorm'])\n for wav in range(wave_est.size):\n if self._lineflg[wav] == 1:\n # User has manually identified this line already\n continue\n pixdiff = np.abs(wave_est[wav]-self._lines)\n amin = np.argmin(pixdiff)\n pxtst = pixdiff[amin]/self._fitdict['cen_disp']\n self._lineids[wav] = self._lines[amin]\n if pxtst < self.pxtoler:\n # Acceptable\n self._lineflg[wav] = 2\n else:\n # Unacceptable\n self._lineflg[wav] = 3\n # Now that we've automatically identified lines, update the canvas\n self.replot()\n\n def delete_line_id(self):\n \"\"\"Remove an incorrect line ID\n \"\"\"\n rmid = self.get_detns()\n self._lineids[rmid] = 0.0\n self._lineflg[rmid] = 0\n\n def fitsol_value(self, xfit=None, idx=None):\n \"\"\"Calculate the wavelength at a pixel\n\n Parameters\n ----------\n\n xfit : ndarray, float\n Pixel values that the user wishes to evaluate the wavelength\n idx : ndarray, int\n Index of the arc line detections that the user wishes to evaluate the wavelength\n\n Returns\n -------\n\n disp : The wavelength (Angstroms) of the requested pixels\n \"\"\"\n if xfit is None:\n xfit = self._detns\n if self._fitdict['coeff'] is not None:\n if idx is None:\n return np.polyval(self._fitdict[\"coeff\"], xfit / self._fitdict[\"scale\"])\n else:\n return np.polyval(self._fitdict[\"coeff\"], xfit[idx] / self._fitdict[\"scale\"])\n else:\n msgs.bug(\"Cannot predict wavelength value - no fit has been performed\")\n return None\n\n def fitsol_deriv(self, xfit=None, idx=None):\n \"\"\"Calculate the dispersion as a function of wavelength\n\n Args:\n xfit (ndarray, float): Pixel values that the user wishes to evaluate the wavelength\n idx (int): Index of the arc line detections that the user wishes to evaluate the wavelength\n\n Returns:\n disp (ndarray, float, None): The dispersion (Angstroms/pixel) as a function of wavelength\n \"\"\"\n if xfit is None:\n xfit = self._detns\n if self._fitdict['coeff'] is not None:\n cder = np.polyder(self._fitdict[\"coeff\"])\n if idx is None:\n return np.polyval(cder, xfit / 
self._fitdict[\"scale\"]) / self._fitdict[\"scale\"]\n else:\n return np.polyval(cder, xfit[idx] / self._fitdict[\"scale\"]) / self._fitdict[\"scale\"]\n else:\n msgs.bug(\"Cannot predict wavelength value - no fit has been performed\")\n return None\n\n def fitsol_fit(self):\n \"\"\"Perform a fit to the line identifications\n \"\"\"\n # Calculate the dispersion\n # disp = (ids[-1] - ids[0]) / (tcent[idx_str[-1]] - tcent[idx_str[0]])\n # final_fit = fitting.iterative_fitting(censpec, tcent, idx_str, ids,\n # llist, disp, verbose=False,\n # n_first=2, n_final=self._fitdict[\"polyorder\"])\n ord = self._fitdict[\"polyorder\"]\n gd_det = np.where((self._lineflg == 1) | (self._lineflg == 2)) # Use the user IDs or acceptable auto IDs only!\n # Check if there are enough points to perform a fit\n if gd_det[0].size < ord+1:\n msg = \"Polynomial order must be >= number of line IDs\\n\" +\\\n \"Polynomial order = {0:d}, Number of line IDs = {1:d}\".format(ord, gd_det[0].size)\n self.update_infobox(message=msg, yesno=False)\n else:\n # Start by performing a basic fit\n xpix = self._detns[gd_det] / self._fitdict[\"scale\"]\n ylam = self._lineids[gd_det]\n self._fitdict[\"coeff\"] = np.polyfit(xpix, ylam, ord)\n bdisp = self.fitsol_deriv(self.specdata.size / (2*self._fitdict[\"scale\"])) # Angstroms/pixel at the centre of the spectrum\n # Then try a detailed fit\n try:\n final_fit = wv_fitting.iterative_fitting(\n self.specdata, self._detns, gd_det[0],\n self._lineids[gd_det[0]], self._line_lists, bdisp,\n verbose=False, n_first=min(2, self._fitdict[\"polyorder\"]),\n match_toler=self.par['match_toler'],\n func=self.par['func'], input_only=True,\n n_final=self._fitdict[\"polyorder\"],\n sigrej_first=self.par['sigrej_first'],\n sigrej_final=self.par['sigrej_final'])\n final_fit.spat_id = self._slit\n\n # Update the fitdict\n #for key in final_fit:\n # self._fitdict[key] = final_fit[key]\n self._fitdict['polyorder'] = final_fit.pypeitfit['order'][0]\n self._fitdict['fitc'] = final_fit.pypeitfit['fitc']\n self._fitdict['full_fit'] = final_fit.pypeitfit\n self._fitdict['pixel_fit'] = final_fit.pixel_fit\n self._fitdict['wave_fit'] = final_fit.wave_fit\n self._fitdict['wave_soln'] = final_fit.wave_soln\n self._fitdict['xnorm'] = final_fit.xnorm\n self._fitdict['rms'] = final_fit.rms\n self._fitdict['tcent'] = final_fit.tcent\n self._fitdict['cen_disp'] = final_fit.cen_disp\n self._fitdict['cen_wave'] = final_fit.cen_wave\n self._fitdict['WaveFit'] = final_fit\n\n except TypeError:\n # Just stick use the basic fit\n self._fitdict[\"fitc\"] = None\n\n def update_infobox(self, message=\"Press '?' to list the available options\",\n yesno=True, default=False):\n \"\"\"Send a new message to the information window at the top of the canvas\n\n Args:\n message (str): Message to be displayed\n \"\"\"\n self.axes['info'].clear()\n if default:\n self.axes['info'].text(0.5, 0.5, \"Press '?' 
to list the available options\", transform=self.axes['info'].transAxes,\n horizontalalignment='center', verticalalignment='center')\n self.canvas.draw()\n return\n # Display the message\n self.axes['info'].text(0.5, 0.5, message, transform=self.axes['info'].transAxes,\n horizontalalignment='center', verticalalignment='center')\n if yesno:\n self.axes['info'].fill_between([0.8, 0.9], 0, 1, facecolor='green', alpha=0.5, transform=self.axes['info'].transAxes)\n self.axes['info'].fill_between([0.9, 1.0], 0, 1, facecolor='red', alpha=0.5, transform=self.axes['info'].transAxes)\n self.axes['info'].text(0.85, 0.5, \"YES\", transform=self.axes['info'].transAxes,\n horizontalalignment='center', verticalalignment='center')\n self.axes['info'].text(0.95, 0.5, \"NO\", transform=self.axes['info'].transAxes,\n horizontalalignment='center', verticalalignment='center')\n self.axes['info'].set_xlim((0, 1))\n self.axes['info'].set_ylim((0, 1))\n self.canvas.draw()\n\n def update_line_id(self):\n \"\"\"Find the nearest wavelength in the linelist\n \"\"\"\n if self._detns_idx != -1:\n self._lineids[self._detns_idx] = self._lines[self._slideval]\n self._lineflg[self._detns_idx] = 1\n\n def update_regions(self):\n \"\"\"Update the regions used to fit Gaussian\n \"\"\"\n self._fitregions[self._start:self._end] = self._addsub\n\n def update_ghosts(self):\n \"\"\"Update the ghosts\n \"\"\"\n if self._addsub == 0: # RMB\n # Stretching factor\n xmn, xmx = self.axes['main'].get_xlim()\n self._ghostparam[1] = self._oldghostscl*(1.0 + (self._middata - self._startdata) / (xmx - xmn))\n else: # LMB\n if self._wavepix == 0:\n # Plotting wavelength\n self._ghostparam[0] = self._middata - self._startdata\n elif self._fitdict['fitc'] is not None:\n # Plotting pixels and have a wavelength solution\n xnorm = self._fitdict['xnorm']\n\n # Calculate the estimated wavelength of the detections\n specy = self._fitdict['full_fit'].eval(np.array([self._startdata, self._middata]) / xnorm)\n self._ghostparam[0] = specy[1] - specy[0]\n else:\n # Plotting pixels, but don't have a wavelength solution\n scale = (np.max(self._lines) - np.min(self._lines))/self.specx.size # Angstroms per pixel\n self._ghostparam[0] = (self._middata - self._startdata) * scale # Calculate the shift in Angstroms\n # grad_orig = self.specx.size / (np.max(self._lines) - np.min(self._lines))\n # plotx = self._ghostparam[1] * grad_orig * (self._lines - np.min(self._lines) + self._ghostparam[0])\n\n def load_IDs(self, wv_calib=None, fname='waveid.ascii'):\n \"\"\"Load line IDs\n \"\"\"\n if wv_calib is not None:\n for ii in range(wv_calib['pixel_fit'].size):\n idx = np.argmin(np.abs(self._detns-wv_calib['pixel_fit'][ii]))\n self._lineids[idx] = wv_calib['wave_fit'][ii]\n self._lineflg[idx] = int(wv_calib['mask'][ii])\n self._fitdict['polyorder'] = len(wv_calib['fitc'])-1\n msgs.info(\"Loaded line IDs\")\n elif os.path.exists(fname):\n data = ascii_io.read(fname, format='fixed_width')\n self._detns = data['pixel'].data\n self._lineids = data['wavelength'].data\n self._lineflg = data['flag'].data\n msgs.info(\"Loaded line IDs:\" + msgs.newline() + fname)\n self.update_infobox(message=\"Loaded line IDs: {0:s}\".format(fname), yesno=False)\n else:\n self.update_infobox(message=\"Could not find line IDs: {0:s}\".format(fname), yesno=False)\n\n def save_IDs(self, fname='waveid.ascii'):\n \"\"\"Save the current IDs\n \"\"\"\n meta = dict(comments=[\"flags:\",\n \" 0 = wavelength has not been assigned to this detection\",\n \" 1 = user has assigned wavelength to this 
detection\",\n \" 2 = detection has been automatically assigned\",\n \" 3 = automatically assigned wavelength was rejected\"])\n data = Table({'pixel' : self._detns,\n 'wavelength' : self._lineids,\n 'flag' : self._lineflg},\n names=['pixel', 'wavelength', 'flag'],\n meta=meta)\n ascii_io.write(data, fname, format='fixed_width')\n msgs.info(\"Line IDs saved as:\" + msgs.newline() + fname)\n self.update_infobox(message=\"Line IDs saved as: {0:s}\".format(fname), yesno=False)\n",
"\"\"\"\nImplements classes and function for the PypeIt data model.\n\n.. data-container:\n\nDataContainer\n-------------\n\n:class:`DataContainer` objects provide a utility for\nenforcing a specific datamodel on an object, and provides convenience\nroutines for writing data to fits files. The class itself is an\nabstract base class that cannot be directly instantiated. As a base\nclass, :class:`DataContainer` objects are versatile, but they have\ntheir limitations.\n\nDerived classes must do the following:\n\n - Define a class attribute called ``datamodel``. See the examples\n below for their format.\n - Provide an :func:`__init__` method that defines the\n instantiation calling sequence and passes the relevant\n dictionary to this base-class instantiation.\n - Provide a :func:`_validate` method, if necessary, that\n processes the data provided in the `__init__` into a complete\n instantiation of the object. This method and the\n :func:`__init__` method are the *only* places where attributes\n can be added to the class.\n - Provide a :func:`_bundle` method that reorganizes the datamodel\n into partitions that can be written to one or more fits\n extensions. More details are provided in the description of\n :func:`DataContainer._bundle`.\n - Provide a :func:`_parse` method that parses information in one\n or more fits extensions into the appropriate datamodel. More\n details are provided in the description of\n :func:`DataContainer._parse`.\n\n.. note::\n\n The attributes of the class are *not required* to be a part of\n the ``datamodel``; however, it makes the object simpler when they\n are. Any attributes that are not part of the ``datamodel`` must\n be defined in either the :func:`__init__` or :func:`_validate`\n methods; otherwise, the class with throw an ``AttributeError``.\n\nHere are some examples of how to and how not to use them.\n\nDefining the datamodel\n++++++++++++++++++++++\n\nThe format of the ``datamodel`` needed for each implementation of a\n:class:`DataContainer` derived class is as follows.\n\nThe datamodel itself is a class attribute (i.e., it is a member of\nthe class, not just of an instance of the class). The datamodel is a\ndictionary of dictionaries: Each key of the datamodel dictionary\nprovides the name of a given datamodel element, and the associated\nitem (dictionary) for the datamodel element provides the type and\ndescription information for that datamodel element. For each\ndatamodel element, the dictionary item must provide:\n\n - ``otype``: This is the type of the object for this datamodel\n item. E.g., for a float or a `numpy.ndarray`_, you would set\n ``otype=float`` and ``otype=np.ndarray``, respectively.\n\n - ``descr``: This provides a text description of the datamodel\n element. This is used to construct the datamodel tables in the\n pypeit documentation.\n\nIf the object type is a `numpy.ndarray`_, you should also provide the\n``atype`` keyword that sets the type of the data contained within the\narray. E.g., for a floating point array containing an image, your\ndatamodel could be simply::\n\n datamodel = {'image' : dict(otype=np.ndarray, atype=float, descr='My image')}\n\nCurrently, ``datamodel`` components are restricted to have ``otype``\nthat are :obj:`tuple`, :obj:`int`, :obj:`float`, ``numpy.integer``,\n``numpy.floating``, `numpy.ndarray`_, or `astropy.table.Table`_\nobjects. 
E.g., ``datamodel`` values for ``otype`` *cannot* be\n:obj:`dict`.\n\nMore advanced examples are given below.\n\nBasic container\n+++++++++++++++\n\nHere's how to create a derived class for a basic container that\nholds two arrays and a metadata parameter::\n\n import numpy as np\n import inspect\n\n from pypeit.datamodel import DataContainer\n\n class BasicContainer(DataContainer):\n datamodel = {'vec1': dict(otype=np.ndarray, atype=float, descr='Test'),\n 'meta1': dict(otype=str, decr='test'),\n 'arr1': dict(otype=np.ndarray, atype=float, descr='test')}\n\n def __init__(self, vec1, meta1, arr1):\n # All arguments are passed directly to the container\n # instantiation\n args, _, _, values = inspect.getargvalues(inspect.currentframe())\n super(BasicContainer, self).__init__({k: values[k] for k in args[1:]}) \n\n def _bundle(self):\n # Use the base class _bundle Specify the extension\n return super(BasicContainer, self)._bundle(ext='basic')\n\nWith this implementation:\n\n - You can instantiate the container so that number of data table\n rows would be the same (10)::\n\n data = BasicContainer(np.arange(10), 'length=10', np.arange(30).reshape(10,3))\n\n - After instantiating, access the data like this::\n\n # Get the data model keys\n keys = list(data.keys())\n\n # Access the datamodel as attributes ...\n print(data.vec1)\n # ... or as items\n print(data['meta1'])\n\n - Attributes and items are case-sensitive::\n\n # Faults because of case-sensitive attributes/items\n print(data.Vec1)\n print(data['MeTa1'])\n\n - The attributes of the container can only be part of the\n datamodel or added in either the :func:`DataContainer.__init__`\n or :func:`DataContainer._validate` methods::\n\n # Faults with KeyError\n data['newvec'] = np.arange(10)\n test = data['newvec']\n\n # Faults with AttributeError\n data.newvec = np.arange(10)\n test = data.newvec\n\n - The :class:`DataContainer` also enforces strict types for\n members of the datamodel::\n\n # Faults with TypeError\n data.vec1 = 3\n data.meta1 = 4.\n\n - Read/Write the data from/to a fits file. In this instantiation,\n the data is written to a `astropy.io.fits.BinTableHDU` object;\n the table has 10 rows because the shape of the arrays match\n this. 
The file I/O routines look like this::\n\n # Write to a file\n ofile = 'test.fits'\n data.to_file(ofile)\n\n # Write to a gzipped file\n ofile = 'test.fits.gz'\n data.to_file(ofile)\n\n # Test written data against input\n with fits.open(ofile) as hdu:\n print(len(hdu))\n # 2: The primary extension and a binary table with the data\n\n print(hdu[1].name)\n # BASIC: As set by the _bundle method\n \n print(len(hdu['BASIC'].data))\n # 10: The length of the data table\n \n print(hdu['BASIC'].columns.names)\n # ['vec1', 'arr1']: datamodel elements written to the table columns\n \n print(hdu['BASIC'].header['meta1'])\n # 'length=10': int, float, or string datamodel components are written to headers\n\n - If the shape of the first axis of the arrays (number of rows)\n do not match, the arrays are written as single elements of a\n table with one row::\n\n # Number of rows are mismatched \n data = BasicContainer(np.arange(10), 'length=1', np.arange(30).reshape(3,10))\n data.to_file(ofile)\n\n with fits.open(ofile) as hdu:\n print(len(hdu))\n # 2: The primary extension and a binary table with the data\n print(len(hdu['BASIC'].data))\n # 1: All of the data is put into a single row\n\nMixed Object Containers\n+++++++++++++++++++++++\n\n:class:`DataContainer` objects can also contain multiple arrays\nand/or `astropy.table.Table`_ objects. However, multiple Tables or\ncombinations of arrays and Tables cannot be bundled into individual\nextensions. Here are two implementations of a mixed container, a good\none and a bad one::\n\n import numpy as np\n import inspect\n\n from astropy.table import Table\n\n from pypeit.datamodel import DataContainer\n\n class GoodMixedTypeContainer(DataContainer):\n datamodel = {'tab1': dict(otype=Table, descr='Test'),\n 'tab1len': dict(otype=int, descr='test'),\n 'arr1': dict(otype=np.ndarray, descr='test'),\n 'arr1shape': dict(otype=tuple, descr='test')}\n\n def __init__(self, tab1, arr1):\n # All arguments are passed directly to the container\n # instantiation, but the list is incomplete\n args, _, _, values = inspect.getargvalues(inspect.currentframe())\n super(GoodMixedTypeContainer, self).__init__({k: values[k] for k in args[1:]}) \n\n def _validate(self):\n # Complete the instantiation\n self.tab1len = len(self.tab1)\n # NOTE: DataContainer does allow for tuples, but beware because\n # they have to saved to the fits files by converting them to strings\n # and writing them to the fits header. So the tuples should be\n # short! See _bundle below, and in DataContainer.\n self.arr1shape = self.arr1.shape\n\n def _bundle(self):\n # Bundle so there's only one Table and one Image per extension\n return [{'tab1len': self.tab1len, 'tab1': self.tab1},\n {'arr1shape': str(self.arr1shape), 'arr1': self.arr1}]\n\n\n class BadMixedTypeContainer(GoodMixedTypeContainer):\n def _bundle(self):\n # Use default _bundle method, which will try to put both tables\n # in the same extension. 
NOTE: Can't use super here because\n # GoodMixedTypeContainer doesn't have an 'ext' argument\n return DataContainer._bundle(self, ext='bad')\n\nWith this implementation:\n\n - To instantiate::\n\n x = np.arange(10)\n y = np.arange(10)+5\n z = np.arange(30).reshape(10,3)\n\n arr1 = np.full((3,3,3), -1)\n tab1 = Table(data=({'x':x,'y':y,'z':z}), meta={'test':'this'})\n\n data = GoodMixedTypeContainer(tab1, arr1)\n\n - Data access::\n\n print(data.tab1.keys())\n # ['x', 'y', 'z']\n print(assert data.tab1.meta['test'])\n # 'this'\n print(data.arr1shape)\n # (3,3,3)\n\n - Construct an `astropy.io.fits.HDUList`::\n\n hdu = data.to_hdu(add_primary=True)\n\n print(len(hdu))\n # 3: Includes the primary HDU, one with the table, and one with the array\n\n print([h.name for h in hdu])\n # ['PRIMARY', 'TAB1', 'ARR1']\n\n - Tuples are converted to strings::\n\n print(hdu['ARR1'].header['ARR1SHAPE'])\n # '(3,3,3)'\n\n - The tuples are converted back from strings when they're read\n from the HDU::\n\n _data = GoodMixedTypeContainer.from_hdu(hdu)\n print(_data.arr1shape)\n # (3,3,3)\n\n - Table metadata is also written to the header, which can be\n accessed with case-insensitive keys::\n\n print(hdu['TAB1'].header['TEST'])\n print(hdu['TAB1'].header['TesT'])\n # Both print: 'this'\n\n - However, it's important to note that the keyword case gets\n mangled when you read it back in. This has to do with the\n Table.read method and I'm not sure there's anything we can do\n about it without the help of the astropy folks. We recommend\n table metadata use keys that are in all caps::\n\n # Fails because of the string case\n print(_data.tab1.meta['test'])\n # This is okay\n print(_data.tab1.meta['TEST'])\n\n - The difference between the implementation of\n ``BadMixedTypeContainer`` and ``GoodMixedTypeContainer`` has to\n do with how the data is bundled into HDU extensions. The\n ``BadMixedTypeContainer`` will instantiate fine::\n\n data = BadMixedTypeContainer(tab1, arr1)\n print(data.tab1.keys())\n # ['x', 'y', 'z']\n print(data.tab1.meta['test'])\n # 'this'\n\n - But it will barf when you try to reformat/write the data\n because you can't write both a Table and an array to a single\n HDU::\n\n # Fails\n hdu = data.to_hdu()\n\nComplex Instantiation Methods\n+++++++++++++++++++++++++++++\n\nAll of the :class:`DataContainer` above have had simple instatiation\nmethods. :class:`DataContainer` can have more complex instantiation\nmethods, but there are significant limitations to keep in made.\nConsider::\n\n class BadInitContainer(DataContainer):\n datamodel = {'inp1': dict(otype=np.ndarray, descr='Test'),\n 'inp2': dict(otype=np.ndarray, descr='test'),\n 'out': dict(otype=np.ndarray, descr='test'),\n 'alt': dict(otype=np.ndarray, descr='test')}\n\n def __init__(self, inp1, inp2, func='add'):\n args, _, _, values = inspect.getargvalues(inspect.currentframe())\n super(BadInitContainer, self).__init__({k: values[k] for k in args[1:]}) \n\n\n class DubiousInitContainer(DataContainer):\n datamodel = {'inp1': dict(otype=np.ndarray, descr='Test'),\n 'inp2': dict(otype=np.ndarray, descr='test'),\n 'out': dict(otype=np.ndarray, descr='test'),\n 'alt': dict(otype=np.ndarray, descr='test')}\n\n def __init__(self, inp1, inp2, func='add'):\n # If any of the arguments of the init method aren't actually\n # part of the datamodel, you can't use the nominal two lines\n # used in all the other examples above. You have to be specific\n # about what gets passed to super.__init__. 
See the\n # BadInitContainer example above and the test below. WARNING:\n # I'm not sure you would ever want to do this because it can\n # lead to I/O issues; see the _validate function.\n self.func = func\n super(DubiousInitContainer, self).__init__({'inp1': inp1, 'inp2':inp2})\n\n def _validate(self):\n # Because func isn't part of the data model, it won't be part of\n # self if the object is instantiated from a file. So I have to\n # add it here. But I don't know what the value of the attribute\n # was for the original object that was written to disk. This is\n # why you likely always want anything that's critical to setting\n # up the object to be part of the datamodel so that it gets\n # written to disk. See the testing examples for when this will\n # go haywire.\n if not hasattr(self, 'func'):\n self.func = None\n if self.func not in [None, 'add', 'sub']:\n raise ValueError('Function must be either \\'add\\' or \\'sub\\'.')\n\n # This is here because I don't want to overwrite something that\n # might have been read in from an HDU, particularly given that\n # func will be None if reading from a file!\n if self.out is None:\n print('Assigning out!')\n if self.func is None:\n raise ValueError('Do not know how to construct out attribute!')\n self.out = self.inp1 + self.inp2 if self.func == 'add' else self.inp1 - self.inp2\n\n # I'm not going to overwrite _bundle, so that the nominal approach\n # is used.\n\n\n class ComplexInitContainer(DataContainer):\n datamodel = {'inp1': dict(otype=np.ndarray, descr='Test'),\n 'inp2': dict(otype=np.ndarray, descr='test'),\n 'out': dict(otype=np.ndarray, descr='test'),\n 'alt': dict(otype=np.ndarray, descr='test'),\n 'func': dict(otype=str, descr='test')}\n\n def __init__(self, inp1, inp2, func='add'):\n # Since func is part of the datamodel now, we can use the normal\n # two intantiation lines.\n args, _, _, values = inspect.getargvalues(inspect.currentframe())\n super(ComplexInitContainer, self).__init__({k: values[k] for k in args[1:]}) \n\n def _validate(self):\n if self.func not in ['add', 'sub']:\n raise ValueError('Function must be either \\'add\\' or \\'sub\\'.')\n # This is here because I don't want to overwrite something that\n # might have been read in from an HDU, even though they should\n # nominally be the same!\n if self.out is None:\n print('Assigning out!')\n if self.func is None:\n raise ValueError('Do not know how to construct out attribute!')\n self.out = self.inp1 + self.inp2 if self.func == 'add' else self.inp1 - self.inp2\n\n\nWith this implementation:\n\n - The instantiation of the ``BadInitContainer`` will fail because\n the init arguments all need to be part of the datamodel for it\n to work::\n\n x = np.arange(10)\n y = np.arange(10)+5\n\n data = BadInitContainer(x,y)\n # Fails with AttributeError\n\n - The following instantiation is fine because\n ``DubiousInitContainer`` handles the fact that some of the\n arguments to :func:`__init__` are not part of the datamodel::\n\n data = DubiousInitContainer(x,y)\n print(np.array_equal(data.out, data.inp1+data.inp2))\n # True\n\n - One component of the data model wasn't instantiated, so it will\n be None::\n \n print(data.alt is None)\n # True\n\n - The problem with ``DubiousInitContainer`` is that it has\n attributes that cannot be reinstantiated from what's written to\n disk. 
That is, :class:`DataContainers` aren't really fully\n formed objects unless all of its relevant attributes are\n components of the data model::\n\n _data = DubiousInitContainer.from_hdu(hdu)\n print(_data.func == DubiousInitContainer(x,y).func)\n # False\n \n - This is solved by adding func to the datamodel::\n \n data = ComplexInitContainer(x,y)\n _data = ComplexInitContainer.from_hdu(data.to_hdu(add_primary=True))\n print(data.func == _data.func)\n # True\n\n----\n\n.. include common links, assuming primary doc root is up one directory\n.. include:: ../include/links.rst\n\n\"\"\"\nimport os\nimport warnings\n\nfrom IPython import embed\n\nimport numpy as np\nimport inspect\n\nfrom astropy.io import fits\nfrom astropy.table import Table\n\nfrom pypeit import io\nfrom pypeit import masterframe\nfrom pypeit import msgs\n\nclass DataContainer:\n \"\"\"\n Defines an abstract class for holding and manipulating data.\n\n The primary utilities of the class are:\n - Attributes can be accessed normally or as expected for a :obj:`dict`\n - Attributes and items are restricted to conform to a specified data model.\n\n This abstract class should only be used as a base class.\n\n Derived classes must do the following:\n\n - Define a datamodel\n - Provide an :func:`__init__` method that defines the\n instantiation calling sequence and passes the relevant\n dictionary to this base-class instantiation.\n - Provide a :func:`_validate` method, if necessary, that\n processes the data provided in the `__init__` into a\n complete instantiation of the object. This method and the\n :func:`__init__` method are the *only* places where\n attributes can be added to the class.\n - Provide a :func:`_bundle` method that reorganizes the\n datamodel into partitions that can be written to one or\n more fits extensions. More details are provided in the\n description of :func:`_bundle`.\n - Provide a :func:`_parse` method that parses information in\n one or more fits extensions into the appropriate datamodel.\n More details are provided in the description of\n :func:`_parse`.\n\n .. note::\n\n The attributes of the class are *not required* to be a part\n of the ``datamodel``; however, it makes the object simpler\n when they are. Any attributes that are not part of the\n ``datamodel`` must be defined in either the :func:`__init__`\n or :func:`_validate` methods; otherwise, the class with throw\n an ``AttributeError``.\n\n .. todo::\n\n Add a copy method\n\n Args:\n d (:obj:`dict`, optional):\n Dictionary to copy to the internal attribute dictionary.\n All of the keys in the dictionary *must* be elements of\n the ``datamodel``. Any attributes that are not part of\n the ``datamodel`` can be set in the :func:`__init__` or\n :func:`_validate` methods of a derived class. If None,\n the object is instantiated with all of the relevant data\n model attributes but with all of those attributes set to\n None.\n\n \"\"\"\n\n # Define the class version\n version = None\n \"\"\"\n Provides the string representation of the class version.\n\n This is currently put to minimal use so far, but will used for\n I/O verification in the future.\n\n Each derived class should provide a version to guard against data\n model changes during development.\n \"\"\"\n\n hdu_prefix = None\n \"\"\"\n If set, all HDUs generated for this DataContainer will have this\n prefix. This can be set independently for each DataContainer\n derived class; however, it always defaults to None for the base\n class. 
Be wary of nested DataContainer's!!\n \"\"\"\n\n output_to_disk = None\n \"\"\"\n If set, this limits the HDU extensions that are written to the\n output file. Note this is the name of the extension, including\n the hdu_prefix, not necessarily the names of specific datamodel\n components.\n \"\"\"\n\n # Define the data model\n datamodel = None\n \"\"\"\n Provides the class data model. This is undefined in the abstract\n class and should be overwritten in the derived classes.\n\n The format of the ``datamodel`` needed for each implementation of\n a :class:`DataContainer` derived class is as follows.\n\n The datamodel itself is a class attribute (i.e., it is a member\n of the class, not just of an instance of the class). The\n datamodel is a dictionary of dictionaries: Each key of the\n datamodel dictionary provides the name of a given datamodel\n element, and the associated item (dictionary) for the datamodel\n element provides the type and description information for that\n datamodel element. For each datamodel element, the dictionary\n item must provide:\n\n - ``otype``: This is the type of the object for this\n datamodel item. E.g., for a float or a `numpy.ndarray`_,\n you would set ``otype=float`` and ``otype=np.ndarray``,\n respectively.\n\n - ``descr``: This provides a text description of the\n datamodel element. This is used to construct the datamodel\n tables in the pypeit documentation.\n\n If the object type is a `numpy.ndarray`_, you should also provide\n the ``atype`` keyword that sets the type of the data contained\n within the array. E.g., for a floating point array containing an\n image, your datamodel could be simply::\n\n datamodel = {'image' : dict(otype=np.ndarray, atype=float, descr='My image')}\n\n More advanced examples are given in the top-level module documentation.\n\n Currently, ``datamodel`` components are restricted to have\n ``otype`` that are :obj:`tuple`, :obj:`int`, :obj:`float`,\n ``numpy.integer``, ``numpy.floating``, `numpy.ndarray`_, or\n `astropy.table.Table`_ objects. E.g., ``datamodel`` values for\n ``otype`` *cannot* be :obj:`dict`.\n \"\"\"\n # TODO: Enable multiple possible types for the datamodel elements?\n # I.e., allow `otype` to be a tuple of the allowed object types? It\n # looks like this is already possible at least for some types, see\n # pypeit.tracepca.TracePCA.reference_row.\n\n\n def __init__(self, d=None):\n # Data model must be defined\n if self.datamodel is None:\n raise ValueError('Data model for {0} is undefined!'.format(self.__class__.__name__))\n if self.version is None:\n raise ValueError('Must define a version for the class.')\n\n # Ensure the dictionary has all the expected keys\n self.__dict__.update(dict.fromkeys(self.datamodel.keys()))\n\n # Initialize other internals\n self._init_internals()\n\n # Finalize the instantiation.\n # NOTE: The key added to `__dict__` by this call is always\n # `_DataContainer__initialised`, regardless of whether or not\n # the call to this `__init__` is from the derived class. This\n # is why I can check for `_DataContainer__initialised` is in\n # `self.__dict__`, even for derived classes. 
But is there a way\n # we could just check a boolean instead?\n self._init_key = '_DataContainer__initialised'\n self.__initialised = True\n\n # Include the provided data and build-out the data model, if\n # data were provided\n if d is not None:\n\n # Input dictionary cannot have keys that do not exist in\n # the data model\n if not np.all(np.isin(list(d.keys()), list(self.datamodel.keys()))):\n raise AttributeError('Coding error: Initialization arguments do not match '\n 'data model!')\n\n # Assign the values provided by the input dictionary\n #self.__dict__.update(d) # This by-passes the data model checking\n\n ## Assign the values provided by the input dictionary\n for key in d:\n # Nested DataContainer?\n if obj_is_data_container(self.datamodel[key]['otype']):\n if isinstance(d[key], dict):\n setattr(self, key, self.datamodel[key]['otype'](**d[key]))\n else:\n setattr(self, key, d[key])\n else:\n setattr(self, key, d[key])\n\n # Validate the object\n self._validate()\n\n @classmethod\n def full_datamodel(cls, include_parent=True, include_children=True):\n \"\"\"\n Expand out the datamodel into a single dict\n This needs to be a class method to access the datamodel without instantiation\n\n Args:\n include_parent (bool, optional):\n If True, include the parent entry in additional to its pieces\n include_children (bool, optional):\n If True, expand any items that are DataModel's\n\n\n Returns:\n dict: All the keys, items of the nested datamodel's\n\n \"\"\"\n #\n full_datamodel = {}\n for key in cls.datamodel.keys():\n # Data container?\n if obj_is_data_container(cls.datamodel[key]['otype']):\n if include_parent:\n full_datamodel[key] = cls.datamodel[key]\n if include_children:\n # Now run through the others\n sub_datamodel = cls.datamodel[key]['otype'].full_datamodel()\n for key in sub_datamodel.keys():\n # Check this is not a duplicate\n if key in full_datamodel.keys():\n msgs.error(\"Duplicate key in DataModel. Deal with it..\")\n # Assign\n full_datamodel[key] = sub_datamodel[key]\n else:\n full_datamodel[key] = cls.datamodel[key]\n else:\n full_datamodel[key] = cls.datamodel[key]\n #\n return full_datamodel\n\n def _init_internals(self):\n \"\"\"\n Add internal variables to the object before initialization completes\n\n These should be set to None\n \"\"\"\n pass\n\n def _validate(self):\n \"\"\"\n Validate the data container.\n\n The purpose of this function is to check the input data\n provided by the instantiation and flesh out any details of\n the object.\n\n Derived classes should override this function, unless there\n is nothing to validate.\n\n Attributes can be added to the object in this function\n because it is called before the datamodel is frozen.\n \"\"\"\n pass\n\n def _bundle(self, ext=None, transpose_arrays=False):\n \"\"\"\n Bundle the data into a series of objects to be written to\n fits HDU extensions.\n\n The returned object must be a list. 
The list items should be\n one of the following:\n\n - a dictionary with a single key/item pair, where the key\n is the name for the extension and the item is the data\n to be written.\n\n - a single item (object) to be written, which will be\n written in the provided order without any extension\n names (although see the caveat in\n :func:`pypeit.io.dict_to_hdu` for dictionaries with\n single array or `astropy.table.Table`_ items).\n\n The item to be written can be a single array for an ImageHDU,\n an `astropy.table.Table`_ for a BinTableHDU, or a dictionary;\n see :func:`pypeit.io.write_to_hdu`.\n\n For how these objects parsed into the HDUs, see\n :func:`to_hdu`.\n\n The default behavior implemented by this base class just\n parses the attributes into a single dictionary based on the\n datamodel and is returned such that it all is written to a\n single fits extension. Note that this will fault if the\n datamodel contains:\n\n - a dictionary object\n - more than one `astropy.table.Table`_,\n - an `astropy.table.Table`_ and an array-like object\n (:obj:`list` or `numpy.ndarray`_), or\n\n Certain **restrictions** apply to how the data can be bundled\n for the general parser implementation (:func:`_parse`) to\n work correctly. These restriction are:\n\n - The shape and orientation of any input arrays are\n assumed to be correct.\n\n - Datamodel keys for arrays or `astropy.table.Table`_\n objects written to an HDU should match the HDU\n extension name. Otherwise, the set of HDU extension\n names and datamodel keys **must** be unique.\n\n - Datamodel keys will be matched to header values:\n :func:`to_hdu` will write any items in a dictionary\n that is an integer, float, or string (specific numpy\n types or otherwise) to the header. This means header\n keys in **all** extensions should be unique and should\n not be the same as any extension name.\n\n - Datamodels can contain tuples, but these must be\n reasonably converted to strings such they are included\n in (one of) the HDU header(s).\n\n Args:\n ext (:obj:`str`, optional):\n Name for the HDU extension. If None, no name is\n given.\n transpose_arrays (:obj:`bool`, optional):\n Transpose the arrays before writing them to the HDU.\n This option is mostly meant to correct orientation of\n arrays meant for tables so that the number of rows\n match.\n \n Returns:\n :obj:`list`: A list of dictionaries, each list element is\n written to its own fits extension. See the description\n above.\n\n Raises:\n TypeError:\n Raised if the provided ``ext`` is not a string.\n \"\"\"\n d = {}\n for key in self.keys():\n if self[key] is not None and transpose_arrays \\\n and self.datamodel[key]['otype'] == np.ndarray:\n d[key] = self[key].T\n elif self.datamodel[key]['otype'] == tuple:\n # TODO: Anything with tuple type that is None will be\n # converted to 'None'. Is that what we want, or do we\n # want to set it to None so that it's not written?\n d[key] = str(self[key])\n else:\n d[key] = self[key]\n return [d] if ext is None else [{ext:d}]\n\n @classmethod\n def _parse(cls, hdu, ext=None, transpose_table_arrays=False, hdu_prefix=None):\n \"\"\"\n Parse data read from one or more HDUs.\n\n This method is the counter-part to :func:`_bundle`, and\n parses data from the HDUs into a dictionary that can be used\n to instantiate the object.\n\n .. 
warning::\n\n - Beware that this may read data from the provided HDUs\n that could then be changed by :func:`_validate`.\n Construct your :func:`_validate` methods carefully!\n\n - Although :func:`_bundle` methods will likely need to be\n written for each derived class, this parsing method is\n very general to what :class:`DataContainer` can do.\n Before overwriting this function in a derived class,\n make sure and/or test that this method doesn't meet\n your needs, and then tread carefully regardless.\n\n - Because the `astropy.table.Table`_ methods are used\n directly, any metadata associated with the Table will\n also be included in the HDUs constructed by\n :func:`to_hdu`. However, the\n `astropy.table.Table.read`_ method always returns the\n metadata with capitalized keys. This means that,\n regardless of the capitalization of the metadata\n keywords when the data is written, **they will be\n upper-case when read by this function**!\n\n .. note::\n\n Currently, the only test for the object type (``otype``)\n given explicitly by the class datamodel is to check if\n the type should be a tuple. If it is, the parser reads\n the string from the header and evaluates it so that it's\n converted to a tuple on output. See the restrictions\n listed for :func:`_bundle`.\n\n All the other type conversions are implicit or based on\n the HDU type.\n\n Args:\n hdu (`astropy.io.fits.HDUList`_, `astropy.io.fits.ImageHDU`_, `astropy.io.fits.BinTableHDU`_):\n The HDU(s) to parse into the instantiation dictionary.\n ext (:obj:`int`, :obj:`str`, :obj:`list`, optional):\n One or more extensions with the data. If None, the\n function trolls through the HDU(s) and parses the\n data into the datamodel.\n transpose_table_arrays (:obj:`bool`, optional):\n Tranpose *all* the arrays read from any binary\n tables. This is meant to invert the use of\n ``transpose_arrays`` in :func:`_bound`.\n hdu_prefix (:obj:`str`, optional):\n Only parse HDUs with extension names matched to this\n prefix. If None, :attr:`hdu_prefix` is used. If the\n latter is also None, all HDUs are parsed. See\n :func:`pypeit.io.hdu_iter_by_ext`.\n\n Returns:\n :obj:`tuple`: Return three objects\n\n - :obj:`dict`: Dictionary used to instantiate the object.\n - :obj:`bool`: Describes datamodel version checking passed\n - :obj:`bool`: Describes datamodel type checking passed\n\n Raises:\n TypeError:\n Raised if ``ext``, or any of its elements if it's a\n :obj:`list`, are not either strings or integers.\n \"\"\"\n dm_version_passed = True\n dm_type_passed = True\n\n # Setup to iterate through the provided HDUs\n _ext, _hdu = io.hdu_iter_by_ext(hdu, ext=ext, hdu_prefix=hdu_prefix)\n _ext = np.atleast_1d(np.array(_ext, dtype=object))\n str_ext = np.logical_not([isinstance(e, (int, np.integer)) for e in _ext])\n\n # Construct instantiation dictionary\n _d = dict.fromkeys(cls.datamodel.keys())\n\n # Log if relevant data is found for this datamodel\n if np.all([_hdu[e].data is None for e in _ext]):\n # TODO: This is a KLUDGE. 
Not sure we should allow this...\n msgs.warn('Extensions to be read by {0} have no data!'.format(cls.__name__))\n # This is so that the returned booleans for reading the\n # data are not tripped as false!\n found_data = True\n else:\n found_data = False\n\n # NOTE: The extension and keyword comparisons are complicated\n # because the fits standard is to force these all to be\n # capitalized, while the datamodel doesn't\n # implement this restriction.\n\n # Handle hdu_prefix\n if hdu_prefix is not None:\n prefix = hdu_prefix\n else:\n prefix = '' if cls.hdu_prefix is None else cls.hdu_prefix\n\n # Save the list of hdus that have been parsed\n parsed_hdus = []\n\n # HDUs can have dictionary elements directly.\n keys = np.array(list(_d.keys()))\n prefkeys = np.array([prefix+key.upper() for key in keys])\n indx = np.isin(prefkeys, _ext[str_ext])\n if np.any(indx):\n found_data = True\n for e in keys[indx]:\n hduindx = prefix+e.upper()\n # Add it to the list of parsed HDUs\n parsed_hdus += [hduindx]\n if obj_is_data_container(cls.datamodel[e]['otype']):\n # Parse the DataContainer\n # TODO: This only works with single extension\n # DataContainers. Do we want this to be from_hdu\n # instead and add chk_version to _parse?\n _d[e], p1, p2, _ = cls.datamodel[e]['otype']._parse(_hdu[hduindx])\n dm_version_passed &= p1\n dm_type_passed &= p2\n else:\n # Parse the Image or Table data\n dm_type_passed &= hdu[hduindx].header['DMODCLS'] == cls.__name__\n dm_version_passed &= hdu[hduindx].header['DMODVER'] == cls.version\n # Grab it\n _d[e] = _hdu[hduindx].data if isinstance(hdu[hduindx], fits.ImageHDU) \\\n else Table.read(hdu[hduindx]).copy()\n\n for e in _ext:\n if 'DMODCLS' not in _hdu[e].header.keys() or 'DMODVER' not in _hdu[e].header.keys() \\\n or _hdu[e].header['DMODCLS'] != cls.__name__:\n # Can't be parsed\n continue\n # Check for header elements, but do not over-ride existing items\n indx = np.isin([key.upper() for key in keys], list(_hdu[e].header.keys()))\n if np.any(indx):\n found_data = True\n parsed_hdus += [e if _hdu[e].name == '' else _hdu[e].name]\n dm_type_passed &= _hdu[e].header['DMODCLS'] == cls.__name__\n dm_version_passed &= _hdu[e].header['DMODVER'] == cls.version\n for key in keys[indx]:\n if key in _d.keys() and _d[key] is not None:\n continue\n _d[key] = _hdu[e].header[key.upper()] if cls.datamodel[key]['otype'] != tuple \\\n else eval(_hdu[e].header[key.upper()])\n if isinstance(e, (str, np.str_)) and e in prefkeys:\n # Already parsed this above\n continue\n # Parse BinTableHDUs\n if isinstance(_hdu[e], fits.BinTableHDU) \\\n and np.any(np.isin(list(cls.datamodel.keys()), _hdu[e].columns.names)):\n parsed_hdus += [e if _hdu[e].name is None else _hdu[e].name]\n found_data = True\n # Datamodel checking\n dm_type_passed &= _hdu[e].header['DMODCLS'] == cls.__name__\n dm_version_passed &= _hdu[e].header['DMODVER'] == cls.version\n # If the length of the table is 1, assume the table\n # data had to be saved as a single row because of shape\n # differences.\n single_row = len(_hdu[e].data) == 1\n for key in _hdu[e].columns.names:\n if key in cls.datamodel.keys():\n _d[key] = _hdu[e].data[key][0] \\\n if (single_row and _hdu[e].data[key].ndim > 1) \\\n else _hdu[e].data[key]\n if transpose_table_arrays:\n _d[key] = _d[key].T\n\n # Two annoying hacks:\n # - Hack to expunge charray which are basically deprecated and\n # cause trouble.\n # - Hack to force native byte ordering\n for key in _d:\n if isinstance(_d[key], np.chararray):\n _d[key] = np.asarray(_d[key])\n elif 
isinstance(_d[key], np.ndarray) and _d[key].dtype.byteorder not in ['=', '|']:\n _d[key] = _d[key].astype(_d[key].dtype.type)\n\n # Return\n return _d, dm_version_passed and found_data, dm_type_passed and found_data, \\\n np.unique(parsed_hdus).tolist()\n\n\n def __getattr__(self, item):\n \"\"\"Maps values to attributes.\n Only called if there *isn't* an attribute with this name\n \"\"\"\n try:\n return self.__getitem__(item)\n except KeyError as e:\n raise AttributeError('{0} is not an attribute of {1}!'.format(item,\n self.__class__.__name__)) from e\n\n def __setattr__(self, item, value):\n \"\"\"\n Set the attribute value.\n\n Attributes are restricted to those defined by the datamodel.\n \"\"\"\n # version is immutable\n if item == 'version':\n raise TypeError('Internal version does not support assignment.')\n # TODO: It seems like it would be faster to check a boolean\n # attribute. Is that possible?\n if '_DataContainer__initialised' not in self.__dict__:\n # Allow new attributes to be added before object is\n # initialized\n dict.__setattr__(self, item, value)\n else:\n # Otherwise, set as an item\n try:\n self.__setitem__(item, value)\n except KeyError as e:\n # Raise attribute error instead of key error\n raise AttributeError('{0} is not part of the internals nor data model!'.format(item)) from e\n\n def __setitem__(self, item, value):\n \"\"\"\n Access and set an attribute identically to a dictionary item.\n\n Items are restricted to those defined by the datamodel.\n \"\"\"\n if item not in self.__dict__.keys():\n raise KeyError('Key {0} not part of the internals nor data model'.format(item))\n # Internal?\n if item not in self.keys():\n self.__dict__[item] = value\n return\n # Set datamodel item to None?\n if value is None:\n self.__dict__[item] = value\n return\n # Check data type\n if not isinstance(value, self.datamodel[item]['otype']):\n raise TypeError('Incorrect data type for {0}! '.format(item) +\n 'Allowed type(s) are: {0}'.format(self.datamodel[item]['otype']))\n # Array?\n if 'atype' in self.datamodel[item].keys():\n if not isinstance(value.flat[0], self.datamodel[item]['atype']):\n raise TypeError('Wrong data type for array: {}\\n'.format(item)\n + 'Allowed type(s) for the array are: {}'.format(\n self.datamodel[item]['atype']))\n # Set\n self.__dict__[item] = value\n\n def __getitem__(self, item):\n \"\"\"Get an item directly from the internal dict.\"\"\"\n return self.__dict__[item]\n\n def keys(self):\n \"\"\"\n Return the keys for the data objects only\n\n Returns:\n :obj:`dict_keys`: The iterable with the data model keys.\n \"\"\"\n return self.datamodel.keys()\n\n def _primary_header(self, hdr=None):\n \"\"\"\n Construct a primary header that is included with the primary\n HDU extension produced by :func:`to_hdu`.\n\n Additional data can be added to the header for individual HDU\n extensions in :func:`to_hdu` as desired, but this function\n **should not** add any elements from the datamodel to the\n header.\n\n Args:\n hdr (`astropy.io.fits.Header`_, optional):\n Header for the primary extension. 
If None, set by\n :func:`pypeit.io.initialize_header()`.\n\n Returns:\n `astropy.io.fits.Header`_: Header object to include in\n the primary HDU.\n \"\"\"\n return io.initialize_header() if hdr is None else hdr.copy()\n\n def _base_header(self, hdr=None):\n \"\"\"\n Construct a base header that is included with all HDU\n extensions produced by :func:`to_hdu` (unless they are\n overwritten by a nested :class:`DataContainer`).\n\n Additional data can be added to the header for individual HDU\n extensions in :func:`to_hdu` as desired, but this function\n **should not** add any elements from the datamodel to the\n header.\n\n Args:\n hdr (`astropy.io.fits.Header`_, optional):\n Baseline header to add to all returned HDUs. If None,\n set by :func:`pypeit.io.initialize_header()`.\n\n Returns:\n `astropy.io.fits.Header`_: Header object to include in\n all HDU extensions.\n \"\"\"\n # Copy primary header to all subsequent headers\n _hdr = self._primary_header(hdr=hdr)\n # Add DataContainer class name and datamodel version number.\n # This is not added to the primary header because single output\n # files can contain multiple DataContainer objects.\n _hdr['DMODCLS'] = (self.__class__.__name__, 'Datamodel class')\n _hdr['DMODVER'] = (self.version, 'Datamodel version')\n return _hdr\n\n # TODO: Always have this return an HDUList instead of either that\n # or a normal list?\n def to_hdu(self, hdr=None, add_primary=False, primary_hdr=None,\n limit_hdus=None, force_to_bintbl=False, hdu_prefix=None):\n \"\"\"\n Construct one or more HDU extensions with the data.\n\n The organization of the data into separate HDUs is performed\n by :func:`_bundle`, which returns a list of objects. The type\n of each element in the list determines how it is handled. If\n the object is a dictionary with a single key/item pair, the\n key is used as the extension header. Otherwise, the objects\n are written to unnamed extensions. The object or dictionary\n item is passed to :func:`pypeit.io.write_to_hdu` to construct\n the HDU.\n\n Args:\n hdr (`astropy.io.fits.Header`, optional):\n Baseline header to add to all returned HDUs. If None,\n set by :func:`pypeit.io.initialize_header()`.\n add_primary (:obj:`bool`, optional):\n If False, the returned object is a simple\n :obj:`list`, with a list of HDU objects (either\n `astropy.io.fits.ImageHDU`_ or\n `astropy.io.fits.BinTableHDU`_). If true, the method\n constructs an `astropy.io.fits.HDUList` with a\n primary HDU, such that this call::\n\n hdr = io.initialize_header()\n hdu = fits.HDUList([fits.PrimaryHDU(header=primary_hdr)] + self.to_hdu(hdr=hdr))\n\n and this call::\n\n hdu = self.to_hdu(add_primary=True)\n\n are identical.\n primary_hdr (`astropy.io.fits.Header`, optional):\n Header to add to the primary if add_primary=True\n limit_hdus (:obj:`list`, optional):\n Limit the HDUs that can be written to the items in this list\n force_to_bintbl (:obj:`bool`, optional):\n Force any dict into a BinTableHDU (e.g. for\n :class:`pypeit.specobj.SpecObj`)\n hdu_prefix (:obj:`str`, optional):\n Prefix for the HDU names. If None, will use\n :attr:`hdu_prefix`. 
If the latter is also None, no\n prefix is added.\n\n Returns:\n :obj:`list`, `astropy.io.fits.HDUList`_: A list of HDUs,\n where the type depends on the value of ``add_primary``.\n \"\"\"\n # Bundle the data\n data = self._bundle()\n\n # Initialize the primary header (only used if add_primary=True)\n _primary_hdr = self._primary_header(hdr=primary_hdr)\n # Check that the keywords in the primary header do not overlap\n # with any datamodel keys.\n if _primary_hdr is not None:\n hdr_keys = np.array([k.upper() for k in self.keys()])\n indx = np.isin(hdr_keys, list(_primary_hdr.keys()))\n # TODO: This is a hack to deal with PYP_SPEC, but this\n # needs to be cleaned up, as does masterframe more\n # generally...\n if np.sum(indx) > 1 or (np.sum(indx) == 1 and hdr_keys[indx] != 'PYP_SPEC'):\n msgs.error('CODING ERROR: Primary header should not contain keywords that are the '\n 'same as the datamodel for {0}.'.format(self.__class__.__name__))\n\n # Initialize the base header\n _hdr = self._base_header(hdr=hdr)\n # Check that the keywords in the base header do not overlap\n # with any datamodel keys.\n if _hdr is not None \\\n and np.any(np.isin([k.upper() for k in self.keys()], list(_hdr.keys()))):\n msgs.error('CODING ERROR: Baseline header should not contain keywords that are the '\n 'same as the datamodel for {0}.'.format(self.__class__.__name__))\n\n # Construct the list of HDUs\n hdu = []\n for d in data:\n if isinstance(d, dict) and len(d) == 1:\n ext = list(d.keys())[0]\n # Allow for embedded DataContainer's\n if isinstance(d[ext], DataContainer):\n _hdu = d[ext].to_hdu(add_primary=False)\n # NOTE: The lines below allow extension names to be\n # overridden by the input dictionary keyword for\n # DataContainers that are confined to a single HDU.\n # This is necessary because `hdu_prefix` must be a\n # class attribute to allow for its inclusion in the\n # read methods (e.g., `_parse`)\n if len(_hdu) == 1 and _hdu[0].name != ext:\n _hdu[0].name = ext\n hdu += _hdu\n else:\n hdu += [io.write_to_hdu(d[ext], name=ext, hdr=_hdr,\n force_to_bintbl=force_to_bintbl)]\n else:\n hdu += [io.write_to_hdu(d, hdr=_hdr, force_to_bintbl=force_to_bintbl)]\n # Prefixes\n _hdu_prefix = (None if self.hdu_prefix is None else self.hdu_prefix) \\\n if hdu_prefix is None else hdu_prefix\n if _hdu_prefix is not None:\n for ihdu in hdu:\n ihdu.name = _hdu_prefix+ihdu.name\n # Limit?\n if limit_hdus:\n hdu = [h for h in hdu if h.name in limit_hdus]\n # Return\n return fits.HDUList([fits.PrimaryHDU(header=_primary_hdr)] + hdu) if add_primary else hdu\n\n @classmethod\n def from_hdu(cls, hdu, hdu_prefix=None, chk_version=True):\n \"\"\"\n Instantiate the object from an HDU extension.\n\n This is primarily a wrapper for :func:`_parse`.\n\n Args:\n hdu (`astropy.io.fits.HDUList`_, `astropy.io.fits.ImageHDU`_, `astropy.io.fits.BinTableHDU`_):\n The HDU(s) with the data to use for instantiation.\n hdu_prefix (:obj:`str`, optional):\n Passed to _parse()\n chk_version (:obj:`bool`, optional):\n If True, raise an error if the datamodel version or\n type check failed. If False, throw a warning only.\n \"\"\"\n # NOTE: We can't use `cls(cls._parse(hdu))` here because this\n # will call the `__init__` method of the derived class and we\n # need to use the `__init__` of the base class instead. So\n # below, I get an empty instance of the derived class using\n # `__new__`, call the parent `__init__`, and then return the\n # result. 
The call to `DataContainer.__init__` is explicit to\n # deal with objects inheriting from both DataContainer and\n # other base classes, like MasterFrame.\n d, dm_version_passed, dm_type_passed, parsed_hdus = cls._parse(hdu, hdu_prefix=hdu_prefix)\n # Check version and type?\n if not dm_type_passed:\n msgs.error('The HDU(s) cannot be parsed by a {0} object!'.format(cls.__name__))\n if not dm_version_passed:\n _f = msgs.error if chk_version else msgs.warn\n _f('Current version of {0} object in code (v{1})'.format(cls.__name__, cls.version)\n + ' does not match version used to write your HDU(s)!')\n # Instantiate\n return cls.from_dict(d=d)\n\n @classmethod\n def from_dict(cls, d=None):\n \"\"\"\n Instantiate from a dictionary.\n\n This is primarily to allow for instantiating classes from a\n file where the data has already been parsed. E.g., see how\n the :class:`~pypeit.tracepca.TracePCA` objects are\n instantiated in\n :class:`pypeit.edgetrace.EdgeTraceSet.from_hdu`. However,\n note that this does the bare minimum to instantiate the\n object. Any class-specific operations that are needed to\n complete the instantiation should be done by ovewritting this\n method; e.g., see :func:`pypeit.tracepca.TracePCA.from_dict`.\n\n Args:\n d (:obj:`dict`, optional):\n Dictionary with the data to use for instantiation.\n \"\"\"\n self = super().__new__(cls)\n DataContainer.__init__(self, d=d)\n return self\n\n def to_file(self, ofile, overwrite=False, checksum=True, primary_hdr=None, hdr=None,\n limit_hdus=None):\n \"\"\"\n Write the data to a file.\n\n This is a convenience wrapper for :func:`to_hdu` and\n :func:`pypeit.io.write_to_fits`. The output is always placed\n in the 2nd extension; the first (primary) extension is always\n empty.\n\n Args:\n ofile (:obj:`str`):\n Fits file for the data. File names with '.gz'\n extensions will be gzipped; see\n :func:`pypeit.io.write_to_fits`.\n primary_hdr (`astropy.io.fits.Header`, optional):\n Primary header to add to first extension. Passed\n directly to :func:`to_hdu`; see usage there.\n hdr (`astropy.io.fits.Header`, optional):\n Baseline header to add to all returned HDUs. Passed\n directly to :func:`to_hdu`; see usage there.\n overwrite (:obj:`bool`, optional):\n Flag to overwrite any existing file.\n checksum (:obj:`bool`, optional):\n Passed to `astropy.io.fits.HDUList.writeto`_ to add\n the DATASUM and CHECKSUM keywords fits header(s).\n limit_hdus (:obj:`list`, optional):\n Passed to :func:`to_hdu`; see usage there\n \"\"\"\n io.write_to_fits(self.to_hdu(add_primary=True, primary_hdr=primary_hdr,\n limit_hdus=limit_hdus, hdr=hdr),\n ofile, overwrite=overwrite, checksum=checksum, hdr=hdr)\n\n # TODO: This requires that master_key be an attribute... 
This\n # method is a bit too ad hoc for me...\n def to_master_file(self, master_filename=None, **kwargs):\n \"\"\"\n Wrapper on to_file() that deals with masterframe naming and header\n\n This also sets master_key and master_dir internally if\n when master_filename is provided\n\n self.hdu_prefix and self.output_to_disk must be set (or None)\n\n Args:\n master_filename (str, optional):\n Name of masterfile; if provided, parsed for master_key, master_dir\n If not provided, constructed from internal master_key, master_dir\n **kwargs: passed to to_file()\n \"\"\"\n # Output file\n if master_filename is None:\n master_filename = masterframe.construct_file_name(self, self.master_key,\n master_dir=self.master_dir)\n else:\n self.master_key, self.master_dir = masterframe.grab_key_mdir(\n master_filename, from_filename=True)\n # Header\n if hasattr(self, 'process_steps'):\n steps = self.process_steps\n else:\n steps = None\n if hasattr(self, 'files'):\n raw_files = self.files\n else:\n raw_files = None\n hdr = masterframe.build_master_header(self, self.master_key, self.master_dir,\n steps=steps, raw_files=raw_files)\n # Finish\n self.to_file(master_filename, primary_hdr=hdr,\n limit_hdus=self.output_to_disk, overwrite=True, **kwargs)\n\n # TODO: Add options to compare the checksum and/or check the package versions\n @classmethod\n def from_file(cls, ifile, verbose=True, chk_version=True):\n \"\"\"\n Instantiate the object from an extension in the specified fits file.\n\n This is a convenience wrapper for :func:`from_hdu`.\n \n Args:\n ifile (:obj:`str`):\n Fits file with the data to read\n verbose (:obj:`bool`, optional):\n Print informational messages\n chk_version (:obj:`bool`, optional):\n Passed to from_hdu(). See those docs for details\n\n Raises:\n FileNotFoundError:\n Raised if the specified file does not exist.\n \"\"\"\n if not os.path.isfile(ifile):\n raise FileNotFoundError('{0} does not exist!'.format(ifile))\n\n if verbose:\n msgs.info(\"Loading {} from {}\".format(cls.__name__, ifile))\n\n # Do it\n with io.fits_open(ifile) as hdu:\n obj = cls.from_hdu(hdu, chk_version=chk_version)\n if hasattr(obj, 'head0'):\n obj.head0 = hdu[0].header\n if hasattr(obj, 'filename'):\n obj.filename = ifile\n\n # Master this and that\n if hasattr(cls, 'master_type'):\n obj.master_key, obj.master_dir = masterframe.grab_key_mdir(ifile)\n if hasattr(obj, 'head0'):\n if 'MSTRTYP' in obj.head0.keys():\n if obj.head0['MSTRTYP'] != cls.master_type:\n msgs.error('Master Type read from header incorrect! '\n 'Found {0}; expected {1}'.format(obj.head0['MSTRTYP'],\n cls.master_type))\n else:\n msgs.warn('DataContainer has `master_type` attribute but is missing the '\n 'MSTRTYP header keyword!')\n return obj\n\n def __repr__(self):\n repr = '<{:s}: '.format(self.__class__.__name__)\n # Image\n rdict = {}\n for attr in self.datamodel.keys():\n if hasattr(self, attr) and getattr(self, attr) is not None:\n rdict[attr] = True\n else:\n rdict[attr] = False\n repr += ' items={}'.format(rdict)\n repr = repr + '>'\n return repr\n\n\ndef obj_is_data_container(obj):\n \"\"\"\n Simple method to check whether an object is a data container\n\n Args:\n obj:\n\n Returns:\n bool: True if it is\n\n \"\"\"\n return inspect.isclass(obj) and issubclass(obj, DataContainer)\n"
] | [
[
"numpy.polyfit",
"matplotlib.pyplot.axes",
"numpy.max",
"numpy.argmin",
"numpy.where",
"numpy.polyval",
"numpy.roll",
"numpy.arange",
"numpy.copy",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"numpy.zeros",
"numpy.polyder",
"numpy.min",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.abs",
"matplotlib.widgets.Button",
"matplotlib.pyplot.subplots",
"numpy.sort",
"matplotlib.colors.Normalize",
"matplotlib.transforms.blended_transform_factory"
],
[
"numpy.unique",
"numpy.asarray",
"numpy.all",
"numpy.any",
"numpy.array",
"numpy.sum",
"numpy.isin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
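The `_bundle`/`_parse` docstrings in the PypeIt `DataContainer` code above describe how datamodel attributes are packed into FITS extensions: the base-class `_bundle` collects everything into one dictionary, scalar items end up in the extension header, and the list element may be wrapped as `{ext: d}` to name the extension. A minimal sketch of that return shape, assuming the attribute names (`WAVE`, `FLUX`, `exptime`) and the extension name `SPEC` are purely illustrative and not part of the source:

    import numpy as np

    # What the base-class _bundle builds: one dict over the datamodel keys.
    d = {
        "WAVE": np.linspace(4000.0, 7000.0, 100),  # array -> HDU data
        "FLUX": np.ones(100),                      # array -> HDU data
        "exptime": 600.0,                          # scalar -> written to the HDU header
    }

    bundled_named = [{"SPEC": d}]  # what _bundle(ext="SPEC") returns: one named extension
    bundled_anon = [d]             # what _bundle(ext=None) returns: one unnamed extension
    # to_hdu() then hands each list element to pypeit.io.write_to_hdu to build the HDU.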
JerBouma/OpenBBTerminal | [
"0c60d70cb29b0a6e4db41d6dd0d34f79a6169b27",
"0c60d70cb29b0a6e4db41d6dd0d34f79a6169b27"
] | [
"openbb_terminal/cryptocurrency/overview/coinpaprika_view.py",
"openbb_terminal/cryptocurrency/due_diligence/pycoingecko_model.py"
] | [
"\"\"\"CoinPaprika view\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nimport os\n\nfrom pandas.plotting import register_matplotlib_converters\n\nimport openbb_terminal.cryptocurrency.overview.coinpaprika_model as paprika\nfrom openbb_terminal.cryptocurrency.dataframe_helpers import (\n lambda_long_number_format_with_type_check,\n)\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.helper_funcs import export_data, print_rich_table\nfrom openbb_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\nregister_matplotlib_converters()\n\n# pylint: disable=inconsistent-return-statements\n# pylint: disable=C0302, too-many-lines\n\nCURRENCIES = [\n \"BTC\",\n \"ETH\",\n \"USD\",\n \"EUR\",\n \"PLN\",\n \"KRW\",\n \"GBP\",\n \"CAD\",\n \"JPY\",\n \"RUB\",\n \"TRY\",\n \"NZD\",\n \"AUD\",\n \"CHF\",\n \"UAH\",\n \"HKD\",\n \"SGD\",\n \"NGN\",\n \"PHP\",\n \"MXN\",\n \"BRL\",\n \"THB\",\n \"CLP\",\n \"CNY\",\n \"CZK\",\n \"DKK\",\n \"HUF\",\n \"IDR\",\n \"ILS\",\n \"INR\",\n \"MYR\",\n \"NOK\",\n \"PKR\",\n \"SEK\",\n \"TWD\",\n \"ZAR\",\n \"VND\",\n \"BOB\",\n \"COP\",\n \"PEN\",\n \"ARS\",\n \"ISK\",\n]\n\n# see https://github.com/OpenBB-finance/OpenBBTerminal/pull/562#issuecomment-887842888\n# EXCHANGES = paprika.get_list_of_exchanges()\n\n\n@log_start_end(log=logger)\ndef display_global_market(export: str) -> None:\n \"\"\"Return data frame with most important global crypto statistics like:\n market_cap_usd, volume_24h_usd, bitcoin_dominance_percentage, cryptocurrencies_number,\n market_cap_ath_value, market_cap_ath_date, volume_24h_ath_value, volume_24h_ath_date,\n market_cap_change_24h, volume_24h_change_24h, last_updated [Source: CoinPaprika]\n\n Parameters\n ----------\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_global_market()\n df_data = df.copy()\n df[\"Value\"] = df[\"Value\"].apply( # pylint:disable=unsupported-assignment-operation\n lambda x: lambda_long_number_format_with_type_check(x)\n )\n\n print_rich_table(\n df, headers=list(df.columns), show_index=False, title=\"Global Crypto Statistics\"\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"global\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_all_coins_market_info(\n currency: str, sortby: str, descend: bool, top: int, export: str\n) -> None:\n \"\"\"Displays basic market information for all coins from CoinPaprika API. 
[Source: CoinPaprika]\n\n Parameters\n ----------\n currency: str\n Quoted currency\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n links: bool\n Flag to display urls\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_coins_market_info(quotes=currency).sort_values(\n by=sortby, ascending=descend\n )\n\n df_data = df.copy()\n\n if df.empty:\n console.print(\"No data found\", \"\\n\")\n return\n\n cols = [col for col in df.columns if col != \"rank\"]\n df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))\n\n console.print(f\"\\nDisplaying data vs {currency}\")\n\n print_rich_table(\n df.head(top),\n headers=list(df.columns),\n show_index=False,\n title=\"Basic Market Information\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"markets\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_all_coins_info(\n currency: str, sortby: str, descend: bool, top: int, export: str\n) -> None:\n \"\"\"Displays basic coin information for all coins from CoinPaprika API. [Source: CoinPaprika]\n\n Parameters\n ----------\n currency: str\n Quoted currency\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n links: bool\n Flag to display urls\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_coins_info(quotes=currency).sort_values(\n by=sortby, ascending=descend\n )\n\n df_data = df.copy()\n\n if df.empty:\n console.print(\"Not data found\", \"\\n\")\n return\n\n cols = [col for col in df.columns if col != \"rank\"]\n df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))\n\n console.print(f\"\\nDisplaying data vs {currency}\")\n\n print_rich_table(\n df.head(top),\n headers=list(df.columns),\n show_index=False,\n title=\"Basic Coin Information\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"info\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_all_exchanges(\n currency: str, sortby: str, descend: bool, top: int, export: str\n) -> None:\n \"\"\"List exchanges from CoinPaprika API. 
[Source: CoinPaprika]\n\n Parameters\n ----------\n currency: str\n Quoted currency\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n links: bool\n Flag to display urls\n export : str\n Export dataframe data to csv,json,xlsx file\n\n \"\"\"\n\n df = paprika.get_list_of_exchanges(quotes=currency).sort_values(\n by=sortby, ascending=descend\n )\n\n df_data = df.copy()\n\n if df.empty:\n console.print(\"No data found\", \"\\n\")\n return\n\n cols = [col for col in df.columns if col != \"rank\"]\n df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))\n console.print(f\"\\nDisplaying data vs {currency}\")\n\n print_rich_table(\n df.head(top), headers=list(df.columns), show_index=False, title=\"List Exchanges\"\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"exchanges\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_exchange_markets(\n exchange: str, sortby: str, descend: bool, top: int, links: bool, export: str\n) -> None:\n \"\"\"Get all markets for given exchange [Source: CoinPaprika]\n\n Parameters\n ----------\n exchange: str\n Exchange identifier e.g Binance\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n links: bool\n Flag to display urls\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_exchanges_market(exchange_id=exchange)\n\n df_data = df.copy()\n\n if df.empty:\n console.print(\"No data found\", \"\\n\")\n return\n\n df = df.sort_values(by=sortby, ascending=descend)\n\n if links is True:\n df = df[[\"exchange_id\", \"pair\", \"trust_score\", \"market_url\"]]\n else:\n df.drop(\"market_url\", axis=1, inplace=True)\n\n print_rich_table(\n df.head(top),\n headers=list(df.columns),\n show_index=False,\n title=\"Exchange Markets\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"exmarkets\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_all_platforms(export: str) -> None:\n \"\"\"List all smart contract platforms like ethereum, solana, cosmos, polkadot, kusama. [Source: CoinPaprika]\n\n Parameters\n ----------\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_all_contract_platforms()\n\n print_rich_table(\n df, headers=list(df.columns), show_index=False, title=\"Smart Contract Platforms\"\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"platforms\",\n df,\n )\n\n\n@log_start_end(log=logger)\ndef display_contracts(\n platform: str, sortby: str, descend: bool, top: int, export: str\n) -> None:\n \"\"\"Gets all contract addresses for given platform. [Source: CoinPaprika]\n\n Parameters\n ----------\n platform: str\n Blockchain platform like eth-ethereum\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_contract_platform(platform)\n\n if df.empty:\n console.print(f\"Nothing found for platform: {platform}\", \"\\n\")\n return\n\n df = df.sort_values(by=sortby, ascending=descend)\n\n print_rich_table(\n df.head(top),\n headers=list(df.columns),\n show_index=False,\n title=\"Contract Addresses\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"contracts\",\n df,\n )\n",
"\"\"\"CoinGecko model\"\"\"\n__docformat__ = \"numpy\"\n# pylint:disable=unsupported-assignment-operation\n\nimport logging\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport pandas as pd\nimport regex as re\nfrom pycoingecko import CoinGeckoAPI\n\nfrom openbb_terminal.cryptocurrency.dataframe_helpers import (\n lambda_replace_underscores_in_column_names,\n)\nfrom openbb_terminal.cryptocurrency.discovery.pycoingecko_model import read_file_data\nfrom openbb_terminal.cryptocurrency.pycoingecko_helpers import (\n DENOMINATION,\n calc_change,\n create_dictionary_with_prefixes,\n filter_list,\n find_discord,\n remove_keys,\n rename_columns_in_dct,\n)\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\nCHANNELS = {\n \"telegram_channel_identifier\": \"telegram\",\n \"twitter_screen_name\": \"twitter\",\n \"subreddit_url\": \"subreddit\",\n \"bitcointalk_thread_identifier\": \"bitcointalk\",\n \"facebook_username\": \"facebook\",\n \"discord\": \"discord\",\n}\n\nBASE_INFO = [\n \"id\",\n \"name\",\n \"symbol\",\n \"asset_platform_id\",\n \"description\",\n \"contract_address\",\n \"market_cap_rank\",\n \"public_interest_score\",\n]\n\n\n@log_start_end(log=logger)\ndef get_coin_potential_returns(\n main_coin: str,\n vs: Union[str, None] = None,\n top: Union[int, None] = None,\n price: Union[int, None] = None,\n) -> pd.DataFrame:\n \"\"\"Fetch data to calculate potential returns of a certain coin. [Source: CoinGecko]\n\n Parameters\n ----------\n main_coin : str\n Coin loaded to check potential returns for (e.g., algorand)\n vs : str | None\n Coin to compare main_coin with (e.g., bitcoin)\n top : int | None\n Number of coins with highest market cap to compare main_coin with (e.g., 5)\n price\n Target price of main_coin to check potential returns (e.g., 5)\n\n Returns\n -------\n pd.DataFrame\n Potential returns data\n Columns: Coin, Current Price, Target Coin, Potential Price, Potential Market Cap ($), Change (%)\n \"\"\"\n client = CoinGeckoAPI()\n COLUMNS = [\n \"Coin\",\n \"Current Price ($)\",\n \"Current Market Cap ($)\",\n \"Target Coin\",\n \"Potential Price ($)\",\n \"Potential Market Cap ($)\",\n \"Change (%)\",\n ]\n if top and top > 0: # user wants to compare with top coins\n data = client.get_price(\n ids=f\"{main_coin}\",\n vs_currencies=\"usd\",\n include_market_cap=True,\n include_24hr_vol=False,\n include_24hr_change=False,\n include_last_updated_at=False,\n )\n top_coins_data = client.get_coins_markets(\n vs_currency=\"usd\", per_page=top, order=\"market_cap_desc\"\n )\n main_coin_data = data[main_coin]\n diff_arr = []\n for coin in top_coins_data:\n market_cap_difference_percentage = calc_change(\n coin[\"market_cap\"], main_coin_data[\"usd_market_cap\"]\n )\n future_price = main_coin_data[\"usd\"] * (\n 1 + market_cap_difference_percentage / 100\n )\n diff_arr.append(\n [\n main_coin,\n main_coin_data[\"usd\"],\n main_coin_data[\"usd_market_cap\"],\n coin[\"id\"],\n future_price,\n coin[\"market_cap\"],\n market_cap_difference_percentage,\n ]\n )\n return pd.DataFrame(\n data=diff_arr,\n columns=COLUMNS,\n )\n\n if vs: # user passed a coin\n data = client.get_price(\n ids=f\"{main_coin},{vs}\",\n vs_currencies=\"usd\",\n include_market_cap=True,\n include_24hr_vol=False,\n include_24hr_change=False,\n include_last_updated_at=False,\n )\n main_coin_data = data[main_coin]\n vs_coin_data = data[vs]\n\n if main_coin_data and vs_coin_data:\n 
market_cap_difference_percentage = calc_change(\n vs_coin_data[\"usd_market_cap\"], main_coin_data[\"usd_market_cap\"]\n )\n future_price = main_coin_data[\"usd\"] * (\n 1 + market_cap_difference_percentage / 100\n )\n return pd.DataFrame(\n data=[\n [\n main_coin,\n main_coin_data[\"usd\"],\n main_coin_data[\"usd_market_cap\"],\n vs,\n future_price,\n vs_coin_data[\"usd_market_cap\"],\n market_cap_difference_percentage,\n ]\n ],\n columns=COLUMNS,\n )\n\n if price and price > 0: # user passed a price\n data = client.get_price(\n ids=main_coin,\n vs_currencies=\"usd\",\n include_market_cap=True,\n include_24hr_vol=False,\n include_24hr_change=False,\n include_last_updated_at=False,\n )\n main_coin_data = data[main_coin]\n if main_coin_data:\n final_market_cap = (\n main_coin_data[\"usd_market_cap\"] * price / main_coin_data[\"usd\"]\n )\n market_cap_difference_percentage = calc_change(\n final_market_cap, main_coin_data[\"usd_market_cap\"]\n )\n future_price = main_coin_data[\"usd\"] * (\n 1 + market_cap_difference_percentage / 100\n )\n return pd.DataFrame(\n data=[\n [\n main_coin,\n main_coin_data[\"usd\"],\n main_coin_data[\"usd_market_cap\"],\n \"\",\n future_price,\n final_market_cap,\n market_cap_difference_percentage,\n ]\n ],\n columns=COLUMNS,\n )\n\n return pd.DataFrame()\n\n\n@log_start_end(log=logger)\ndef check_coin(coin_id: str):\n coins = read_file_data(\"coingecko_coins.json\")\n for coin in coins:\n if coin[\"id\"] == coin_id:\n return coin[\"id\"]\n if coin[\"symbol\"] == coin_id:\n return coin[\"id\"]\n return None\n\n\n@log_start_end(log=logger)\ndef get_coin_market_chart(\n coin_id: str = \"\", vs_currency: str = \"usd\", days: int = 30, **kwargs: Any\n) -> pd.DataFrame:\n \"\"\"Get prices for given coin. [Source: CoinGecko]\n\n Parameters\n ----------\n vs_currency: str\n currency vs which display data\n days: int\n number of days to display the data\n kwargs\n\n Returns\n -------\n pandas.DataFrame\n Prices for given coin\n Columns: time, price, currency\n \"\"\"\n client = CoinGeckoAPI()\n prices = client.get_coin_market_chart_by_id(coin_id, vs_currency, days, **kwargs)\n prices = prices[\"prices\"]\n df = pd.DataFrame(data=prices, columns=[\"time\", \"price\"])\n df[\"time\"] = pd.to_datetime(df.time, unit=\"ms\")\n df = df.set_index(\"time\")\n df[\"currency\"] = vs_currency\n return df\n\n\n@log_start_end(log=logger)\ndef get_coin_tokenomics(symbol: str = \"\") -> pd.DataFrame:\n \"\"\"Get tokenomics for given coin. 
[Source: CoinGecko]\n\n Parameters\n ----------\n symbol: str\n coin symbol to check tokenomics\n\n Returns\n -------\n pandas.DataFrame\n Metric, Value with tokenomics\n \"\"\"\n client = CoinGeckoAPI()\n coin_data = client.get_coin_by_id(symbol)\n block_time = coin_data[\"block_time_in_minutes\"]\n total_supply = coin_data[\"market_data\"][\"total_supply\"]\n max_supply = coin_data[\"market_data\"][\"max_supply\"]\n circulating_supply = coin_data[\"market_data\"][\"circulating_supply\"]\n return pd.DataFrame(\n {\n \"Metric\": [\n \"Block time [min]\",\n \"Total Supply\",\n \"Max Supply\",\n \"Circulating Supply\",\n ],\n \"Value\": [block_time, total_supply, max_supply, circulating_supply],\n }\n )\n\n\nclass Coin:\n \"\"\"Coin class, it holds loaded coin\"\"\"\n\n @log_start_end(log=logger)\n def __init__(self, symbol: str, load_from_api: bool = True):\n self.client = CoinGeckoAPI()\n if load_from_api:\n self._coin_list = self.client.get_coins_list()\n else:\n self._coin_list = read_file_data(\"coingecko_coins.json\")\n\n self.coin_symbol, self.symbol = self._validate_coin(symbol)\n\n if self.coin_symbol:\n self.coin: Dict[Any, Any] = self._get_coin_info()\n else:\n pass\n\n @log_start_end(log=logger)\n def __str__(self):\n return f\"{self.coin_symbol}\"\n\n @log_start_end(log=logger)\n def _validate_coin(\n self,\n search_coin: str,\n ) -> Tuple[Optional[Any], Optional[Any]]:\n \"\"\"Validate if given coin symbol or id exists in list of available coins on CoinGecko.\n If yes it returns coin id. [Source: CoinGecko]\n\n Parameters\n ----------\n symbol: str\n Either coin symbol or coin id\n\n Returns\n -------\n Tuple[str, str]\n - str with coin\n - str with symbol\n \"\"\"\n\n coin = None\n symbol = None\n\n for dct in self._coin_list:\n if search_coin.lower() in [dct[\"symbol\"], dct[\"id\"]]:\n coin = dct.get(\"id\")\n symbol = dct.get(\"symbol\")\n return coin, symbol\n return None, None\n\n @log_start_end(log=logger)\n def coin_list(self) -> list:\n \"\"\"List all available coins [Source: CoinGecko]\n\n Returns\n -------\n list\n list of all available coin ids\n \"\"\"\n\n return [token.get(\"id\") for token in self._coin_list]\n\n @log_start_end(log=logger)\n def _get_coin_info(self) -> dict:\n \"\"\"Helper method which fetch the coin information by id from CoinGecko API like:\n (name, price, market, ... 
including exchange tickers) [Source: CoinGecko]\n\n Returns\n -------\n dict\n Coin information\n \"\"\"\n\n params = dict(localization=\"false\", tickers=\"false\", sparkline=True)\n return self.client.get_coin_by_id(self.coin_symbol, **params)\n\n @log_start_end(log=logger)\n def _get_links(self) -> Dict:\n \"\"\"Helper method that extracts links from coin [Source: CoinGecko]\n\n Returns\n -------\n dict\n Links related to coin\n \"\"\"\n\n return self.coin.get(\"links\", {})\n\n @log_start_end(log=logger)\n def get_repositories(self) -> Optional[Any]:\n \"\"\"Get list of all repositories for given coin [Source: CoinGecko]\n\n Returns\n -------\n list\n Repositories related to coin\n \"\"\"\n\n return self._get_links().get(\"repos_url\")\n\n @log_start_end(log=logger)\n def get_developers_data(self) -> pd.DataFrame:\n \"\"\"Get coin development data from GitHub or BitBucket like:\n number of pull requests, contributor etc [Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Developers Data\n Columns: Metric, Value\n \"\"\"\n\n dev = self.coin.get(\"developer_data\", {})\n useless_keys = (\n \"code_additions_deletions_4_weeks\",\n \"last_4_weeks_commit_activity_series\",\n )\n remove_keys(useless_keys, dev)\n df = pd.Series(dev).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n\n return df[df[\"Value\"].notna()]\n\n @log_start_end(log=logger)\n def get_blockchain_explorers(self) -> Union[pd.DataFrame, Any]:\n \"\"\"Get list of URLs to blockchain explorers for given coin. [Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Blockchain Explorers\n Columns: Metric, Value\n \"\"\"\n\n blockchain = self._get_links().get(\"blockchain_site\")\n if blockchain:\n dct = filter_list(blockchain)\n df = pd.Series(dct).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n return df[df[\"Value\"].notna()]\n return None\n\n @log_start_end(log=logger)\n def get_social_media(self) -> pd.DataFrame:\n \"\"\"Get list of URLs to social media like twitter, facebook, reddit... [Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Urls to social media\n Columns: Metric, Value\n \"\"\"\n\n social_dct = {}\n links = self._get_links()\n for (\n channel\n ) in CHANNELS.keys(): # pylint: disable=consider-iterating-dictionary)\n if channel in links:\n value = links.get(channel, \"\")\n if channel == \"twitter_screen_name\":\n value = \"https://twitter.com/\" + value\n elif channel == \"bitcointalk_thread_identifier\" and value is not None:\n value = f\"https://bitcointalk.org/index.php?topic={value}\"\n social_dct[channel] = value\n social_dct[\"discord\"] = find_discord(links.get(\"chat_url\"))\n dct = rename_columns_in_dct(social_dct, CHANNELS)\n df = pd.Series(dct).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n return df[df[\"Value\"].notna()]\n\n @log_start_end(log=logger)\n def get_websites(self) -> pd.DataFrame:\n \"\"\"Get list of URLs to websites like homepage of coin, forum. 
[Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Urls to website, homepage, forum\n Columns: Metric, Value\n \"\"\"\n\n websites_dct = {}\n links = self._get_links()\n sites = [\"homepage\", \"official_forum_url\", \"announcement_url\"]\n for site in sites:\n websites_dct[site] = filter_list(links.get(site))\n df = pd.Series(websites_dct).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Value\"] = df[\"Value\"].apply(lambda x: \",\".join(x))\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n return df[df[\"Value\"].notna()]\n\n @log_start_end(log=logging)\n def get_categories(self) -> Union[Dict[Any, Any], List[Any]]:\n \"\"\"Coins categories. [Source: CoinGecko]\n\n Returns\n -------\n list/dict\n Coin categories\n \"\"\"\n\n return self.coin.get(\"categories\", {})\n\n @log_start_end(log=logger)\n def _get_base_market_data_info(self) -> dict:\n \"\"\"Helper method that fetches all the base market/price information about given coin. [Source: CoinGecko]\n\n Returns\n -------\n dict\n All market related information for given coin\n \"\"\"\n market_dct = {}\n market_data = self.coin.get(\"market_data\", {})\n for stat in [\n \"total_supply\",\n \"max_supply\",\n \"circulating_supply\",\n \"price_change_percentage_24h\",\n \"price_change_percentage_7d\",\n \"price_change_percentage_30d\",\n ]:\n market_dct[stat] = market_data.get(stat)\n prices = create_dictionary_with_prefixes(\n [\"current_price\"], market_data, DENOMINATION\n )\n market_dct.update(prices)\n return market_dct\n\n @log_start_end(log=logger)\n def get_base_info(self) -> pd.DataFrame:\n \"\"\"Get all the base information about given coin. [Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Base information about coin\n \"\"\"\n\n regx = r'<a href=\"(.+?)\">|</a>'\n\n results = {}\n for attr in BASE_INFO:\n info_obj = self.coin.get(attr, {})\n if attr == \"description\":\n info_obj = info_obj.get(\"en\")\n info_obj = re.sub(regx, \"\", info_obj)\n info_obj = re.sub(r\"\\r\\n\\r\\n\", \" \", info_obj)\n results[attr] = info_obj\n results.update(self._get_base_market_data_info())\n df = pd.Series(results).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n\n return df[df[\"Value\"].notna()]\n\n @log_start_end(log=logger)\n def get_market_data(self) -> pd.DataFrame:\n \"\"\"Get all the base market information about given coin. 
[Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Base market information about coin\n Metric,Value\n \"\"\"\n\n market_data = self.coin.get(\"market_data\", {})\n market_columns_denominated = [\n \"market_cap\",\n \"fully_diluted_valuation\",\n \"total_volume\",\n \"high_24h\",\n \"low_24h\",\n ]\n denominated_data = create_dictionary_with_prefixes(\n market_columns_denominated, market_data, DENOMINATION\n )\n\n market_single_columns = [\n \"market_cap_rank\",\n \"total_supply\",\n \"max_supply\",\n \"circulating_supply\",\n \"price_change_percentage_24h\",\n \"price_change_percentage_7d\",\n \"price_change_percentage_30d\",\n \"price_change_percentage_60d\",\n \"price_change_percentage_1y\",\n \"market_cap_change_24h\",\n ]\n single_stats = {}\n for col in market_single_columns:\n single_stats[col] = market_data.get(col)\n single_stats.update(denominated_data)\n\n try:\n single_stats[\"circulating_supply_to_total_supply_ratio\"] = (\n single_stats[\"circulating_supply\"] / single_stats[\"total_supply\"]\n )\n except (ZeroDivisionError, TypeError) as e:\n logger.exception(str(e))\n console.print(e)\n df = pd.Series(single_stats).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n return df[df[\"Value\"].notna()]\n\n @log_start_end(log=logger)\n def get_all_time_high(self, currency: str = \"usd\") -> pd.DataFrame:\n \"\"\"Get all time high data for given coin. [Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n All time high price data\n Metric,Value\n \"\"\"\n\n market_data = self.coin.get(\"market_data\", {})\n if market_data == {}:\n return pd.DataFrame()\n ath_columns = [\n \"current_price\",\n \"ath\",\n \"ath_date\",\n \"ath_change_percentage\",\n ]\n\n results = {}\n for column in ath_columns:\n results[column] = market_data[column].get(currency)\n\n df = pd.Series(results).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n df[\"Metric\"] = df[\"Metric\"].apply(lambda x: x.replace(\"Ath\", \"All Time High\"))\n df[\"Metric\"] = df[\"Metric\"] + f\" {currency.upper()}\"\n return df[df[\"Value\"].notna()]\n\n @log_start_end(log=logger)\n def get_all_time_low(self, currency: str = \"usd\") -> pd.DataFrame:\n \"\"\"Get all time low data for given coin. [Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n All time low price data\n Metric,Value\n \"\"\"\n\n market_data = self.coin.get(\"market_data\", {})\n if market_data == {}:\n return pd.DataFrame()\n\n ath_columns = [\n \"current_price\",\n \"atl\",\n \"atl_date\",\n \"atl_change_percentage\",\n ]\n results = {}\n for column in ath_columns:\n results[column] = market_data[column].get(currency)\n\n df = pd.Series(results).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n df[\"Metric\"] = df[\"Metric\"].apply(lambda x: x.replace(\"Atl\", \"All Time Low\"))\n df[\"Metric\"] = df[\"Metric\"] + f\" {currency.upper()}\"\n return df[df[\"Value\"].notna()]\n\n @log_start_end(log=logger)\n def get_scores(self) -> pd.DataFrame:\n \"\"\"Get different kind of scores for given coin. 
[Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Social, community, sentiment scores for coin\n Metric,Value\n \"\"\"\n\n score_columns = [\n \"coingecko_rank\",\n \"coingecko_score\",\n \"developer_score\",\n \"community_score\",\n \"liquidity_score\",\n \"sentiment_votes_up_percentage\",\n \"sentiment_votes_down_percentage\",\n \"public_interest_score\",\n \"community_data\",\n \"public_interest_stats\",\n ]\n\n single_stats = {col: self.coin.get(col) for col in score_columns[:-2]}\n nested_stats = {}\n for col in score_columns[-2:]:\n _dct = self.coin.get(col, {})\n for k, _ in _dct.items():\n nested_stats[k] = _dct.get(k, {})\n\n single_stats.update(nested_stats)\n df = pd.Series(single_stats).reset_index()\n df.replace({0: \"\"}, inplace=True)\n df = df.fillna(\"\")\n df.columns = [\"Metric\", \"Value\"]\n\n # pylint: disable=unsupported-assignment-operation\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n return df[df[\"Value\"].notna()]\n\n @log_start_end(log=logger)\n def get_coin_market_chart(\n self, vs_currency: str = \"usd\", days: int = 30, **kwargs: Any\n ) -> pd.DataFrame:\n \"\"\"Get prices for given coin. [Source: CoinGecko]\n\n Parameters\n ----------\n vs_currency: str\n currency vs which display data\n days: int\n number of days to display the data\n kwargs\n\n Returns\n -------\n pandas.DataFrame\n Prices for given coin\n Columns: time, price, currency\n \"\"\"\n\n prices = self.client.get_coin_market_chart_by_id(\n self.coin_symbol, vs_currency, days, **kwargs\n )\n prices = prices[\"prices\"]\n df = pd.DataFrame(data=prices, columns=[\"time\", \"price\"])\n df[\"time\"] = pd.to_datetime(df.time, unit=\"ms\")\n df = df.set_index(\"time\")\n df[\"currency\"] = vs_currency\n return df\n\n @log_start_end(log=logger)\n def get_ohlc(self, vs_currency: str = \"usd\", days: int = 90) -> pd.DataFrame:\n \"\"\"Get Open, High, Low, Close prices for given coin. [Source: CoinGecko]\n\n Parameters\n ----------\n vs_currency: str\n currency vs which display data\n days: int\n number of days to display the data\n on from (1/7/14/30/90/180/365, max)\n\n Returns\n -------\n pandas.DataFrame\n OHLC data for coin\n Columns: time, price, currency\n \"\"\"\n\n prices = self.client.get_coin_ohlc_by_id(self.coin_symbol, vs_currency, days)\n df = pd.DataFrame(data=prices, columns=[\"time\", \"open\", \"high\", \"low\", \"close\"])\n df[\"time\"] = pd.to_datetime(df.time, unit=\"ms\")\n df = df.set_index(\"time\")\n df[\"currency\"] = vs_currency\n return df\n\n\n@log_start_end(log=logger)\ndef get_ohlc(symbol: str, vs_currency: str = \"usd\", days: int = 90) -> pd.DataFrame:\n \"\"\"Get Open, High, Low, Close prices for given coin. [Source: CoinGecko]\n\n Parameters\n ----------\n vs_currency: str\n currency vs which display data\n days: int\n number of days to display the data\n on from (1/7/14/30/90/180/365, max)\n\n Returns\n -------\n pandas.DataFrame\n OHLC data for coin\n Columns: time, price, currency\n \"\"\"\n client = CoinGeckoAPI()\n prices = client.get_coin_ohlc_by_id(symbol, vs_currency, days)\n df = pd.DataFrame(data=prices, columns=[\"date\", \"Open\", \"High\", \"Low\", \"Close\"])\n df[\"date\"] = pd.to_datetime(df.date, unit=\"ms\")\n df = df.set_index(\"date\")\n return df\n"
] | [
[
"pandas.plotting.register_matplotlib_converters"
],
[
"pandas.to_datetime",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
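The `get_coin_potential_returns` function in the `pycoingecko_model.py` entry above supports three modes (compare against a specific coin, against the top-N coins by market cap, or against a target price) and returns a DataFrame with the columns listed in its `COLUMNS` constant. A short usage sketch, assuming the OpenBB Terminal package and `pycoingecko` are importable and that network access to the CoinGecko API is available; the coin ids are just examples:

    from openbb_terminal.cryptocurrency.due_diligence.pycoingecko_model import (
        get_coin_potential_returns,
    )

    # What would algorand's price be if it reached bitcoin's market cap?
    df = get_coin_potential_returns(main_coin="algorand", vs="bitcoin")

    # Same question asked against the top-5 coins by market cap
    df_top = get_coin_potential_returns(main_coin="algorand", top=5)

    print(df[["Coin", "Target Coin", "Potential Price ($)", "Change (%)"]])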
haesleinhuepf/pyclesperanto_prototype | [
"65bc3035d3b2b61a2722c93b95bae310bfbd190e",
"65bc3035d3b2b61a2722c93b95bae310bfbd190e"
] | [
"pyclesperanto_prototype/_tier8/_affine_transform.py",
"tests/test_merge_touching_labels.py"
] | [
"from typing import Union\n\nfrom .._tier0 import plugin_function\nfrom .._tier0 import Image\nfrom .._tier0 import push\nfrom ._AffineTransform3D import AffineTransform3D\nfrom skimage.transform import AffineTransform\nimport numpy as np\n\n@plugin_function\ndef affine_transform(source : Image, destination : Image = None, transform : Union[np.ndarray, AffineTransform3D, AffineTransform] = None, linear_interpolation : bool = False):\n \"\"\"\n Applies an affine transform to an image.\n\n Parameters\n ----------\n source : Image\n image to be transformed\n destination : Image, optional\n image where the transformed image should be written to\n transform : 4x4 numpy array or AffineTransform3D object or skimage.transform.AffineTransform object\n transform matrix or object describing the transformation\n linear_interpolation: bool\n not implemented yet\n\n Returns\n -------\n destination\n\n \"\"\"\n import numpy as np\n from .._tier0 import empty_image_like\n from .._tier0 import execute\n from .._tier1 import copy\n from .._tier0 import create\n from .._tier1 import copy_slice\n\n # deal with 2D input images\n if len(source.shape) == 2:\n source_3d = create([1, source.shape[0], source.shape[1]])\n copy_slice(source, source_3d, 0)\n source = source_3d\n\n # deal with 2D output images\n original_destination = destination\n copy_back_after_transforming = False\n if len(destination.shape) == 2:\n destination = create([1, destination.shape[0], destination.shape[1]])\n copy_slice(original_destination, destination, 0)\n copy_back_after_transforming = True\n\n # we invert the transform because we go from the target image to the source image to read pixels\n if isinstance(transform, AffineTransform3D):\n transform_matrix = np.asarray(transform.copy().inverse())\n elif isinstance(transform, AffineTransform):\n matrix = np.asarray(transform.params)\n matrix = np.asarray([\n [matrix[0,0], matrix[0,1], 0, matrix[0,2]],\n [matrix[1,0], matrix[1,1], 0, matrix[1,2]],\n [0, 0, 1, 0],\n [matrix[2,0], matrix[2,1], 0, matrix[2,2]]\n ])\n transform_matrix = np.linalg.inv(matrix)\n else:\n transform_matrix = np.linalg.inv(transform)\n\n gpu_transform_matrix = push(transform_matrix)\n\n kernel_suffix = ''\n if linear_interpolation:\n image = empty_image_like(source)\n copy(source, image)\n source = image\n kernel_suffix = '_interpolate'\n\n\n parameters = {\n \"input\": source,\n \"output\": destination,\n \"mat\": gpu_transform_matrix\n }\n\n execute(__file__, '../clij-opencl-kernels/kernels/affine_transform_' + str(len(destination.shape)) + 'd' + kernel_suffix + '_x.cl',\n 'affine_transform_' + str(len(destination.shape)) + 'd' + kernel_suffix, destination.shape, parameters)\n\n # deal with 2D output images\n if copy_back_after_transforming:\n copy_slice(destination, original_destination, 0)\n\n return original_destination",
"import pyclesperanto_prototype as cle\nimport numpy as np\n\ndef test_merge_touching_labels():\n\n gpu_input = cle.push(np.asarray([\n [\n [1, 1, 0, 0, 0],\n [0, 2, 2, 0, 3],\n [0, 0, 2, 0, 3],\n ]\n ]))\n gpu_output = cle.create_like(gpu_input)\n\n gpu_reference = cle.push(np.asarray([\n [\n [1, 1, 0, 0, 0],\n [0, 1, 1, 0, 2],\n [0, 0, 1, 0, 2],\n ]\n ]))\n\n\n\n cle.merge_touching_labels(gpu_input, gpu_output)\n\n a = cle.pull(gpu_output)\n b = cle.pull(gpu_reference)\n\n print(a)\n print(b)\n\n assert (np.array_equal(a, b))"
] | [
[
"numpy.asarray",
"numpy.linalg.inv"
],
[
"numpy.asarray",
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
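The `affine_transform` function in the `_affine_transform.py` entry above accepts a 4x4 matrix, an `AffineTransform3D`, or a 2D `skimage.transform.AffineTransform`, promotes 2D images to single-slice 3D stacks, and inverts the matrix before running the OpenCL kernel. A usage sketch, assuming the function is re-exported as `cle.affine_transform` and that the `plugin_function` decorator creates the destination image when it is omitted (both assumptions; neither is shown in the snippet above), and that an OpenCL device is available:

    import numpy as np
    from skimage.transform import AffineTransform
    import pyclesperanto_prototype as cle

    image = np.zeros((5, 5), dtype=np.float32)
    image[2, 2] = 1.0

    # Translate the 2D image; internally it is copied into a [1, 5, 5] stack,
    # transformed on the GPU, and copied back into a 2D result.
    transform = AffineTransform(translation=(1, 2))
    result = cle.affine_transform(cle.push(image), transform=transform)
    print(cle.pull(result))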
tourdeml/SAM | [
"08cb3cccb39157859a1c77ef1e1852120df4a790"
] | [
"sam/utils.py"
] | [
"from typing import Iterable, Callable\n\nimport torch\nfrom torch.optim import Optimizer\n\n\ndef compute_sam(group: dict, closure: Callable):\n grads = []\n params_with_grads = []\n\n rho = group['rho']\n # update internal_optim's learning rate\n\n for p in group['params']:\n if p.grad is not None:\n # without clone().detach(), p.grad will be zeroed by closure()\n grads.append(p.grad.clone().detach())\n params_with_grads.append(p)\n device = grads[0].device\n\n # compute \\hat{\\epsilon}=\\rho/\\norm{g}\\|g\\|\n grad_norm = torch.stack(\n [g.detach().norm(2).to(device) for g in grads]).norm(2)\n epsilon = grads # alias for readability\n torch._foreach_mul_(epsilon, rho / grad_norm)\n\n # virtual step toward \\epsilon\n torch._foreach_add_(params_with_grads, epsilon)\n # compute g=\\nabla_w L_B(w)|_{w+\\hat{\\epsilon}}\n closure()\n # virtual step back to the original point\n torch._foreach_sub_(params_with_grads, epsilon)\n"
] | [
[
"torch._foreach_add_",
"torch._foreach_sub_",
"torch._foreach_mul_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
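The `compute_sam` helper in the `sam/utils.py` entry above builds the SAM perturbation by scaling every gradient by `rho / ||g||`, where `||g||` is the global 2-norm over all parameter gradients, then takes a virtual step to `w + epsilon`, calls the closure to obtain gradients there, and steps back so a wrapping optimizer can apply the sharpness-aware gradients. A tiny numeric check of that scaling, with made-up gradient tensors (not from the source):

    import torch

    rho = 0.05
    grads = [torch.tensor([3.0, 4.0]), torch.tensor([0.0])]

    # Global 2-norm over all gradients: sqrt(3^2 + 4^2 + 0^2) == 5.0
    grad_norm = torch.stack([g.norm(2) for g in grads]).norm(2)

    # epsilon_i = rho * g_i / ||g||  (compute_sam does this in place via torch._foreach_mul_)
    epsilon = [g * (rho / grad_norm) for g in grads]
    print(epsilon[0])  # tensor([0.0300, 0.0400])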
andrewor14/benchmarks | [
"cb2457bbda6138b3e0af95a6d50b7d476d52c410"
] | [
"scripts/tf_cnn_benchmarks/models/ssd_model.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n\"\"\"SSD300 Model Configuration.\n\nReferences:\n Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,\n Cheng-Yang Fu, Alexander C. Berg\n SSD: Single Shot MultiBox Detector\n arXiv:1512.02325\n\nPorted from MLPerf reference implementation:\n https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\nimport os\nimport re\nimport threading\nimport tensorflow as tf\n\nimport constants\nimport mlperf\nimport ssd_constants\nfrom cnn_util import log_fn\nfrom models import model as model_lib\nfrom models import resnet_model\n\nBACKBONE_MODEL_SCOPE_NAME = 'resnet34_backbone'\n\n\nclass SSD300Model(model_lib.CNNModel):\n \"\"\"Single Shot Multibox Detection (SSD) model for 300x300 image datasets.\"\"\"\n\n def __init__(self, label_num=ssd_constants.NUM_CLASSES, batch_size=32,\n learning_rate=1e-3, backbone='resnet34', params=None):\n super(SSD300Model, self).__init__('ssd300', 300, batch_size, learning_rate,\n params=params)\n # For COCO dataset, 80 categories + 1 background = 81 labels\n self.label_num = label_num\n\n # Currently only support ResNet-34 as backbone model\n if backbone != 'resnet34':\n raise ValueError('Invalid backbone model %s for SSD.' % backbone)\n mlperf.logger.log(key=mlperf.tags.BACKBONE, value=backbone)\n\n # Number of channels and default boxes associated with the following layers:\n # ResNet34 layer, Conv7, Conv8_2, Conv9_2, Conv10_2, Conv11_2\n self.out_chan = [256, 512, 512, 256, 256, 256]\n mlperf.logger.log(key=mlperf.tags.LOC_CONF_OUT_CHANNELS,\n value=self.out_chan)\n\n # Number of default boxes from layers of different scales\n # 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4\n self.num_dboxes = [4, 6, 6, 6, 4, 4]\n mlperf.logger.log(key=mlperf.tags.NUM_DEFAULTS_PER_CELL,\n value=self.num_dboxes)\n\n # TODO(haoyuzhang): in order to correctly restore in replicated mode, need\n # to create a saver for each tower before graph is finalized. Use variable\n # manager for better efficiency.\n self.backbone_savers = []\n\n # Collected predictions for eval stage. It maps each image id in eval\n # dataset to a dict containing the following information:\n # source_id: raw ID of image\n # raw_shape: raw shape of image\n # pred_box: encoded box coordinates of prediction\n # pred_scores: scores of classes in prediction\n self.predictions = {}\n\n # Global step when predictions are collected.\n self.eval_global_step = 0\n\n # Average precision. In asynchronous eval mode, this is the latest AP we\n # get so far and may not be the results at current eval step.\n self.eval_coco_ap = 0\n\n # Process, queues, and thread for asynchronous evaluation. 
When enabled,\n # create a separte process (async_eval_process) that continously pull\n # intermediate results from the predictions queue (a multiprocessing queue),\n # process them, and push final results into results queue (another\n # multiprocessing queue). The main thread is responsible to push message\n # into predictions queue, and start a separate thread to continuously pull\n # messages from results queue to update final results.\n # Message in predictions queue should be a tuple of two elements:\n # (evaluation step, predictions)\n # Message in results queue should be a tuple of two elements:\n # (evaluation step, final results)\n self.async_eval_process = None\n self.async_eval_predictions_queue = None\n self.async_eval_results_queue = None\n self.async_eval_results_getter_thread = None\n\n # The MLPerf reference uses a starting lr of 1e-3 at bs=32.\n self.base_lr_batch_size = 32\n\n def skip_final_affine_layer(self):\n return True\n\n def add_backbone_model(self, cnn):\n # --------------------------------------------------------------------------\n # Resnet-34 backbone model -- modified for SSD\n # --------------------------------------------------------------------------\n\n # Input 300x300, output 150x150\n cnn.conv(64, 7, 7, 2, 2, mode='SAME_RESNET', use_batch_norm=True)\n cnn.mpool(3, 3, 2, 2, mode='SAME')\n\n resnet34_layers = [3, 4, 6, 3]\n version = 'v1'\n\n # ResNet-34 block group 1\n # Input 150x150, output 75x75\n for i in range(resnet34_layers[0]):\n # Last argument forces residual_block to use projection shortcut, even\n # though the numbers of input and output channels are equal\n resnet_model.residual_block(cnn, 64, 1, version)\n\n # ResNet-34 block group 2\n # Input 75x75, output 38x38\n for i in range(resnet34_layers[1]):\n stride = 2 if i == 0 else 1\n resnet_model.residual_block(cnn, 128, stride, version, i == 0)\n\n # ResNet-34 block group 3\n # This block group is modified: first layer uses stride=1 so that the image\n # size does not change in group of layers\n # Input 38x38, output 38x38\n for i in range(resnet34_layers[2]):\n # The following line is intentionally commented out to differentiate from\n # the original ResNet-34 model\n # stride = 2 if i == 0 else 1\n resnet_model.residual_block(cnn, 256, stride, version, i == 0)\n\n # ResNet-34 block group 4: removed final block group\n # The following 3 lines are intentially commented out to differentiate from\n # the original ResNet-34 model\n # for i in range(resnet34_layers[3]):\n # stride = 2 if i == 0 else 1\n # resnet_model.residual_block(cnn, 512, stride, version, i == 0)\n\n def add_inference(self, cnn):\n cnn.use_batch_norm = True\n cnn.batch_norm_config = {'decay': ssd_constants.BATCH_NORM_DECAY,\n 'epsilon': ssd_constants.BATCH_NORM_EPSILON,\n 'scale': True}\n\n with tf.variable_scope(BACKBONE_MODEL_SCOPE_NAME):\n self.add_backbone_model(cnn)\n\n # --------------------------------------------------------------------------\n # SSD additional layers\n # --------------------------------------------------------------------------\n\n def add_ssd_layer(cnn, depth, k_size, stride, mode):\n return cnn.conv(depth, k_size, k_size, stride, stride,\n mode=mode, use_batch_norm=False,\n kernel_initializer=tf.contrib.layers.xavier_initializer())\n\n # Activations for feature maps of different layers\n self.activations = [cnn.top_layer]\n # Conv7_1, Conv7_2\n # Input 38x38, output 19x19\n add_ssd_layer(cnn, 256, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))\n\n # Conv8_1, 
Conv8_2\n # Input 19x19, output 10x10\n add_ssd_layer(cnn, 256, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))\n\n # Conv9_1, Conv9_2\n # Input 10x10, output 5x5\n add_ssd_layer(cnn, 128, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 256, 3, 2, 'same'))\n\n # Conv10_1, Conv10_2\n # Input 5x5, output 3x3\n add_ssd_layer(cnn, 128, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))\n\n # Conv11_1, Conv11_2\n # Input 3x3, output 1x1\n add_ssd_layer(cnn, 128, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))\n\n self.loc = []\n self.conf = []\n\n for nd, ac, oc in zip(self.num_dboxes, self.activations, self.out_chan):\n l = cnn.conv(nd * 4, 3, 3, 1, 1, input_layer=ac,\n num_channels_in=oc, activation=None, use_batch_norm=False,\n kernel_initializer=tf.contrib.layers.xavier_initializer())\n scale = l.get_shape()[-1]\n # shape = [batch_size, nd * 4, scale, scale]\n l = tf.reshape(l, [self.batch_size, nd, 4, scale, scale])\n # shape = [batch_size, nd, 4, scale, scale]\n l = tf.transpose(l, [0, 1, 3, 4, 2])\n # shape = [batch_size, nd, scale, scale, 4]\n self.loc.append(tf.reshape(l, [self.batch_size, -1, 4]))\n # shape = [batch_size, nd * scale * scale, 4]\n\n c = cnn.conv(nd * self.label_num, 3, 3, 1, 1, input_layer=ac,\n num_channels_in=oc, activation=None, use_batch_norm=False,\n kernel_initializer=tf.contrib.layers.xavier_initializer())\n # shape = [batch_size, nd * label_num, scale, scale]\n c = tf.reshape(c, [self.batch_size, nd, self.label_num, scale, scale])\n # shape = [batch_size, nd, label_num, scale, scale]\n c = tf.transpose(c, [0, 1, 3, 4, 2])\n # shape = [batch_size, nd, scale, scale, label_num]\n self.conf.append(tf.reshape(c, [self.batch_size, -1, self.label_num]))\n # shape = [batch_size, nd * scale * scale, label_num]\n\n # Shape of locs: [batch_size, NUM_SSD_BOXES, 4]\n # Shape of confs: [batch_size, NUM_SSD_BOXES, label_num]\n locs, confs = tf.concat(self.loc, 1), tf.concat(self.conf, 1)\n\n # Pack location and confidence outputs into a single output layer\n # Shape of logits: [batch_size, NUM_SSD_BOXES, 4+label_num]\n logits = tf.concat([locs, confs], 2)\n\n cnn.top_layer = logits\n cnn.top_size = 4 + self.label_num\n\n return cnn.top_layer\n\n def get_learning_rate(self, global_step, batch_size):\n rescaled_lr = self.get_scaled_base_learning_rate(batch_size)\n # Defined in MLPerf reference model\n boundaries = [160000, 200000]\n boundaries = [b * self.base_lr_batch_size // batch_size for b in boundaries]\n decays = [1, 0.1, 0.01]\n learning_rates = [rescaled_lr * d for d in decays]\n lr = tf.train.piecewise_constant(global_step, boundaries, learning_rates)\n warmup_steps = int(118287 / batch_size * 5)\n warmup_lr = (\n rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(\n warmup_steps, tf.float32))\n return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)\n\n def get_scaled_base_learning_rate(self, batch_size):\n \"\"\"Calculates base learning rate for creating lr schedule.\n\n In replicated mode, gradients are summed rather than averaged which, with\n the sgd and momentum optimizers, increases the effective learning rate by\n lr * num_gpus. 
Dividing the base lr by num_gpus negates the increase.\n\n Args:\n batch_size: Total batch-size.\n\n Returns:\n Base learning rate to use to create lr schedule.\n \"\"\"\n base_lr = self.learning_rate\n if self.params.variable_update == 'replicated':\n base_lr = self.learning_rate / self.params.num_gpus\n scaled_lr = base_lr * (batch_size / self.base_lr_batch_size)\n return scaled_lr\n\n def _collect_backbone_vars(self):\n backbone_vars = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES, scope='.*'+ BACKBONE_MODEL_SCOPE_NAME)\n var_list = {}\n\n # Assume variables in the checkpoint are following the naming convention of\n # a model checkpoint trained with TF official model\n # TODO(haoyuzhang): the following variable name parsing is hacky and easy\n # to break if there is change in naming convention of either benchmarks or\n # official models.\n for v in backbone_vars:\n # conv2d variable example (model <-- checkpoint):\n # v/cg/conv24/conv2d/kernel:0 <-- conv2d_24/kernel\n if 'conv2d' in v.name:\n re_match = re.search(r'conv(\\d+)/conv2d/(.+):', v.name)\n if re_match:\n layer_id = int(re_match.group(1))\n param_name = re_match.group(2)\n vname_in_ckpt = self._var_name_in_official_model_ckpt(\n 'conv2d', layer_id, param_name)\n var_list[vname_in_ckpt] = v\n\n # batchnorm varariable example:\n # v/cg/conv24/batchnorm25/gamma:0 <-- batch_normalization_25/gamma\n elif 'batchnorm' in v.name:\n re_match = re.search(r'batchnorm(\\d+)/(.+):', v.name)\n if re_match:\n layer_id = int(re_match.group(1))\n param_name = re_match.group(2)\n vname_in_ckpt = self._var_name_in_official_model_ckpt(\n 'batch_normalization', layer_id, param_name)\n var_list[vname_in_ckpt] = v\n\n return var_list\n\n def _var_name_in_official_model_ckpt(self, layer_name, layer_id, param_name):\n \"\"\"Return variable names according to convention in TF official models.\"\"\"\n vname_in_ckpt = layer_name\n if layer_id > 0:\n vname_in_ckpt += '_' + str(layer_id)\n vname_in_ckpt += '/' + param_name\n return vname_in_ckpt\n\n def loss_function(self, inputs, build_network_result):\n logits = build_network_result.logits\n\n # Unpack model output back to locations and confidence scores of predictions\n # Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]\n # Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]\n pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)\n\n # Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]\n # Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]\n # Shape of num_gt: [batch_size]\n _, gt_loc, gt_label, num_gt = inputs\n gt_label = tf.cast(gt_label, tf.int32)\n\n box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)\n class_loss = self._classification_loss(pred_label, gt_label, num_gt)\n\n tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))\n tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))\n return class_loss + box_loss\n\n def _localization_loss(self, pred_loc, gt_loc, gt_label, num_matched_boxes):\n \"\"\"Computes the localization loss.\n\n Computes the localization loss using smooth l1 loss.\n Args:\n pred_loc: a flatten tensor that includes all predicted locations. The\n shape is [batch_size, num_anchors, 4].\n gt_loc: a tensor representing box regression targets in\n [batch_size, num_anchors, 4].\n gt_label: a tensor that represents the classification groundtruth targets.\n The shape is [batch_size, num_anchors, 1].\n num_matched_boxes: the number of anchors that are matched to a groundtruth\n targets, used as the loss normalizater. 
The shape is [batch_size].\n Returns:\n box_loss: a float32 representing total box regression loss.\n \"\"\"\n mask = tf.greater(tf.squeeze(gt_label), 0)\n float_mask = tf.cast(mask, tf.float32)\n\n smooth_l1 = tf.reduce_sum(tf.losses.huber_loss(\n gt_loc, pred_loc,\n reduction=tf.losses.Reduction.NONE\n ), axis=2)\n smooth_l1 = tf.multiply(smooth_l1, float_mask)\n box_loss = tf.reduce_sum(smooth_l1, axis=1)\n\n return tf.reduce_mean(box_loss / num_matched_boxes)\n\n def _classification_loss(self, pred_label, gt_label, num_matched_boxes):\n \"\"\"Computes the classification loss.\n\n Computes the classification loss with hard negative mining.\n Args:\n pred_label: a flatten tensor that includes all predicted class. The shape\n is [batch_size, num_anchors, num_classes].\n gt_label: a tensor that represents the classification groundtruth targets.\n The shape is [batch_size, num_anchors, 1].\n num_matched_boxes: the number of anchors that are matched to a groundtruth\n targets. This is used as the loss normalizater.\n Returns:\n box_loss: a float32 representing total box regression loss.\n \"\"\"\n cross_entropy = tf.losses.sparse_softmax_cross_entropy(\n gt_label, pred_label, reduction=tf.losses.Reduction.NONE)\n\n mask = tf.greater(tf.squeeze(gt_label), 0)\n float_mask = tf.cast(mask, tf.float32)\n\n # Hard example mining\n neg_masked_cross_entropy = cross_entropy * (1 - float_mask)\n relative_position = tf.contrib.framework.argsort(\n tf.contrib.framework.argsort(\n neg_masked_cross_entropy, direction='DESCENDING'))\n num_neg_boxes = tf.minimum(\n tf.to_int32(num_matched_boxes) * ssd_constants.NEGS_PER_POSITIVE,\n ssd_constants.NUM_SSD_BOXES)\n top_k_neg_mask = tf.cast(tf.less(\n relative_position,\n tf.tile(num_neg_boxes[:, tf.newaxis], (1, ssd_constants.NUM_SSD_BOXES))\n ), tf.float32)\n\n class_loss = tf.reduce_sum(\n tf.multiply(cross_entropy, float_mask + top_k_neg_mask), axis=1)\n\n return tf.reduce_mean(class_loss / num_matched_boxes)\n\n def add_backbone_saver(self):\n # Create saver with mapping from variable names in checkpoint of backbone\n # model to variables in SSD model\n backbone_var_list = self._collect_backbone_vars()\n self.backbone_savers.append(tf.train.Saver(backbone_var_list))\n\n def load_backbone_model(self, sess, backbone_model_path):\n for saver in self.backbone_savers:\n saver.restore(sess, backbone_model_path)\n\n def get_input_data_types(self, subset):\n if subset == 'validation':\n return [self.data_type, tf.float32, tf.float32, tf.float32, tf.int32]\n return [self.data_type, tf.float32, tf.float32, tf.float32]\n\n def get_input_shapes(self, subset):\n \"\"\"Return encoded tensor shapes for train and eval data respectively.\"\"\"\n if subset == 'validation':\n # Validation data shapes:\n # 1. images\n # 2. ground truth locations of boxes\n # 3. ground truth classes of objects in boxes\n # 4. source image IDs\n # 5. raw image shapes\n return [\n [self.batch_size, self.image_size, self.image_size, self.depth],\n [self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 4],\n [self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 1],\n [self.batch_size],\n [self.batch_size, 3],\n ]\n\n # Training data shapes:\n # 1. images\n # 2. ground truth locations of boxes\n # 3. ground truth classes of objects in boxes\n # 4. 
numbers of objects in images\n return [\n [self.batch_size, self.image_size, self.image_size, self.depth],\n [self.batch_size, ssd_constants.NUM_SSD_BOXES, 4],\n [self.batch_size, ssd_constants.NUM_SSD_BOXES, 1],\n [self.batch_size]\n ]\n\n def accuracy_function(self, inputs, logits):\n \"\"\"Returns the ops to measure the mean precision of the model.\"\"\"\n try:\n import ssd_dataloader # pylint: disable=g-import-not-at-top\n from object_detection.box_coders import faster_rcnn_box_coder # pylint: disable=g-import-not-at-top\n from object_detection.core import box_coder # pylint: disable=g-import-not-at-top\n from object_detection.core import box_list # pylint: disable=g-import-not-at-top\n except ImportError:\n raise ImportError('To use the COCO dataset, you must clone the '\n 'repo https://github.com/tensorflow/models and add '\n 'tensorflow/models and tensorflow/models/research to '\n 'the PYTHONPATH, and compile the protobufs by '\n 'following https://github.com/tensorflow/models/blob/'\n 'master/research/object_detection/g3doc/installation.md'\n '#protobuf-compilation ; To evaluate using COCO'\n 'metric, download and install Python COCO API from'\n 'https://github.com/cocodataset/cocoapi')\n\n # Unpack model output back to locations and confidence scores of predictions\n # pred_locs: relative locations (coordiates) of objects in all SSD boxes\n # shape: [batch_size, NUM_SSD_BOXES, 4]\n # pred_labels: confidence scores of objects being of all categories\n # shape: [batch_size, NUM_SSD_BOXES, label_num]\n pred_locs, pred_labels = tf.split(logits, [4, self.label_num], 2)\n\n ssd_box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(\n scale_factors=ssd_constants.BOX_CODER_SCALES)\n anchors = box_list.BoxList(\n tf.convert_to_tensor(ssd_dataloader.DefaultBoxes()('ltrb')))\n pred_boxes = box_coder.batch_decode(\n encoded_boxes=pred_locs, box_coder=ssd_box_coder, anchors=anchors)\n\n pred_scores = tf.nn.softmax(pred_labels, axis=2)\n\n # TODO(haoyuzhang): maybe use `gt_boxes` and `gt_classes` for visualization.\n _, gt_boxes, gt_classes, source_id, raw_shape = inputs # pylint: disable=unused-variable\n\n return {\n (constants.UNREDUCED_ACCURACY_OP_PREFIX +\n ssd_constants.PRED_BOXES): pred_boxes,\n (constants.UNREDUCED_ACCURACY_OP_PREFIX +\n ssd_constants.PRED_SCORES): pred_scores,\n # TODO(haoyuzhang): maybe use these values for visualization.\n # constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_boxes': gt_boxes,\n # constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_classes': gt_classes,\n (constants.UNREDUCED_ACCURACY_OP_PREFIX +\n ssd_constants.SOURCE_ID): source_id,\n (constants.UNREDUCED_ACCURACY_OP_PREFIX +\n ssd_constants.RAW_SHAPE): raw_shape\n }\n\n def postprocess(self, results):\n \"\"\"Postprocess results returned from model.\"\"\"\n try:\n import coco_metric # pylint: disable=g-import-not-at-top\n except ImportError:\n raise ImportError('To use the COCO dataset, you must clone the '\n 'repo https://github.com/tensorflow/models and add '\n 'tensorflow/models and tensorflow/models/research to '\n 'the PYTHONPATH, and compile the protobufs by '\n 'following https://github.com/tensorflow/models/blob/'\n 'master/research/object_detection/g3doc/installation.md'\n '#protobuf-compilation ; To evaluate using COCO'\n 'metric, download and install Python COCO API from'\n 'https://github.com/cocodataset/cocoapi')\n\n pred_boxes = results[ssd_constants.PRED_BOXES]\n pred_scores = results[ssd_constants.PRED_SCORES]\n # TODO(haoyuzhang): maybe use these values for visualization.\n # gt_boxes = 
results['gt_boxes']\n # gt_classes = results['gt_classes']\n source_id = results[ssd_constants.SOURCE_ID]\n raw_shape = results[ssd_constants.RAW_SHAPE]\n\n # COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once. Due\n # to rounding errors (i.e., COCO_NUM_VAL_IMAGES % batch_size != 0), setting\n # `num_eval_epochs` to 1 is not enough and will often miss some images. We\n # expect user to set `num_eval_epochs` to >1, which will leave some unused\n # images from previous steps in `predictions`. Here we check if we are doing\n # eval at a new global step.\n if results['global_step'] > self.eval_global_step:\n self.eval_global_step = results['global_step']\n self.predictions.clear()\n\n for i, sid in enumerate(source_id):\n self.predictions[int(sid)] = {\n ssd_constants.PRED_BOXES: pred_boxes[i],\n ssd_constants.PRED_SCORES: pred_scores[i],\n ssd_constants.SOURCE_ID: source_id[i],\n ssd_constants.RAW_SHAPE: raw_shape[i]\n }\n\n # COCO metric calculates mAP only after a full epoch of evaluation. Return\n # dummy results for top_N_accuracy to be compatible with benchmar_cnn.py.\n if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES:\n log_fn('Got results for all {:d} eval examples. Calculate mAP...'.format(\n ssd_constants.COCO_NUM_VAL_IMAGES))\n\n annotation_file = os.path.join(self.params.data_dir,\n ssd_constants.ANNOTATION_FILE)\n # Size of predictions before decoding about 15--30GB, while size after\n # decoding is 100--200MB. When using async eval mode, decoding takes\n # 20--30 seconds of main thread time but is necessary to avoid OOM during\n # inter-process communication.\n decoded_preds = coco_metric.decode_predictions(self.predictions.values())\n self.predictions.clear()\n\n if self.params.collect_eval_results_async:\n def _eval_results_getter():\n \"\"\"Iteratively get eval results from async eval process.\"\"\"\n while True:\n step, eval_results = self.async_eval_results_queue.get()\n self.eval_coco_ap = eval_results['COCO/AP']\n mlperf.logger.log_eval_accuracy(\n self.eval_coco_ap, step, self.batch_size * self.params.num_gpus)\n if self.reached_target():\n # Reached target, clear all pending messages in predictions queue\n # and insert poison pill to stop the async eval process.\n while not self.async_eval_predictions_queue.empty():\n self.async_eval_predictions_queue.get()\n self.async_eval_predictions_queue.put('STOP')\n break\n\n if not self.async_eval_process:\n # Limiting the number of messages in predictions queue to prevent OOM.\n # Each message (predictions data) can potentially consume a lot of\n # memory, and normally there should only be few messages in the queue.\n # If often blocked on this, consider reducing eval frequency.\n self.async_eval_predictions_queue = multiprocessing.Queue(2)\n self.async_eval_results_queue = multiprocessing.Queue()\n\n # Reason to use a Process as opposed to Thread is mainly the\n # computationally intensive eval runner. 
Python multithreading is not\n # truly running in parallel, a runner thread would get significantly\n # delayed (or alternatively delay the main thread).\n self.async_eval_process = multiprocessing.Process(\n target=coco_metric.async_eval_runner,\n args=(self.async_eval_predictions_queue,\n self.async_eval_results_queue,\n annotation_file))\n self.async_eval_process.daemon = True\n self.async_eval_process.start()\n\n self.async_eval_results_getter_thread = threading.Thread(\n target=_eval_results_getter, args=())\n self.async_eval_results_getter_thread.daemon = True\n self.async_eval_results_getter_thread.start()\n\n self.async_eval_predictions_queue.put(\n (self.eval_global_step, decoded_preds))\n return {'top_1_accuracy': 0, 'top_5_accuracy': 0.}\n\n eval_results = coco_metric.compute_map(decoded_preds, annotation_file)\n self.eval_coco_ap = eval_results['COCO/AP']\n ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}\n for metric_key, metric_value in eval_results.items():\n ret[constants.SIMPLE_VALUE_RESULT_PREFIX + metric_key] = metric_value\n mlperf.logger.log_eval_accuracy(self.eval_coco_ap, self.eval_global_step,\n self.batch_size * self.params.num_gpus)\n return ret\n log_fn('Got {:d} out of {:d} eval examples.'\n ' Waiting for the remaining to calculate mAP...'.format(\n len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES))\n return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}\n\n def get_synthetic_inputs(self, input_name, nclass):\n \"\"\"Generating synthetic data matching real data shape and type.\"\"\"\n inputs = tf.random_uniform(\n self.get_input_shapes('train')[0], dtype=self.data_type)\n inputs = tf.contrib.framework.local_variable(inputs, name=input_name)\n boxes = tf.random_uniform(\n [self.batch_size, ssd_constants.NUM_SSD_BOXES, 4], dtype=tf.float32)\n classes = tf.random_uniform(\n [self.batch_size, ssd_constants.NUM_SSD_BOXES, 1], dtype=tf.float32)\n nboxes = tf.random_uniform(\n [self.batch_size], minval=1, maxval=10, dtype=tf.float32)\n return (inputs, boxes, classes, nboxes)\n\n def reached_target(self):\n return (self.params.stop_at_top_1_accuracy and\n self.eval_coco_ap >= self.params.stop_at_top_1_accuracy)\n"
] | [
[
"tensorflow.cond",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.to_int32",
"tensorflow.get_collection",
"tensorflow.squeeze",
"tensorflow.contrib.framework.argsort",
"tensorflow.contrib.framework.local_variable",
"tensorflow.train.piecewise_constant",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.train.Saver",
"tensorflow.tile",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.split",
"tensorflow.multiply",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.losses.huber_loss",
"tensorflow.variable_scope",
"tensorflow.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
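The SSD classification loss in the row above does hard negative mining with a double argsort: ranking the background anchors by cross-entropy and keeping only the `num_matched * NEGS_PER_POSITIVE` hardest ones. A minimal NumPy sketch of that ranking trick, with purely illustrative names and values (`neg_losses`, `num_matched`, `negs_per_positive` are hypothetical, not taken from the benchmark code):

import numpy as np

# Per-anchor cross-entropy of the negative (background) anchors of one image;
# hypothetical values standing in for `cross_entropy * (1 - float_mask)`.
neg_losses = np.array([0.2, 1.5, 0.1, 3.0, 0.7])

# argsort of argsort turns losses into ranks: 0 for the largest loss, 1 for the
# second largest, and so on (mirrors the DESCENDING double argsort in the model).
order = np.argsort(-neg_losses)          # anchor indices from largest to smallest loss
ranks = np.argsort(order)                # rank of each anchor within that ordering

num_matched = 1                          # matched (positive) anchors in this image
negs_per_positive = 3                    # plays the role of ssd_constants.NEGS_PER_POSITIVE
num_neg = min(num_matched * negs_per_positive, len(neg_losses))

top_k_neg_mask = (ranks < num_neg).astype(np.float32)
print(top_k_neg_mask)                    # keeps the 3 hardest negatives: [0. 1. 0. 1. 1.]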
Nagasaki45/deep_disfluency | [
"4c57a194433af9601ebef0e4c9a451cce4c06252"
] | [
"deep_disfluency/rnn/elman.py"
] | [
"import theano\nimport numpy as np\nimport os\n\nfrom theano import tensor as T\nfrom collections import OrderedDict\n\n# nb might be theano.config.floatX\ndtype = T.config.floatX # @UndefinedVariable\n\n\nclass Elman(object):\n\n def __init__(self, ne, de, na, nh, n_out, cs, npos,\n update_embeddings=True):\n '''\n ne :: number of word embeddings in the vocabulary\n de :: dimension of the word embeddings\n na :: number of acoustic or language model features at each word step\n (acoustic context size in frames * number of features)\n nh :: dimension of the hidden layer\n n_out :: number of classes\n cs :: word window context size\n npos :: number of pos tags\n '''\n # parameters of the model\n self.emb = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n (ne + 1, de)).\n astype(dtype)) # add one for PADDING\n if na == 0:\n # NB original one, now Wx becomes much bigger with acoustic data\n self.Wx = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n ((de * cs) +\n (npos * cs),\n nh))\n .astype(dtype))\n else:\n self.Wx = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n ((de * cs) +\n (npos * cs) +\n na, nh))\n .astype(dtype))\n self.Wh = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(dtype))\n self.W = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n (nh, n_out))\n .astype(dtype))\n self.bh = theano.shared(np.zeros(nh, dtype=dtype))\n self.b = theano.shared(np.zeros(n_out, dtype=dtype))\n self.h0 = theano.shared(np.zeros(nh, dtype=dtype))\n # Use the eye function (diagonal 1s) for the POS, small in memory\n self.pos = T.eye(npos, npos, 0)\n self.n_acoust = na # the number of acoustic features\n\n # Weights for L1 and L2\n self.L1_reg = 0.0\n self.L2_reg = 0.00001\n\n # without embeddings updates\n self.params = [self.Wx, self.Wh, self.W, self.bh, self.b, self.h0]\n self.names = ['Wx', 'Wh', 'W', 'bh', 'b', 'h0']\n if update_embeddings:\n self.params = [self.emb, self.Wx, self.Wh, self.W, self.bh,\n self.b, self.h0]\n self.names = ['embeddings', 'Wx', 'Wh', 'W', 'bh', 'b', 'h0']\n\n # as many columns as context window size/lines as words in the sentence\n self.idxs = T.imatrix()\n self.pos_idxs = T.imatrix()\n\n # simply a matrix: number of features * length sentence\n self.extra_features = T.matrix()\n\n # TODO Old version no pos\n # x = self.emb[self.idxs].reshape((self.idxs.shape[0], de*cs))\n\n if na == 0:\n # POS version, not just the embeddings\n # but with the POS window concatenated\n x = T.concatenate((self.emb[self.idxs].reshape((self.idxs.shape[0],\n de*cs)),\n self.pos[self.pos_idxs].reshape(\n (self.pos_idxs.shape[0],\n npos*cs))), 1)\n else:\n # TODO new version with extra features\n x = T.concatenate((self.emb[self.idxs].reshape((self.idxs.shape[0],\n de*cs)),\n self.pos[self.pos_idxs].reshape(\n (self.pos_idxs.shape[0],\n npos*cs)),\n self.extra_features), 1)\n self.y = T.iscalar('y') # label\n # TODO for sentences\n # self.y = T.ivector('y') #labels for whole sentence\n\n def recurrence(x_t, h_tm1):\n h_t = T.nnet.sigmoid(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) +\n self.bh)\n s_t = T.nnet.softmax(T.dot(h_t, self.W) + self.b)\n return [h_t, s_t]\n\n [h, s], _ = theano.scan(fn=recurrence,\n sequences=x, outputs_info=[self.h0, None],\n n_steps=x.shape[0])\n\n p_y_given_x_lastword = s[-1, 0, :]\n p_y_given_x_sentence = s[:, 0, :]\n p_y_given_x_sentence_hidden = (h, s[:, 0, :])\n y_pred = T.argmax(p_y_given_x_sentence, axis=1)\n\n # TODO adding this- zero one loss for the last word\n # y_pred_word = T.argmax(p_y_given_x_lastword)\n\n # learning 
rate not hard coded as could decay\n self.lr = T.scalar('lr')\n\n # Cost: standard nll loss\n self.nll = -T.mean(T.log(p_y_given_x_lastword)[self.y])\n self.sentence_nll = -T.mean(T.log(p_y_given_x_sentence)\n [T.arange(x.shape[0]), self.y])\n\n if na == 0:\n self.classify = theano.function(inputs=[self.idxs, self.pos_idxs],\n outputs=y_pred)\n else:\n self.classify = theano.function(inputs=[self.idxs, self.pos_idxs,\n self.extra_features],\n outputs=y_pred)\n\n # regularisation terms\n # L1 norm ; one regularization option is to enforce L1 norm to\n # be small\n # if not using this set this to 0 to avoid unecessary computation\n self.L1 = 0\n # self.L1 = abs(self.Wh.sum()) + abs(self.Wx.sum()) + \\\n # abs(self.W.sum()) + abs(self.emb.sum())\\\n # + abs(self.bh.sum()) + abs(self.b.sum()) + abs(self.h0.sum())\n\n # square of L2 norm ; one regularization option is to enforce\n # square of L2 norm to be small\n self.L2_sqr = (self.Wh ** 2).sum() + (self.Wx ** 2).sum() +\\\n (self.W ** 2).sum() + (self.emb ** 2).sum() +\\\n (self.bh ** 2).sum() + (self.b ** 2).sum() +\\\n (self.h0 ** 2).sum()\n\n self.cost = self.nll \\\n + self.L1_reg * self.L1 \\\n + self.L2_reg * self.L2_sqr\n gradients = T.grad(self.cost, self.params)\n\n self.updates = OrderedDict((p, p-self.lr*g)\n for p, g in zip(self.params, gradients))\n\n # costs for multiple labels (one for each in the input)\n self.sentence_cost = self.sentence_nll \\\n + self.L1_reg * self.L1 \\\n + self.L2_reg * self.L2_sqr\n sentence_gradients = T.grad(self.sentence_cost, self.params)\n\n self.sentence_updates = OrderedDict((p, p - self.lr*g)\n for p, g in\n zip(self.params,\n sentence_gradients))\n\n if na == 0:\n self.soft_max = theano.function(inputs=[self.idxs, self.pos_idxs],\n outputs=p_y_given_x_sentence)\n self.soft_max_return_hidden_layer = theano.function(\n inputs=[self.idxs, self.pos_idxs],\n outputs=p_y_given_x_sentence_hidden)\n else:\n self.soft_max = theano.function(inputs=[self.idxs, self.pos_idxs,\n self.extra_features],\n outputs=p_y_given_x_sentence)\n self.soft_max_return_hidden_layer = theano.function(\n inputs=[self.idxs, self.pos_idxs,\n self.extra_features],\n outputs=p_y_given_x_sentence_hidden)\n\n if na == 0:\n self.train = theano.function(inputs=[self.idxs, self.pos_idxs,\n self.y,\n self.lr],\n outputs=self.nll,\n updates=self.updates)\n else:\n self.train = theano.function(inputs=[self.idxs, self.pos_idxs,\n self.extra_features,\n self.y,\n self.lr],\n outputs=self.nll,\n updates=self.updates)\n\n self.normalize = theano.function(\n inputs=[],\n updates={self.emb:\n self.emb /\n T.sqrt((self.emb**2).sum(axis=1))\n .dimshuffle(0, 'x')}\n )\n\n def classify_by_index(self, word_idx, indices, pos_idx=None,\n extra_features=None):\n \"\"\"Classification method which assumes the dialogue matrix is\n in the right format.\n\n :param word_idx: window size * dialogue length matrix\n :param labels: vector dialogue length long\n :param indices: 2 * dialogue length matrix for start, stop indices\n :param pos_idx: pos window size * dialogue length matrix\n :param extra_features: number of features * dialogue length matrix\n \"\"\"\n output = []\n for start, stop in indices:\n\n if extra_features:\n\n output.extend(self.classify(word_idx[start:stop+1, :],\n pos_idx[start:stop+1, :],\n np.asarray(\n extra_features[start:stop+1, :],\n dtype='float32')\n )\n )\n else:\n output.extend(self.classify(word_idx[start:stop+1, :],\n pos_idx[start:stop+1, :]\n )\n )\n return output\n\n def fit(self, word_idx, labels, lr, indices, 
pos_idx=None,\n extra_features=None):\n \"\"\"Fit method which assumes the dialogue matrix is in the right\n format.\n\n :param word_idx: window size * dialogue length matrix\n :param labels: vector dialogue length long\n :param indices: 2 * dialogue length matrix for start, stop indices\n :param pos_idx: pos window size * dialogue length matrix\n :param extra_features: number of features * dialogue length matrix\n \"\"\"\n loss = 0\n test = 0\n testing = False\n for start, stop in indices:\n # print start, stop\n if testing:\n test += 1\n if test > 50:\n break\n if extra_features:\n\n x = self.train(word_idx[start:stop+1, :],\n pos_idx[start:stop+1, :],\n np.asarray(extra_features[start:stop+1, :],\n dtype='float32'),\n labels[stop],\n lr)\n else:\n x = self.train(word_idx[start:stop+1, :],\n pos_idx[start:stop+1, :],\n labels[stop],\n lr)\n loss += x\n self.normalize()\n return loss\n\n def shared_dataset(self, mycorpus, borrow=True, data_type='int32'):\n \"\"\" Load the dataset into shared variables \"\"\"\n return theano.shared(np.asarray(mycorpus, dtype=data_type),\n borrow=True)\n\n def load_weights_from_folder(self, folder):\n for name, param in zip(self.names, self.params):\n param.set_value(np.load(os.path.join(folder, name + \".npy\")))\n\n def load(self, folder):\n emb = np.load(os.path.join(folder, 'embeddings.npy'))\n Wx = np.load(os.path.join(folder, 'Wx.npy'))\n Wh = np.load(os.path.join(folder, 'Wh.npy'))\n W = np.load(os.path.join(folder, 'W.npy'))\n bh = np.load(os.path.join(folder, 'bh.npy'))\n b = np.load(os.path.join(folder, 'b.npy'))\n h0 = np.load(os.path.join(folder, 'h0.npy'))\n return emb, Wx, Wh, W, bh, b, h0\n\n def load_weights(self, emb=None, Wx=None, Wh=None, W=None, bh=None, b=None,\n h0=None):\n if emb is not None:\n self.emb.set_value(emb)\n if Wx is not None:\n self.Wx.set_value(Wx)\n if Wh is not None:\n self.Wh.set_value(Wh)\n if W is not None:\n self.W.set_value(W)\n if bh is not None:\n self.bh.set_value(bh)\n if b is not None:\n self.b.set_value(b)\n if h0 is not None:\n self.h0.set_value(h0)\n\n def save(self, folder):\n for param, name in zip(self.params, self.names):\n np.save(os.path.join(folder, name + '.npy'), param.get_value())\n"
] | [
[
"numpy.asarray",
"numpy.random.uniform",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
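The Theano `recurrence` in the row above computes h_t = sigmoid(x_t·Wx + h_{t-1}·Wh + bh) and s_t = softmax(h_t·W + b) inside a `theano.scan`. A minimal NumPy sketch of one forward pass over a short sequence, using small hypothetical dimensions and random weights rather than anything trained:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

rng = np.random.RandomState(0)
n_in, nh, n_out, n_steps = 6, 4, 3, 5              # hypothetical sizes
Wx = 0.2 * rng.uniform(-1.0, 1.0, (n_in, nh))
Wh = 0.2 * rng.uniform(-1.0, 1.0, (nh, nh))
W  = 0.2 * rng.uniform(-1.0, 1.0, (nh, n_out))
bh, b, h = np.zeros(nh), np.zeros(n_out), np.zeros(nh)

x_seq = rng.uniform(-1.0, 1.0, (n_steps, n_in))    # stands in for the embedded word window
p_y_given_x_sentence = []
for x_t in x_seq:                                  # same role as theano.scan over the sequence
    h = sigmoid(x_t @ Wx + h @ Wh + bh)            # hidden state carried across word steps
    p_y_given_x_sentence.append(softmax(h @ W + b))

y_pred = np.argmax(np.stack(p_y_given_x_sentence), axis=1)
print(y_pred)                                      # one predicted label index per word step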
mrakitin/xrt | [
"a2d09296860386ed3a83cea45ab43e7959e58f33"
] | [
"xrt/runner.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nModule :mod:`runner` defines the entry point of xrt - :func:`run_ray_tracing`,\ncontainers for job properties and functions for running the processes or\nthreads and accumulating the resulting histograms.\n\"\"\"\n__author__ = \"Konstantin Klementiev, Roman Chernikov\"\n__date__ = \"26 Mar 2016\"\n\nimport os\nimport sys\nimport time\nimport inspect\nimport pickle\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport multiprocessing\nimport errno\nimport threading\nif sys.version_info < (3, 1):\n import Queue\nelse:\n import queue\n Queue = queue\nimport uuid # is needed on some platforms with pyopencl # analysis:ignore\n\nfrom . import multipro\nfrom .backends import raycing\n\n# _DEBUG = True\n__fdir__ = os.path.abspath(os.path.dirname(__file__))\nrunCardVals = None\nrunCardProcs = None\n_plots = []\n\n\ndef retry_on_eintr(function, *args, **kw):\n \"\"\"\n Suggested in:\n http://mail.python.org/pipermail/python-list/2011-February/1266462.html\n as a solution for `IOError: [Errno 4] Interrupted system call` in Linux.\n \"\"\"\n while True:\n try:\n return function(*args, **kw)\n except IOError as e:\n if e.errno == errno.EINTR:\n continue\n else:\n raise\n\n\nclass RunCardVals(object):\n \"\"\"\n Serves as a global container for a sub-set of run properties passed by the\n user to :func:`run_ray_tracing`. The sub-set is limited to pickleable\n objects for passing it to job processes or threads.\n \"\"\"\n def __init__(self, threads, processes, repeats, updateEvery, pickleEvery,\n backend, globalNorm, runfile):\n if threads >= processes:\n self.Event = threading.Event\n self.Queue = Queue.Queue\n else:\n self.Event = multiprocessing.Event\n self.Queue = multiprocessing.Queue\n\n self.stop_event = self.Event()\n self.finished_event = self.Event()\n self.stop_event.clear()\n self.finished_event.clear()\n\n self.threads = threads\n self.processes = processes\n self.repeats = repeats\n self.updateEvery = updateEvery\n self.pickleEvery = pickleEvery\n self.backend = backend\n self.globalNorm = globalNorm\n self.runfile = runfile\n self.passNo = 0\n self.savedResults = []\n self.iteration = 0\n self.lastRunsPickleName = os.path.join(__fdir__, 'lastRuns.pickle')\n self.lastRuns = []\n try:\n with open(self.lastRunsPickleName, 'rb') as f:\n self.lastRuns = pickle.load(f)\n except: # analysis:ignore\n pass\n if self.lastRuns:\n print(\"The last {0} run{1}\".format(len(self.lastRuns),\n 's' if len(self.lastRuns) > 1 else ''))\n for lastRun in self.lastRuns:\n if len(lastRun) > 3:\n print(\"{0}::\".format(lastRun[3]))\n st0 = time.strftime(\"%a, %d %b %Y %H:%M:%S\", lastRun[0])\n if (time.strftime(\"%a, %d %b %Y\", lastRun[0]) ==\n time.strftime(\"%a, %d %b %Y\", lastRun[1])):\n st1 = time.strftime(\"%H:%M:%S\", lastRun[1])\n else:\n st1 = time.strftime(\"%a, %d %b %Y %H:%M:%S\", lastRun[1])\n print(\"start: {0}; stop: {1}; duration: {2:.1f} s\".format(\n st0, st1, lastRun[2]))\n\n\nclass RunCardProcs(object):\n \"\"\"\n Serves as a global container for a sub-set of run properties passed by the\n user to :func:`run_ray_tracing` limited to functions. 
These cannot be\n passed to job processes or threads (because are not pickleable) and have to\n be executed by the job server (this module).\n \"\"\"\n def __init__(self, afterScript, afterScriptArgs, afterScriptKWargs):\n self.afterScript = afterScript\n self.afterScriptArgs = afterScriptArgs\n self.afterScriptKWargs = afterScriptKWargs\n self.generatorNorm = None\n self.generatorPlot = None\n\n\ndef set_repeats(repeats=0):\n if runCardVals is not None:\n runCardVals.repeats = repeats\n\n\ndef _simple_generator():\n \"\"\"\n The simplest generator for running only one ray-tracing study. Search\n examples for generators that run complex ray-tracing studies.\n \"\"\"\n yield\n\n\ndef start_jobs():\n \"\"\"\n Restores the plots if requested and if the persistent files exist and\n starts the qt timer of the 1st plot.\n \"\"\"\n for plot in _plots:\n if plot.persistentName:\n plot.restore_plots()\n try:\n plot.fig.canvas.manager.set_window_title(plot.title)\n except AttributeError:\n pass\n\n runCardVals.iteration = np.long(0)\n noTimer = len(_plots) == 0 or\\\n (plt.get_backend().lower() in (x.lower() for x in\n mpl.rcsetup.non_interactive_bk))\n if noTimer:\n print(\"The job is running... \")\n while True:\n sys.stdout.flush()\n res = dispatch_jobs()\n tFromStart = time.time() - runCardVals.tstart\n msg = '{0} of {1} in {2:.1f} s'.format(\n runCardVals.iteration, runCardVals.repeats, tFromStart)\n if os.name == 'posix':\n sys.stdout.write(\"\\r\\x1b[K \" + msg)\n else:\n sys.stdout.write(\"\\r \")\n print(msg+' ')\n if res:\n return\n else:\n plot = _plots[0]\n plot.areProcessAlreadyRunning = False\n plot.timer = plot.fig.canvas.new_timer()\n plot.timer.add_callback(plot.timer_callback)\n plot.timer.start()\n\n\ndef dispatch_jobs():\n \"\"\"Runs the jobs in separate processes or threads and collects the resulted\n histograms from the output queues. One cannot run this function in a loop\n because the redrawing will not work. 
Instead, it is started from a timer\n event handler of a qt-graph.\"\"\"\n if (runCardVals.iteration >= runCardVals.repeats) or \\\n runCardVals.stop_event.is_set():\n on_finish()\n return True\n one_iteration()\n if (runCardVals.iteration >= runCardVals.repeats) or \\\n runCardVals.stop_event.is_set():\n on_finish()\n return True\n if runCardVals.iteration % runCardVals.updateEvery == 0:\n for plot in _plots:\n plot.plot_plots()\n if runCardVals.pickleEvery:\n if runCardVals.iteration % runCardVals.pickleEvery == 0:\n for plot in _plots:\n plot.store_plots()\n if len(_plots) > 0:\n _plots[0].areProcessAlreadyRunning = False\n\n\ndef one_iteration():\n \"\"\"The body of :func:`dispatch_jobs`.\"\"\"\n plots2Pickle = [plot.card_copy() for plot in _plots]\n outPlotQueues = [runCardVals.Queue() for plot in _plots]\n alarmQueue = runCardVals.Queue()\n\n# in the 1st iteration the plots may require some of x, y, e limits to be\n# calculated and thus this case is special:\n cpus = max(runCardVals.threads, runCardVals.processes)\n\n if runCardVals.iteration == 0:\n runCardVals.uniqueFirstRun = False\n if hasattr(runCardVals, 'beamLine'):\n bl = runCardVals.beamLine\n bl.forceAlign = False\n for oe in bl.oes + bl.slits + bl.screens:\n if raycing.is_auto_align_required(oe):\n bl.forceAlign = True\n runCardVals.uniqueFirstRun = True\n break\n\n if not runCardVals.uniqueFirstRun:\n for plot in _plots:\n xLimitsDefined = (plot.xaxis.limits is not None) and\\\n (not isinstance(plot.xaxis.limits, str))\n yLimitsDefined = (plot.yaxis.limits is not None) and\\\n (not isinstance(plot.yaxis.limits, str))\n cLimitsDefined = (plot.caxis.limits is not None) and\\\n (not isinstance(plot.caxis.limits, str)) or plot.ePos == 0\n if not (xLimitsDefined and yLimitsDefined and cLimitsDefined):\n runCardVals.uniqueFirstRun = True\n break\n\n if runCardVals.uniqueFirstRun:\n cpus = 1\n\n elif runCardVals.iteration == 1:\n if runCardVals.uniqueFirstRun: # balances the 1st iteration\n cpus -= 1\n\n if cpus < 1:\n cpus = 1\n\n if runCardVals.backend.startswith('raycing'):\n runCardVals.beamLine.alarms = []\n\n if runCardVals.threads >= runCardVals.processes or cpus == 1:\n BackendOrProcess = multipro.BackendThread\n else:\n BackendOrProcess = multipro.BackendProcess\n processes = [BackendOrProcess(runCardVals, plots2Pickle, outPlotQueues,\n alarmQueue, icpu) for icpu in range(cpus)]\n# print('top process:', os.getpid())\n for pid, p in enumerate(processes):\n p.ppid = pid + runCardVals.iteration\n p.start()\n\n for p in processes:\n if runCardVals.backend.startswith('raycing'):\n runCardVals.beamLine.alarms = retry_on_eintr(alarmQueue.get)\n for alarm in runCardVals.beamLine.alarms:\n print(alarm)\n outList = [0, ]\n for plot, aqueue in zip(_plots, outPlotQueues):\n outList = retry_on_eintr(aqueue.get)\n\n if len(outList) == 0:\n continue\n if (runCardVals.iteration >= runCardVals.repeats) or \\\n runCardVals.stop_event.is_set():\n continue\n\n plot.nRaysAll += outList[13]\n if runCardVals.backend.startswith('shadow'):\n plot.nRaysNeeded += outList[14]\n elif runCardVals.backend.startswith('raycing'):\n nRaysVarious = outList[14]\n plot.nRaysAlive += nRaysVarious[0]\n plot.nRaysGood += nRaysVarious[1]\n plot.nRaysOut += nRaysVarious[2]\n plot.nRaysOver += nRaysVarious[3]\n plot.nRaysDead += nRaysVarious[4]\n plot.nRaysAccepted += nRaysVarious[5]\n plot.nRaysAcceptedE += nRaysVarious[6]\n plot.nRaysSeeded += nRaysVarious[7]\n plot.nRaysSeededI += nRaysVarious[8]\n plot.displayAsAbsorbedPower = outList[15]\n\n for iaxis, axis 
in enumerate(\n [plot.xaxis, plot.yaxis, plot.caxis]):\n if (iaxis == 2) and (not plot.ePos):\n continue\n axis.total1D += outList[0+iaxis*3]\n axis.total1D_RGB += outList[1+iaxis*3]\n if runCardVals.iteration == 0:\n axis.binEdges = outList[2+iaxis*3]\n plot.total2D += outList[9]\n plot.total2D_RGB += outList[10]\n if plot.fluxKind.lower().endswith('4d'):\n plot.total4D += outList[11]\n elif plot.fluxKind.lower().endswith('pca'):\n plot.total4D.append(outList[11])\n plot.intensity += outList[12]\n\n if runCardVals.iteration == 0: # needed for multiprocessing\n plot.set_axes_limits(*outList.pop())\n\n tFromStart = time.time() - runCardVals.tstart\n plot.textStatus.set_text(\n \"{0} of {1} in {2:.1f} s (right click to stop)\".format(\n runCardVals.iteration+1, runCardVals.repeats, tFromStart))\n# aqueue.task_done()\n\n if len(outList) > 0:\n runCardVals.iteration += 1\n for p in processes:\n p.join(60.)\n if hasattr(runCardVals, 'beamLine'):\n bl = runCardVals.beamLine\n bl.forceAlign = False\n if bl.flowSource == 'legacy':\n bl.flowSource = 'done_once'\n\n\ndef on_finish():\n \"\"\"Executed on exit from the ray-tracing iteration loop.\"\"\"\n if len(_plots) > 0:\n plot = _plots[0]\n if plt.get_backend().lower() not in (\n x.lower() for x in mpl.rcsetup.non_interactive_bk):\n plot.timer.stop()\n plot.timer.remove_callback(plot.timer_callback)\n plot.areProcessAlreadyRunning = False\n for plot in _plots:\n if plot.fluxKind.startswith('E') and \\\n plot.fluxKind.lower().endswith('pca'):\n xbin, zbin = plot.xaxis.bins, plot.yaxis.bins\n plot.total4D = np.concatenate(plot.total4D).reshape(-1, xbin, zbin)\n plot.field3D = plot.total4D\n plot.textStatus.set_text('')\n plot.fig.canvas.mpl_disconnect(plot.cidp)\n plot.plot_plots()\n plot.save()\n runCardVals.tstop = time.time()\n runCardVals.tstopLong = time.localtime()\n print('The ray tracing with {0} iteration{1} took {2:0.1f} s'.format(\n runCardVals.iteration, 's' if runCardVals.iteration > 1 else '',\n runCardVals.tstop-runCardVals.tstart))\n runCardVals.finished_event.set()\n for plot in _plots:\n if runCardVals.globalNorm or plot.persistentName:\n plot.store_plots()\n if runCardVals.stop_event.is_set():\n print('Interrupted by user after iteration {0}'.format(\n runCardVals.iteration))\n return\n try:\n if runCardProcs.generatorPlot is not None:\n if sys.version_info < (3, 1):\n runCardProcs.generatorPlot.next()\n else:\n next(runCardProcs.generatorPlot)\n except StopIteration:\n pass\n else:\n for plot in _plots:\n plot.clean_plots()\n start_jobs()\n return\n\n if runCardVals.globalNorm:\n aSavedResult = -1\n print('normalizing ...')\n for aRenormalization in runCardProcs.generatorNorm:\n for plot in _plots:\n aSavedResult += 1\n saved = runCardVals.savedResults[aSavedResult]\n plot.clean_plots()\n saved.restore(plot)\n try:\n plot.fig.canvas.manager.set_window_title(plot.title)\n except AttributeError:\n pass\n for runCardVals.passNo in [1, 2]:\n plot.plot_plots()\n plot.save('_norm' + str(runCardVals.passNo))\n\n print('finished')\n\n runCardVals.lastRuns.append([runCardVals.tstartLong, runCardVals.tstopLong,\n runCardVals.tstop-runCardVals.tstart,\n runCardVals.runfile])\n try:\n with open(runCardVals.lastRunsPickleName, 'wb') as f:\n pickle.dump(runCardVals.lastRuns[-10:], f, protocol=2)\n except OSError: # Read-only file system\n pass # no history tracking of last 10 runs\n\n# plt.close('all')\n if runCardProcs.afterScript:\n runCardProcs.afterScript(\n *runCardProcs.afterScriptArgs, **runCardProcs.afterScriptKWargs)\n\n\ndef 
normalize_sibling_plots(plots):\n print('normalization started')\n max1Dx = 0\n max1Dy = 0\n max1Dc = 0\n max1Dx_RGB = 0\n max1Dy_RGB = 0\n max1Dc_RGB = 0\n max2D_RGB = 0\n for plot in plots:\n if max1Dx < plot.xaxis.max1D:\n max1Dx = plot.xaxis.max1D\n if max1Dy < plot.yaxis.max1D:\n max1Dy = plot.yaxis.max1D\n if max1Dc < plot.caxis.max1D:\n max1Dc = plot.caxis.max1D\n if max1Dx_RGB < plot.xaxis.max1D_RGB:\n max1Dx_RGB = plot.xaxis.max1D_RGB\n if max1Dy_RGB < plot.yaxis.max1D_RGB:\n max1Dy_RGB = plot.yaxis.max1D_RGB\n if max1Dc_RGB < plot.caxis.max1D_RGB:\n max1Dc_RGB = plot.caxis.max1D_RGB\n if max2D_RGB < plot.max2D_RGB:\n max2D_RGB = plot.max2D_RGB\n\n for plot in plots:\n plot.xaxis.globalMax1D = max1Dx\n plot.yaxis.globalMax1D = max1Dy\n plot.caxis.globalMax1D = max1Dc\n plot.xaxis.globalMax1D_RGB = max1Dx_RGB\n plot.yaxis.globalMax1D_RGB = max1Dy_RGB\n plot.caxis.globalMax1D_RGB = max1Dc_RGB\n plot.globalMax2D_RGB = max2D_RGB\n\n for runCardVals.passNo in [1, 2]:\n for plot in plots:\n plot.plot_plots()\n plot.save('_norm' + str(runCardVals.passNo))\n print('normalization finished')\n\n\ndef run_ray_tracing(\n plots=[], repeats=1, updateEvery=1, pickleEvery=None, energyRange=None,\n backend='raycing', beamLine=None, threads=1, processes=1,\n generator=None, generatorArgs=[], generatorKWargs='auto', globalNorm=0,\n afterScript=None, afterScriptArgs=[], afterScriptKWargs={}):\n u\"\"\"\n This function is the entry point of xrt.\n Parameters are all optional except the 1st one. Please use them as keyword\n arguments because the list of parameters may change in future versions.\n\n *plots*: instance of :class:`~xrt.plotter.XYCPlot` or a sequence of\n instances or an empty sequence if no graphical output is wanted.\n\n *repeats*: int\n The number of ray tracing runs. It should be stressed that\n accumulated are not rays, which would be limited by the physical\n memory, but rather the histograms from each run are summed up. In\n this way the number of rays is unlimited.\n\n *updateEvery*: int\n Redrawing rate. Redrawing happens when the current iteration index\n is divisible by *updateEvery*.\n\n *pickleEvery*: int\n Saving rate. Applicable to plots with a defined *persistentName*.\n If None, the pickling will happen once at the end.\n\n *energyRange*: [*eMin*: float, *eMax*: float]\n Only in `shadow` backend: If not None, sets the energy range of\n shadow source. Alternatively, this can be done directly inside\n the *generator*.\n\n *backend*: str\n so far supported: {'shadow' | 'raycing' | 'dummy'}\n\n *beamLine*: instance of :class:`~xrt.backends.raycing.BeamLine`, used\n with `raycing` backend.\n\n *threads*, *processes*: int or str\n The number of parallel threads or processes, should not be greater\n than the number of cores in your computer, otherwise it gives no\n gain. The bigger of the two will be used as a signal for using\n either :mod:`threading` or :mod:`multiprocessing`. If they are\n equal, :mod:`threading` is used. See also\n :ref:`performance tests<tests>`. If 'all' is given then the number\n returned by multiprocessing.cpu_count() will be used.\n\n .. warning::\n You cannot use multiprocessing in combination with OpenCL\n because the resources (CPU or GPU) are already shared by\n OpenCL. You will get an error if *processes* > 1. You can still\n use *threads* > 1 but with a little gain.\n\n .. note::\n For the :mod:`shadow` backend you must create ``tmp0``,\n ``tmp1`` etc. directories (counted by *threads* or *processes*)\n in your working directory. 
Even if the execution is not\n parallelized, there must be ``tmp0`` with the shadow files\n prepared in it.\n\n *generator*: generator object\n A generator for running complex ray-tracing studies. It must modify\n the optics, specify the graph limits, define the output file names\n etc. in a loop and return to xrt by ``yield``.\n See the supplied examples.\n\n *generatorArgs*, *generatorKWargs*: list and (dictionary or 'auto')\n If *generatorKWargs* is 'auto', the following keyword dictionary\n will be used for the generator: kwargs = {} if *generator* is\n defined within the caller of :func:`run_ray_tracing` or if\n *generatorArgs* is not empty, otherwise\n kwargs = {'plots'=pots, 'beamLine'=beamLine}.\n\n .. _globalNorm:\n\n *globalNorm*: bool\n If True, the intensity of the histograms will be normalized to the\n global maximum throughout the series of graphs. There are two\n flavors of normalization:\n\n 1) only the heights of 1D histograms are globally normalized while\n the brightness is kept with the normalization to the local\n maximum (i.e. the maximum in the given graph).\n 2) both the heights of 1D histograms and the brightness of 1D and\n 2D histograms are globally normalized.\n\n The second way is physically more correct but sometimes is less\n visual: some of the normalized pictures may become too dark, e.g.\n when you compare focused and strongly unfocused images. Both\n normalizations are saved with suffixes ``_norm1`` and ``_norm2``\n for you to select the better one.\n\n Here is a normalization example where the intensity maximum was\n found throughout a series of images for filters of different\n thickness. The brightest image was for the case of no filter (not\n shown here) and the normalization shown below was done relative to\n that image:\n\n +------------------+-----------------------------------------+\n | normalized | |\n | to local maximum | |image_nonorm| |\n +------------------+-----------------------------------------+\n | global | |\n | normalization, | |\n | type 1 | |image_norm1| |\n +------------------+-----------------------------------------+\n | global | |\n | normalization, | |\n | type 2 | |image_norm2| |\n +------------------+-----------------------------------------+\n\n .. |image_nonorm| imagezoom:: _images/filterFootprint2_I400mum.png\n :scale: 50 %\n .. |image_norm1| imagezoom:: _images/filterFootprint2_I400mum_norm1.png\n :scale: 50 %\n .. |image_norm2| imagezoom:: _images/filterFootprint2_I400mum_norm2.png\n :scale: 50 %\n\n *afterScript*: function object\n This function is executed at the end of the current script. 
For\n example, it may run the next ray-tracing script.\n\n *afterScriptArgs*, *afterScriptKWargs*: list and dictionary\n args and kwargs for *afterScript*.\n\n\n \"\"\"\n global runCardVals, runCardProcs, _plots\n frm = inspect.stack()[1]\n mod = inspect.getmodule(frm[0])\n runfile = mod.__file__\n # patch for starting a script with processes>1 from Spyder console\n if not hasattr(mod, \"__spec__\"):\n mod.__spec__ = None\n\n if isinstance(plots, (list, tuple)):\n _plots = plots\n else:\n _plots = [plots, ]\n for plot in _plots:\n if backend == 'raycing':\n if plot.caxis.useCategory:\n plot.caxis.limits = [raycing.hueMin, raycing.hueMax]\n if isinstance(plot.rayFlag, int):\n plot.rayFlag = plot.rayFlag,\n if updateEvery < 1:\n updateEvery = 1\n if (repeats > 1) and (updateEvery > repeats):\n updateEvery = repeats\n cpuCount = multiprocessing.cpu_count()\n if isinstance(processes, str):\n if processes.startswith('a'): # all\n processes = cpuCount\n else:\n processes = max(cpuCount // 2, 1)\n if isinstance(threads, str):\n if threads.startswith('a'): # all\n threads = cpuCount\n else:\n threads = max(cpuCount // 2, 1)\n runCardVals = RunCardVals(threads, processes, repeats, updateEvery,\n pickleEvery, backend, globalNorm, runfile)\n runCardProcs = RunCardProcs(\n afterScript, afterScriptArgs, afterScriptKWargs)\n\n runCardVals.cwd = os.getcwd()\n if backend.startswith('shadow'):\n from .backends import shadow\n cpuCount = max(processes, threads)\n shadow.check_shadow_dirs(cpuCount, runCardVals.cwd)\n runCardVals.fWiggler, runCardVals.fPolar, runCardVals.blockNRays = \\\n shadow.init_shadow(cpuCount, runCardVals.cwd, energyRange)\n elif backend == 'raycing':\n runCardVals.beamLine = beamLine\n\n if generator is None:\n runCardProcs.generatorPlot = _simple_generator()\n else:\n if generatorKWargs == 'auto':\n if (generator.__name__ in sys._getframe(1).f_locals) or\\\n len(generatorArgs) > 0:\n # generator is defined within the caller function\n kwargs = {}\n else:\n # outside the caller\n kwargs = {'plots': plots, 'beamLine': beamLine}\n else:\n kwargs = generatorKWargs\n runCardProcs.generatorPlot = generator(*generatorArgs, **kwargs)\n if globalNorm:\n runCardProcs.generatorNorm = generator(*generatorArgs, **kwargs)\n\n if runCardProcs.generatorPlot is not None:\n if sys.version_info < (3, 1):\n runCardProcs.generatorPlot.next()\n else:\n next(runCardProcs.generatorPlot)\n\n runCardVals.tstart = time.time()\n runCardVals.tstartLong = time.localtime()\n start_jobs()\n plt.show()\n"
] | [
[
"numpy.concatenate",
"numpy.long",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_backend"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
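The xrt runner in the row above accumulates histograms rather than rays: each backend process or thread puts its per-iteration histogram on a queue and the dispatcher adds it into the plot's running totals, so the total number of traced rays is not limited by memory. A small self-contained sketch of that accumulate-from-worker-queues pattern using plain multiprocessing (the worker and variable names here are hypothetical and unrelated to the real multipro backends):

import multiprocessing
import numpy as np

def worker(nrays, bins, out_queue):
    # Each job "traces" its own batch of rays (random numbers here) and
    # returns only the resulting histogram, never the rays themselves.
    rays = np.random.normal(size=nrays)
    hist, _ = np.histogram(rays, bins=bins, range=(-4, 4))
    out_queue.put(hist)

if __name__ == "__main__":
    bins, repeats, nrays = 64, 4, 100000
    queue = multiprocessing.Queue()
    jobs = [multiprocessing.Process(target=worker, args=(nrays, bins, queue))
            for _ in range(repeats)]
    for p in jobs:
        p.start()
    total1D = np.zeros(bins)               # accumulated histogram, not accumulated rays
    for _ in jobs:
        total1D += queue.get()
    for p in jobs:
        p.join()
    print(int(total1D.sum()))              # counts summed over all jobs (rays outside (-4, 4) are not binned)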
luckykamon/Morpion | [
"a4da849a354c542fc5a79a3742a86b040df7e016"
] | [
"create_image/white.py"
] | [
"import imageio\nimport matplotlib.pyplot as plt\nimport Image\nimport numpy as np\n\nim = Image.new(\"RGB\", (65,65), \"white\")\npic = np.array(im)\nim=pic\nimageio.imsave(\"white.png\", im)\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
faymek/compression | [
"20c6745b741e266f7118e6b3fc88d22f6179cfdf"
] | [
"examples/varate.py"
] | [
"#%%\n\"\"\"\nbmshj2018\n\n\"\"\"\n\nimport argparse\nimport glob\nimport sys\n\nfrom absl import app\nfrom absl.flags import argparse_flags\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nimport tensorflow_compression as tfc\nfrom dynamic import *\n\n\nSCALES_MIN = 0.11\nSCALES_MAX = 256\nSCALES_LEVELS = 64\n\n\ndef read_png(filename):\n \"\"\"Loads a PNG image file.\"\"\"\n string = tf.read_file(filename)\n image = tf.image.decode_image(string, channels=3)\n image = tf.cast(image, tf.float32)\n image /= 255\n return image\n\n\ndef quantize_image(image):\n image = tf.round(image * 255)\n image = tf.saturate_cast(image, tf.uint8)\n return image\n\n\ndef write_png(filename, image):\n \"\"\"Saves an image to a PNG file.\"\"\"\n image = quantize_image(image)\n string = tf.image.encode_png(image)\n return tf.write_file(filename, string)\n\n\nclass AnalysisTransform(tf.keras.layers.Layer):\n \"\"\"The analysis transform.\"\"\"\n\n def __init__(self, num_filters, *args, **kwargs):\n self.num_filters = num_filters\n super(AnalysisTransform, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self._layers = [\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_0\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"gdn_0\")),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_1\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"gdn_1\")),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_2\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"gdn_2\")),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_3\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=None),\n ]\n super(AnalysisTransform, self).build(input_shape)\n\n def call(self, tensor):\n for layer in self._layers:\n tensor = layer(tensor)\n return tensor\n\n\nclass SynthesisTransform(tf.keras.layers.Layer):\n \"\"\"The synthesis transform.\"\"\"\n\n def __init__(self, num_filters, *args, **kwargs):\n self.num_filters = num_filters\n super(SynthesisTransform, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self._layers = [\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_0\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"igdn_0\", inverse=True)),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_1\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"igdn_1\", inverse=True)),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_2\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"igdn_2\", inverse=True)),\n DynamicSignalConv2D(\n 3, (5, 5), name=\"layer_3\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True,\n activation=None),\n ]\n super(SynthesisTransform, self).build(input_shape)\n\n def call(self, tensor):\n for layer in self._layers:\n tensor = layer(tensor)\n return tensor\n\n\nclass HyperAnalysisTransform(tf.keras.layers.Layer):\n \"\"\"The analysis transform for the entropy model parameters.\"\"\"\n\n def __init__(self, num_filters, *args, **kwargs):\n self.num_filters = num_filters\n super(HyperAnalysisTransform, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self._layers = [\n DynamicSignalConv2D(\n self.num_filters, (3, 3), 
name=\"layer_0\", corr=True, strides_down=1,\n padding=\"same_zeros\", use_bias=True,\n activation=tf.nn.relu),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_1\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=tf.nn.relu),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_2\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=False,\n activation=None),\n ]\n super(HyperAnalysisTransform, self).build(input_shape)\n\n def call(self, tensor):\n for layer in self._layers:\n tensor = layer(tensor)\n return tensor\n\n\nclass HyperSynthesisTransform(tf.keras.layers.Layer):\n \"\"\"The synthesis transform for the entropy model parameters.\"\"\"\n\n def __init__(self, num_filters, *args, **kwargs):\n self.num_filters = num_filters\n super(HyperSynthesisTransform, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self._layers = [\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_0\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True, kernel_parameterizer=None,\n activation=tf.nn.relu),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_1\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True, kernel_parameterizer=None,\n activation=tf.nn.relu),\n DynamicSignalConv2D(\n self.num_filters, (3, 3), name=\"layer_2\", corr=False, strides_up=1,\n padding=\"same_zeros\", use_bias=True, kernel_parameterizer=None,\n activation=None),\n ]\n super(HyperSynthesisTransform, self).build(input_shape)\n\n def call(self, tensor):\n for layer in self._layers:\n tensor = layer(tensor)\n return tensor\n\n\ndef train(args):\n \"\"\"Trains the model.\"\"\"\n\n if args.verbose:\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Create input data pipeline.\n with tf.device(\"/cpu:0\"):\n train_files = glob.glob(args.train_glob)\n if not train_files:\n raise RuntimeError(\n \"No training images found with glob '{}'.\".format(args.train_glob))\n train_dataset = tf.data.Dataset.from_tensor_slices(train_files)\n train_dataset = train_dataset.shuffle(buffer_size=len(train_files)).repeat()\n train_dataset = train_dataset.map(\n read_png, num_parallel_calls=args.preprocess_threads)\n train_dataset = train_dataset.map(\n lambda x: tf.random_crop(x, (args.patchsize, args.patchsize, 3)))\n train_dataset = train_dataset.batch(args.batchsize)\n train_dataset = train_dataset.prefetch(32)\n\n num_pixels = args.batchsize * args.patchsize ** 2\n\n # Get training patch from dataset.\n x = train_dataset.make_one_shot_iterator().get_next()\n\n # Instantiate model.\n analysis_transform = AnalysisTransform(args.num_filters)\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = DynamicEntropyBottleneck()\n\n # Build autoencoder and hyperprior.\n y = analysis_transform(x)\n z = hyper_analysis_transform(abs(y))\n z_tilde, z_likelihoods = entropy_bottleneck(z, training=True)\n sigma = hyper_synthesis_transform(z_tilde)\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table)\n y_tilde, y_likelihoods = conditional_bottleneck(y, training=True)\n x_tilde = synthesis_transform(y_tilde)\n\n # Total number of bits divided by number of pixels.\n train_bpp = (tf.reduce_sum(tf.log(y_likelihoods)) +\n 
tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)\n\n # Mean squared error across pixels.\n train_mse = tf.reduce_mean(tf.squared_difference(x, x_tilde))\n # Multiply by 255^2 to correct for rescaling.\n train_mse *= 255 ** 2\n\n # The rate-distortion cost.\n train_loss = args.lmbda * train_mse + train_bpp\n\n # Minimize loss and auxiliary loss, and execute update op.\n step = tf.train.create_global_step()\n main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)\n main_step = main_optimizer.minimize(train_loss, global_step=step)\n\n aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\n aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])\n\n train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])\n\n tf.summary.scalar(\"loss\", train_loss)\n tf.summary.scalar(\"bpp\", train_bpp)\n tf.summary.scalar(\"mse\", train_mse)\n\n tf.summary.image(\"original\", quantize_image(x))\n tf.summary.image(\"reconstruction\", quantize_image(x_tilde))\n\n hooks = [\n tf.train.StopAtStepHook(last_step=args.last_step),\n tf.train.NanTensorHook(train_loss),\n ]\n with tf.train.MonitoredTrainingSession(\n hooks=hooks, checkpoint_dir=args.checkpoint_dir,\n save_checkpoint_secs=300, save_summaries_secs=60) as sess:\n while not sess.should_stop():\n sess.run(train_op)\n\n\ndef test_train(args):\n \"\"\"Trains the model.\"\"\"\n\n if args.verbose:\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Create input data pipeline.\n with tf.device(\"/cpu:0\"):\n train_files = glob.glob(args.train_glob)\n if not train_files:\n raise RuntimeError(\n \"No training images found with glob '{}'.\".format(args.train_glob))\n train_dataset = tf.data.Dataset.from_tensor_slices(train_files)\n train_dataset = train_dataset.shuffle(buffer_size=len(train_files)).repeat()\n train_dataset = train_dataset.map(\n read_png, num_parallel_calls=args.preprocess_threads)\n train_dataset = train_dataset.map(\n lambda x: tf.random_crop(x, (args.patchsize, args.patchsize, 3)))\n train_dataset = train_dataset.batch(args.batchsize)\n train_dataset = train_dataset.prefetch(32)\n\n num_pixels = args.batchsize * args.patchsize ** 2\n\n # Get training patch from dataset.\n x = train_dataset.make_one_shot_iterator().get_next()\n\n # Instantiate model.\n analysis_transform = AnalysisTransform(args.num_filters)\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = DynamicEntropyBottleneck(name=\"entropy_bottleneck\")\n\n # Build autoencoder and hyperprior.\n y = analysis_transform(x)\n z = hyper_analysis_transform(abs(y))\n z_tilde, z_likelihoods = entropy_bottleneck(z, training=True)\n sigma = hyper_synthesis_transform(z_tilde)\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table)\n y_tilde, y_likelihoods = conditional_bottleneck(y, training=True)\n x_tilde = synthesis_transform(y_tilde)\n\n with tf.Session() as sess:\n latest = tf.train.latest_checkpoint(checkpoint_dir=\"./tfc256-05\")\n tf.train.Saver().restore(sess, save_path=latest)\n\n active_0 = 256\n x_tilde_0 = synthesis_transform(y_tilde[:,:,:,:active_0])\n train_bpp_0 = (tf.reduce_sum(tf.log(y_likelihoods[:,:,:,:active_0])) +\n tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)\n train_mse_0 = tf.reduce_mean(tf.squared_difference(x, x_tilde_0)) * 
(255**2)\n\n active_1 = 248\n x_tilde_1 = synthesis_transform(y_tilde[:,:,:,:active_1])\n train_bpp_1 = (tf.reduce_sum(tf.log(y_likelihoods[:,:,:,:active_1])) +\n tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)\n train_mse_1 = tf.reduce_mean(tf.squared_difference(x, x_tilde_1)) * (255**2)\n\n def RateOfWidth(W):\n return 0.0267 * np.exp(0.0178*W)\n\n # The rate-distortion cost.\n train_loss = train_mse_0 + train_mse_1 \\\n + 1000*tf.squared_difference(train_bpp_0, RateOfWidth(active_0)) \\\n + 1000*tf.squared_difference(train_bpp_1, RateOfWidth(active_1)) \n\n # Minimize loss and auxiliary loss, and execute update op.\n step = tf.train.create_global_step()\n main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)\n main_step = main_optimizer.minimize(train_loss, global_step=step)\n\n aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\n aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])\n\n train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])\n\n tf.summary.scalar(\"loss\", train_loss)\n tf.summary.scalar(\"bpp\", train_bpp_1)\n tf.summary.scalar(\"mse\", train_mse_1)\n\n tf.summary.image(\"original\", quantize_image(x))\n tf.summary.image(\"reconstruction\", quantize_image(x_tilde_1))\n\n hooks = [\n tf.train.StopAtStepHook(last_step=args.last_step),\n tf.train.NanTensorHook(train_loss),\n ]\n with tf.train.MonitoredTrainingSession(\n hooks=hooks, checkpoint_dir=args.checkpoint_dir,\n save_checkpoint_secs=300, save_summaries_secs=60) as sess:\n while not sess.should_stop():\n sess.run(train_op)\n\n\n\n\ndef compress(args):\n \"\"\"Compresses an image.\"\"\"\n\n # Load input image and add batch dimension.\n x = read_png(args.input_file)\n x = tf.expand_dims(x, 0)\n x.set_shape([1, None, None, 3])\n x_shape = tf.shape(x)\n\n # Instantiate model.\n analysis_transform = AnalysisTransform(args.num_filters)\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = tfc.EntropyBottleneck()\n\n # Transform and compress the image.\n y = analysis_transform(x)\n y_shape = tf.shape(y)\n z = hyper_analysis_transform(abs(y))\n z_hat, z_likelihoods = entropy_bottleneck(z, training=False)\n sigma = hyper_synthesis_transform(z_hat)\n sigma = sigma[:, :y_shape[1], :y_shape[2], :]\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table)\n side_string = entropy_bottleneck.compress(z)\n string = conditional_bottleneck.compress(y)\n\n # Transform the quantized image back (if requested).\n y_hat, y_likelihoods = conditional_bottleneck(y, training=False)\n x_hat = synthesis_transform(y_hat)\n x_hat = x_hat[:, :x_shape[1], :x_shape[2], :]\n\n num_pixels = tf.cast(tf.reduce_prod(tf.shape(x)[:-1]), dtype=tf.float32)\n\n # Total number of bits divided by number of pixels.\n eval_bpp = (tf.reduce_sum(tf.log(y_likelihoods)) +\n tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)\n\n # Bring both images back to 0..255 range.\n x *= 255\n x_hat = tf.clip_by_value(x_hat, 0, 1)\n x_hat = tf.round(x_hat * 255)\n\n mse = tf.reduce_mean(tf.squared_difference(x, x_hat))\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n with tf.Session() as sess:\n # Load the latest model checkpoint, get the compressed string and the 
tensor\n # shapes.\n latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)\n tf.train.Saver().restore(sess, save_path=latest)\n tensors = [string, side_string,\n tf.shape(x)[1:-1], tf.shape(y)[1:-1], tf.shape(z)[1:-1]]\n arrays = sess.run(tensors)\n\n # Write a binary file with the shape information and the compressed string.\n packed = tfc.PackedTensors()\n packed.pack(tensors, arrays)\n with open(args.output_file, \"wb\") as f:\n f.write(packed.string)\n\n # If requested, transform the quantized image back and measure performance.\n if args.verbose:\n eval_bpp, mse, psnr, msssim, num_pixels = sess.run(\n [eval_bpp, mse, psnr, msssim, num_pixels])\n\n # The actual bits per pixel including overhead.\n bpp = len(packed.string) * 8 / num_pixels\n\n print(\"Mean squared error: {:0.4f}\".format(mse))\n print(\"PSNR (dB): {:0.2f}\".format(psnr))\n print(\"Multiscale SSIM: {:0.4f}\".format(msssim))\n print(\"Multiscale SSIM (dB): {:0.2f}\".format(-10 * np.log10(1 - msssim)))\n print(\"Information content in bpp: {:0.4f}\".format(eval_bpp))\n print(\"Actual bits per pixel: {:0.4f}\".format(bpp))\n\n\ndef get_uninitialized_variables(sess):\n global_vars = tf.global_variables()\n\n # print([str(i.name) for i in global_vars])\n\n is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])\n not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n print([str(i.name) for i in not_initialized_vars])\n return not_initialized_vars\n\n\nclass DyTFC():\n def __init__(self, num_filters):\n self.num_filters = num_filters\n self.analysis_transform = AnalysisTransform(num_filters)\n self.synthesis_transform = SynthesisTransform(num_filters)\n self.hyper_analysis_transform = HyperAnalysisTransform(num_filters)\n self.hyper_synthesis_transform = HyperSynthesisTransform(num_filters)\n self.entropy_bottleneck = DynamicEntropyBottleneck(name=\"entropy_bottleneck\")\n \n def build(self, x):\n self.x = x\n self.x_shape = tf.shape(self.x)\n\n # Transform and compress the image.\n self.y = self.analysis_transform(self.x)\n self.y_shape = tf.shape(self.y)\n self.z = self.hyper_analysis_transform(abs(self.y))\n self.z_shape = tf.shape(self.z)\n self.z_hat, self.z_likelihoods = self.entropy_bottleneck(self.z, training=False)\n sigma = self.hyper_synthesis_transform(self.z_hat)\n self.sigma = sigma[:, :self.y_shape[1], :self.y_shape[2], :]\n self.scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n self.conditional_bottleneck = DynamicGaussianConditional(self.sigma, self.scale_table, name=\"gaussian_conditional\")\n self.side_string = self.entropy_bottleneck.compress(self.z)\n self.string = self.conditional_bottleneck.compress(self.y)\n\n # Transform the quantized image back (if requested).\n self.y_hat, self.y_likelihoods = self.conditional_bottleneck(self.y, training=False)\n self.x_hat = self.synthesis_transform(self.y_hat)\n self.x_hat = self.x_hat[:, :self.x_shape[1], :self.x_shape[2], :]\n\n self.num_pixels = tf.cast(tf.reduce_prod(tf.shape(self.x)[:-1]), dtype=tf.float32)\n\n # Total number of bits divided by number of pixels.\n self.eval_bpp = (tf.reduce_sum(tf.log(self.y_likelihoods)) +\n tf.reduce_sum(tf.log(self.z_likelihoods))) / (-np.log(2) * self.num_pixels)\n\n x = self.x * 255\n x_hat = tf.clip_by_value(self.x_hat, 0, 1)\n x_hat = tf.round(x_hat * 255)\n\n self.mse = tf.reduce_mean(tf.squared_difference(x, x_hat))\n self.psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n self.msssim = 
tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n self.vst = {v.name:v for v in tf.global_variables()}\n\n def _reorg(self, sess, trans, active_out_filters, sort_in, sort_out, flag=None):\n layers = trans._layers\n sorted_idx = sort_in\n for layer in layers[:-1]:\n layer.active_out_filters = active_out_filters\n sorted_idx = layer.sort_filter(sess, self.vst, sorted_idx, True)\n if flag is not \"tail\":\n layers[-1].active_out_filters = active_out_filters\n sorted_idx = layers[-1].sort_filter(sess, self.vst, sorted_idx, sort_out)\n return sorted_idx\n\n def reorg(self, sess, active):\n y_sorted_idx = self._reorg(sess, self.analysis_transform, active, False, True, \"head\")\n # print(sess.run(y_sorted_idx))\n self._reorg( sess, self.synthesis_transform, active, y_sorted_idx, False, \"tail\")\n z_sorted_idx = self._reorg(sess, self.hyper_analysis_transform, active, y_sorted_idx, True, \"body\")\n self.entropy_bottleneck.sort_weight(sess, self.vst, z_sorted_idx)\n self.entropy_bottleneck.input_spec = tf.keras.layers.InputSpec(ndim=4, axes={3: active})\n sorted_idx = self._reorg( sess, self.hyper_synthesis_transform, active, z_sorted_idx, y_sorted_idx, \"body\")\n\n def _active(self, trans, active_out_filters, flag=None):\n layers = trans._layers\n for layer in layers[:-1]:\n layer.active_out_filters = active_out_filters\n if flag is not \"tail\":\n layers[-1].active_out_filters = active_out_filters\n\n def set_active(self, active):\n self._active(self.analysis_transform, active, \"head\")\n self._active(self.synthesis_transform, active, \"tail\")\n self._active(self.hyper_analysis_transform, active, \"body\")\n self._active(self.hyper_synthesis_transform, active, \"body\")\n self.entropy_bottleneck.input_spec = tf.keras.layers.InputSpec(ndim=4, axes={3: active})\n self.conditional_bottleneck.input_spec = tf.keras.layers.InputSpec(ndim=4, axes={3: active})\n\n\ndef test_compress(args):\n \"\"\"Compresses an image.\"\"\"\n\n # Load input image and add batch dimension.\n x = read_png(args.input_file)\n x = tf.expand_dims(x, 0)\n x.set_shape([1, None, None, 3])\n x_shape = tf.shape(x)\n\n net = DyTFC(192)\n net.build(x)\n\n sess = tf.Session()\n latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)\n tf.train.Saver().restore(sess, save_path=latest)\n print(sess.run( tf.reduce_sum(tf.log(net.y_likelihoods), axis=(0,1,2)) / (-np.log(2) * net.num_pixels)) )\n return\n\n #vnames = ['gaussian_conditional/quantized_cdf:0', 'gaussian_conditional/cdf_length:0']\n #old_cb_weights = net.conditional_bottleneck.get_weights()\n\n #print(old_cb_weights)\n #net.set_active(192)\n #net.build(x)\n #sess.run(tf.variables_initializer(get_uninitialized_variables(sess)))\n #sess.run(tf.variables_initializer([net.vst[name] for name in vnames]))\n #net.conditional_bottleneck.set_weights(old_cb_weights)\n\n #\n #tf.train.Saver().save(sess,\"./sort128/model.ckpt\") \n\n tensors = [net.string, net.side_string,\n net.x_shape[1:-1], net.y_shape[1:-1], net.z_shape[1:-1]]\n \n arrays = sess.run(tensors)\n\n # Write a binary file with the shape information and the compressed string.\n packed = tfc.PackedTensors()\n packed.pack(tensors, arrays)\n with open(args.output_file, \"wb\") as f:\n f.write(packed.string)\n\n # If requested, transform the quantized image back and measure performance.\n if args.verbose:\n eval_bpp, mse, psnr, msssim, num_pixels = sess.run(\n [net.eval_bpp, net.mse, net.psnr, net.msssim, net.num_pixels])\n\n # The actual bits per pixel including overhead.\n bpp = len(packed.string) * 
8 / num_pixels\n\n print(\"Mean squared error: {:0.4f}\".format(mse))\n print(\"PSNR (dB): {:0.2f}\".format(psnr))\n print(\"Multiscale SSIM: {:0.4f}\".format(msssim))\n print(\"Multiscale SSIM (dB): {:0.2f}\".format(-10 * np.log10(1 - msssim)))\n print(\"Information content in bpp: {:0.4f}\".format(eval_bpp))\n print(\"Actual bits per pixel: {:0.4f}\".format(bpp))\n \n\n\ndef test_decompress(args):\n \"\"\"Decompresses an image.\"\"\"\n\n # Read the shape information and compressed string from the binary file.\n string = tf.placeholder(tf.string, [1])\n side_string = tf.placeholder(tf.string, [1])\n x_shape = tf.placeholder(tf.int32, [2])\n y_shape = tf.placeholder(tf.int32, [2])\n z_shape = tf.placeholder(tf.int32, [2])\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n tensors = [string, side_string, x_shape, y_shape, z_shape]\n arrays = packed.unpack(tensors)\n\n # Instantiate model.\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = tfc.EntropyBottleneck(dtype=tf.float32)\n\n # Decompress and transform the image back.\n z_shape = tf.concat([z_shape, [args.num_filters]], axis=0)\n z_hat = entropy_bottleneck.decompress(\n side_string, z_shape, channels=args.num_filters)\n sigma = hyper_synthesis_transform(z_hat)\n sigma = sigma[:, :y_shape[0], :y_shape[1], :]\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(\n sigma, scale_table, dtype=tf.float32)\n y_hat_all = conditional_bottleneck.decompress(string)\n\n x = read_png(\"kodak/kodim01.png\")\n x = tf.expand_dims(x, 0)\n x.set_shape([1, None, None, 3])\n x_shape = tf.shape(x)\n x *= 255\n\n active = 192\n y_hat = y_hat_all[:,:,:,:active]\n x_hat = synthesis_transform(y_hat)\n x_hat = tf.clip_by_value(x_hat, 0, 1)\n x_hat = tf.round(x_hat * 255)\n mse = tf.reduce_mean(tf.squared_difference(x, x_hat))\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n #x_hat = x_hat[0, :x_shape[0], :x_shape[1], :]\n #op = write_png(args.output_file, x_hat)\n\n sess = tf.Session()\n latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)\n tf.train.Saver().restore(sess, save_path=latest)\n #sess.run(op, feed_dict=dict(zip(tensors, arrays)))\n\n #vmse, vpsnr, vmsssim = sess.run([mse, psnr, msssim], feed_dict=dict(zip(tensors, arrays)))\n #print(vmse, vpsnr, vmsssim)\n\n for active in range(192,0,-8):\n y_hat = y_hat_all[:,:,:,:active]\n x_hat = synthesis_transform(y_hat)\n x_hat = tf.clip_by_value(x_hat, 0, 1)\n x_hat = tf.round(x_hat * 255)\n mse = tf.reduce_mean(tf.squared_difference(x, x_hat))\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n vmse, vpsnr, vmsssim = sess.run([mse, psnr, msssim], feed_dict=dict(zip(tensors, arrays)))\n print(active, vmse, vpsnr, vmsssim)\n\n\n\n\ndef decompress(args):\n \"\"\"Decompresses an image.\"\"\"\n\n # Read the shape information and compressed string from the binary file.\n string = tf.placeholder(tf.string, [1])\n side_string = tf.placeholder(tf.string, [1])\n x_shape = tf.placeholder(tf.int32, [2])\n y_shape = tf.placeholder(tf.int32, [2])\n z_shape = tf.placeholder(tf.int32, [2])\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n tensors = [string, side_string, x_shape, y_shape, z_shape]\n arrays = 
packed.unpack(tensors)\n\n # Instantiate model.\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = tfc.EntropyBottleneck(dtype=tf.float32)\n\n # Decompress and transform the image back.\n z_shape = tf.concat([z_shape, [args.num_filters]], axis=0)\n z_hat = entropy_bottleneck.decompress(\n side_string, z_shape, channels=args.num_filters)\n sigma = hyper_synthesis_transform(z_hat)\n sigma = sigma[:, :y_shape[0], :y_shape[1], :]\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(\n sigma, scale_table, dtype=tf.float32)\n y_hat = conditional_bottleneck.decompress(string)\n x_hat = synthesis_transform(y_hat)\n\n # Remove batch dimension, and crop away any extraneous padding on the bottom\n # or right boundaries.\n x_hat = x_hat[0, :x_shape[0], :x_shape[1], :]\n\n # Write reconstructed image out as a PNG file.\n op = write_png(args.output_file, x_hat)\n\n # Load the latest model checkpoint, and perform the above actions.\n with tf.Session() as sess:\n latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)\n tf.train.Saver().restore(sess, save_path=latest)\n sess.run(op, feed_dict=dict(zip(tensors, arrays)))\n\n\ndef parse_args(argv):\n \"\"\"Parses command line arguments.\"\"\"\n parser = argparse_flags.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # High-level options.\n parser.add_argument(\n \"--verbose\", \"-V\", action=\"store_true\",\n help=\"Report bitrate and distortion when training or compressing.\")\n parser.add_argument(\n \"--num_filters\", type=int, default=192,\n help=\"Number of filters per layer.\")\n parser.add_argument(\n \"--checkpoint_dir\", default=\"train\",\n help=\"Directory where to save/load model checkpoints.\")\n subparsers = parser.add_subparsers(\n title=\"commands\", dest=\"command\",\n help=\"What to do: 'train' loads training data and trains (or continues \"\n \"to train) a new model. 'compress' reads an image file (lossless \"\n \"PNG format) and writes a compressed binary file. 'decompress' \"\n \"reads a binary file and reconstructs the image (in PNG format). \"\n \"input and output filenames need to be provided for the latter \"\n \"two options. Invoke '<command> -h' for more information.\")\n\n # 'train' subcommand.\n train_cmd = subparsers.add_parser(\n \"train\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Trains (or continues to train) a new model.\")\n train_cmd.add_argument(\n \"--train_glob\", default=\"images/*.png\",\n help=\"Glob pattern identifying training data. 
This pattern must expand \"\n \"to a list of RGB images in PNG format.\")\n train_cmd.add_argument(\n \"--batchsize\", type=int, default=8,\n help=\"Batch size for training.\")\n train_cmd.add_argument(\n \"--patchsize\", type=int, default=256,\n help=\"Size of image patches for training.\")\n train_cmd.add_argument(\n \"--lambda\", type=float, default=0.01, dest=\"lmbda\",\n help=\"Lambda for rate-distortion tradeoff.\")\n train_cmd.add_argument(\n \"--last_step\", type=int, default=1000000,\n help=\"Train up to this number of steps.\")\n train_cmd.add_argument(\n \"--preprocess_threads\", type=int, default=16,\n help=\"Number of CPU threads to use for parallel decoding of training \"\n \"images.\")\n\n # 'compress' subcommand.\n compress_cmd = subparsers.add_parser(\n \"compress\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Reads a PNG file, compresses it, and writes a TFCI file.\")\n\n # 'decompress' subcommand.\n decompress_cmd = subparsers.add_parser(\n \"decompress\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Reads a TFCI file, reconstructs the image, and writes back \"\n \"a PNG file.\")\n\n # Arguments for both 'compress' and 'decompress'.\n for cmd, ext in ((compress_cmd, \".tfci\"), (decompress_cmd, \".png\")):\n cmd.add_argument(\n \"input_file\",\n help=\"Input filename.\")\n cmd.add_argument(\n \"output_file\", nargs=\"?\",\n help=\"Output filename (optional). If not provided, appends '{}' to \"\n \"the input filename.\".format(ext))\n\n # Parse arguments.\n args = parser.parse_args(argv[1:])\n if args.command is None:\n parser.print_usage()\n sys.exit(2)\n return args\n\n\ndef main(args):\n # Invoke subcommand.\n if args.command == \"train\":\n test_train(args)\n elif args.command == \"compress\":\n if not args.output_file:\n args.output_file = args.input_file + \".tfci\"\n test_compress(args)\n elif args.command == \"decompress\":\n if not args.output_file:\n args.output_file = args.input_file + \".png\"\n test_decompress(args)\n\n#%%\n\n#%%\n\nif __name__ == \"__main__\":\n app.run(main, flags_parser=parse_args)\n"
] | [
[
"tensorflow.compat.v1.write_file",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.train.MonitoredTrainingSession",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.image.encode_png",
"tensorflow.compat.v1.data.Dataset.from_tensor_slices",
"tensorflow.compat.v1.image.psnr",
"tensorflow.compat.v1.keras.layers.InputSpec",
"numpy.exp",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.random_crop",
"tensorflow.compat.v1.saturate_cast",
"tensorflow.compat.v1.global_variables",
"tensorflow.compat.v1.round",
"tensorflow.compat.v1.train.NanTensorHook",
"tensorflow.compat.v1.image.decode_image",
"numpy.log",
"tensorflow.compat.v1.read_file",
"tensorflow.compat.v1.train.create_global_step",
"tensorflow.compat.v1.clip_by_value",
"numpy.log10",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.image.ssim_multiscale",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.train.latest_checkpoint",
"tensorflow.compat.v1.squared_difference",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.is_variable_initialized",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.train.StopAtStepHook"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
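The `examples/varate.py` entry above derives its rate term by summing the log-likelihoods of the latents `y` and hyper-latents `z` and converting to bits per pixel, and it builds a log-spaced scale table for the Gaussian conditional. Below is a minimal NumPy-only sketch of that bookkeeping; the likelihood arrays, their shapes, and the helper names are illustrative assumptions (the original operates on TF1 tensors via `tf.reduce_sum(tf.log(...))`):

```python
import numpy as np

# Constants taken from the varate.py entry above.
SCALES_MIN, SCALES_MAX, SCALES_LEVELS = 0.11, 256, 64

# Log-spaced scale table, as passed to tfc.GaussianConditional(sigma, scale_table).
scale_table = np.exp(np.linspace(np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))


def bits_per_pixel(y_likelihoods, z_likelihoods, num_pixels):
    """Sum of -log2 likelihoods over latents and hyper-latents, divided by the
    pixel count -- mirroring the train_bpp / eval_bpp expression in the script."""
    total_bits = -(np.sum(np.log(y_likelihoods)) + np.sum(np.log(z_likelihoods))) / np.log(2)
    return total_bits / num_pixels


def rate_of_width(w):
    """Target-rate heuristic used in test_train(): 0.0267 * exp(0.0178 * W)."""
    return 0.0267 * np.exp(0.0178 * w)


if __name__ == "__main__":
    # Illustrative shapes only (hypothetical): a batch of 8 patches of 256x256
    # pixels, a 16x16 latent grid with 192 channels, and a 4x4 hyper-latent grid.
    rng = np.random.default_rng(0)
    num_pixels = 8 * 256 ** 2
    y_like = rng.uniform(0.2, 1.0, size=(8, 16, 16, 192))
    z_like = rng.uniform(0.2, 1.0, size=(8, 4, 4, 192))
    print("scale table range:", scale_table[0], "to", scale_table[-1])
    print("bpp estimate:", bits_per_pixel(y_like, z_like, num_pixels))
    print("target bpp at width 192:", rate_of_width(192))
```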
szokejokepu/natural-rws | [
"bb1ad4ca3ec714e6bf071d2136593dc853492b68",
"bb1ad4ca3ec714e6bf071d2136593dc853492b68",
"bb1ad4ca3ec714e6bf071d2136593dc853492b68",
"bb1ad4ca3ec714e6bf071d2136593dc853492b68",
"bb1ad4ca3ec714e6bf071d2136593dc853492b68"
] | [
"core/argo/core/network/MultivariateNormalTriL.py",
"core/argo/core/network/GaussianDiagonalPlusMinusOne.py",
"core/argo/core/TFDeepLearningModel.py",
"core/argo/core/optimizers/utilsOptimizers.py",
"core/argo/core/utils/collect_across.py"
] | [
"import tensorflow as tf\nfrom tensorflow_probability import distributions as tfd\nfrom functools import partial\nfrom .AbstractGaussianSimple import AbstractGaussianSimple\nimport types\nimport sonnet as snt\n\nclass MultivariateNormalTriL(AbstractGaussianSimple):\n\n def __init__(self,\n output_size,\n minimal_covariance=0.,\n initializers={},\n regularizers={},\n custom_getter={},\n name='normal_tril'):\n super().__init__(output_size=output_size,\n minimal_covariance=minimal_covariance,\n initializers=initializers,\n regularizers=regularizers,\n custom_getter=custom_getter,\n name=name)\n\n def _build(self, inputs):\n\n inputs = tf.layers.flatten(inputs)\n\n self.dense_loc = snt.Linear(self._output_size, **self._extra_kwargs)\n self.dense_diag_params = snt.Linear(self._output_size, **self._extra_kwargs)\n n_out_of_diag_elems = int(self._output_size * (self._output_size - 1) / 2)\n self.dense_out_of_diag_params = snt.Linear(n_out_of_diag_elems, **self._extra_kwargs)\n\n\n loc = self.dense_loc(inputs)\n diag_params = self.dense_diag_params(inputs)\n out_of_diag_params = self.dense_out_of_diag_params(inputs)\n\n lower_triangle = tf.contrib.distributions.fill_triangular(out_of_diag_params)\n lower_triangle = tf.pad(lower_triangle, [[0, 0], [1, 0], [0, 1]])\n\n diag_positive = self._minimal_covariance + tf.nn.softplus(diag_params)\n\n scale_tril = tf.linalg.set_diag(lower_triangle, diag_positive)\n\n dtype = inputs.dtype\n n_tril = n_out_of_diag_elems + self._output_size\n self._calibration_tril_params = tf.get_variable(\"calibration_tril_params\",\n shape=(n_tril,),\n dtype=dtype,\n trainable=False,\n initializer=tf.initializers.constant(value=1.))\n\n self.calibration_tril = tf.contrib.distributions.fill_triangular(self._calibration_tril_params, name=\"calibration_tril\")\n\n\n ouput_params = {\"loc\" : loc, \"scale_tril\" : tf.multiply(self.calibration_tril, scale_tril)}\n\n distr = tfd.MultivariateNormalTriL(**ouput_params)\n\n return distr\n\n",
"import tensorflow as tf\nimport sonnet as snt\n\nimport numpy as np\n\nfrom tensorflow_probability import distributions as tfd\nfrom tensorflow_probability import bijectors as tfb\n\nfrom .AbstractGaussian import AbstractGaussian\n\nimport types\n\nclass GaussianDiagonalPlusMinusOne(AbstractGaussian):\n \n def __init__(self, module_tuple = (\"Linear\", {}),\n output_size=None,\n output_shape=None,\n minimal_covariance=0,\n covariance_parameterization=\"softplus\",\n scalar_covariance = False,\n initializers = {},\n regularizers = {},\n contractive_regularizer = None,\n name='gaussian_diagonal_zero_one'):\n\n super().__init__(module_tuple = module_tuple,\n output_size = output_size,\n output_shape = output_shape,\n minimal_covariance = minimal_covariance,\n covariance_parameterization=covariance_parameterization,\n scalar_covariance = scalar_covariance,\n initializers = initializers,\n regularizers = regularizers,\n contractive_regularizer = contractive_regularizer,\n name = name)\n\n def _build(self, inputs):\n mean, covariance, scale = self.create_mean_n_cov_layers(inputs)\n \n mean_plus_minus_one = tf.tanh(mean)\n \n self.set_contractive_regularizer(mean_plus_minus_one, covariance,\n self._contractive_regularizer_inputs,\n self._contractive_regularizer_tuple,\n self._contractive_collection_network_str)\n \n output_distribution = tfd.Normal(loc=mean_plus_minus_one, scale=scale)\n \n # add reconstruction_node method (needed to some sort of mean or median to get reconstructions without sampling)\n def reconstruction_node(self):\n return self.mean()\n \n output_distribution.reconstruction_node = types.MethodType(reconstruction_node, output_distribution)\n \n def distribution_parameters(self):\n return [mean_plus_minus_one, np.square(scale)]\n output_distribution.distribution_parameters = types.MethodType(distribution_parameters, output_distribution)\n\n return output_distribution\n \n",
"import os\nfrom abc import abstractmethod\nfrom itertools import chain\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom datasets.Dataset import Dataset, TRAIN_LOOP\nfrom .ArgoLauncher import ArgoLauncher\nfrom .DeepLearningModel import DeepLearningModel\nfrom .Regularizers import Regularizers\nfrom .argoLogging import get_logger\nfrom .hooks.ArgoHook import STEPS, EPOCHS\nfrom .hooks.CheckpointSaverHook import CheckpointSaverHook\nfrom .hooks.FisherMatrixHook import FisherMatrixHook\nfrom .hooks.ImagesInputHook import ImagesInputHook\nfrom .hooks.LoggingMeanTensorsHook import LoggingMeanTensorsHook\nfrom .optimizers.NaturalGradientOptimizer import NaturalGradientOptimizer\nfrom .utils.argo_utils import AC_REGULARIZATION, load_class, load_module, get_clipping_id, NUMTOL, CUSTOM_REGULARIZATION\n\ntf_logging = get_logger()\n\nfrom .optimizers.TFOptimizers import TFOptimizers\n\n\ndef load_model(conf_file, global_step=None, dataset=None, extra_mp_dict={}, gpu=0, seed=0, model_class_base_path='',\n monitorSession=True):\n \"\"\"Load a TFDeepLearningModel and optionally save its network\n\n Args:\n conf_file (str): the conf file of the model where to find the experiment.\n dataset (datasets.Dataset): (optional) the argo Dataset of the model for the training. If not passed it will be reloaded.\n global_step (int): the global step to load the checkpoint (if None the last checkpoint present will be loaded).\n gpu (int) : the gpu on which the model will create the session\n seed (int) : the seed that the model will set\n model_class_base_path (str): the base path where to look for the model class\n\n Returns:\n TFDeepLearningModel: The loaded Argo TFDeepLearningModel.\n datasets.Dataset: the argo Dataset of the model for the training.\n\n \"\"\"\n\n dataset_conf, model_parameters, config = ArgoLauncher.process_conf_file(conf_file)\n\n if not dataset:\n dataset = Dataset.load_dataset(dataset_conf)\n\n model_parameters = {\n **model_parameters,\n **extra_mp_dict\n }\n\n ArgoTFDeepLearningModelClass = load_class(model_parameters[\"model\"], base_path=model_class_base_path)\n\n update_model_params(model_parameters, dataset)\n\n model_dir = os.path.split(os.path.dirname(conf_file))[0]\n model = ArgoTFDeepLearningModelClass(model_parameters, model_dir, gpu=gpu, seed=seed)\n model.init(dataset)\n\n model.create_session(config, monitorSession=monitorSession)\n\n # if global_step is None it will restore the last checkpoint in the folder model._checkpoint_dir, you can pass global_step to restore a particular chackpoint\n model.restore(global_step=global_step)\n return model, dataset\n\n\ndef load_model_without_session(conf_file, global_step=None, dataset=None, gpu=0, seed=0, model_class_base_path=''):\n \"\"\"Load a TFDeepLearningModel without session\n\n Args:\n conf_file (str): the conf file of the model where to find the experiment.\n dataset (datasets.Dataset): (optional) the argo Dataset of the model for the training. 
If not passed it will be reloaded.\n global_step (int): the global step to load the checkpoint (if None the last checkpoint present will be loaded).\n gpu (int) : the gpu on which the model will create the session\n seed (int) : the seed that the model will set\n model_class_base_path (str): the base path where to look for the model class\n\n Returns:\n TFDeepLearningModel: The loaded Argo TFDeepLearningModel.\n datasets.Dataset: the argo Dataset of the model for the training.\n\n \"\"\"\n\n dataset_conf, model_parameters, config = ArgoLauncher.process_conf_file(conf_file)\n\n if not dataset:\n dataset = Dataset.load_dataset(dataset_conf)\n\n ArgoTFDeepLearningModelClass = load_class(model_parameters[\"model\"], base_path=model_class_base_path)\n\n update_model_params(model_parameters, dataset)\n\n # baseDir = config[\"dirName\"]+\"/\"+dataset.id\n model_dir = os.path.split(os.path.dirname(conf_file))[0]\n model = ArgoTFDeepLearningModelClass(model_parameters, model_dir, gpu=gpu, seed=seed)\n model.init(dataset)\n\n checkpoint_name = model.checkpoint_name(global_step)\n\n return model, dataset, checkpoint_name\n\n\ndef load_network(ArgoTFDeepLearningModelClass, conf_file, dataset, global_step=None):\n \"\"\"Load the network of a specific model and the corresponding checkpoint.\n The Network needs to be applied (to generate the variables, that are instantiated in the _build of Sonnet)\n and then restored from the checkpoint.\n\n e.g.\n ```\n network, checkpoint_name = load_network(ClassificationModel, model_dir,\n dataset, model_params, config)\n logits = network(x)\n network.restore(sess, checkpoint_name)\n ```\n\n Args:\n ArgoTFDeepLearningModelClass (Class): the TFDeepLearningModel class to load.\n conf_file (str): the conf file of the model where to find the experiment.\n dataset (datasets.Dataset): (optional) the argo Dataset of the model for the training. 
If not passed it will be reloaded.\n global_step (int): (optional) the global step to load the checkpoint (if None the last checkpoint present will be loaded).\n\n Returns:\n ArgoAbstractNetwork: the Argo Network to load\n str: checkpoint_name\n \"\"\"\n\n dataset_conf, model_parameters, config = ArgoLauncher.process_conf_file(conf_file)\n\n update_model_params(model_parameters, dataset)\n\n model_dir = os.path.split(os.path.dirname(conf_file))[0]\n model = ArgoTFDeepLearningModelClass(model_parameters, model_dir)\n\n network = model._network\n checkpoint_name = model.checkpoint_name(global_step)\n return network, checkpoint_name\n\n\ndef update_model_params(model_parameters, dataset):\n try:\n output_shape = dataset.y_shape\n except ValueError:\n output_shape = None\n\n dataset_info = {\"output_shape\": output_shape,\n \"input_shape\": dataset.x_shape_train}\n\n model_parameters.update(dataset_info)\n\n\nclass TFDeepLearningModel(DeepLearningModel):\n default_params = {\n **DeepLearningModel.default_params,\n \"optimizer\": (\"AdamOptimizer\", {\"learning_rate\": 0.001,\n \"beta1\": 0.9,\n \"beta2\": 0.999}),\n\n \"regularizers\": {},\n\n \"grad_clipping\": (None, {}),\n\n \"batch_size_train\": 128,\n \"batch_size_eval\": 512,\n }\n\n def create_id(self):\n _id = '-bs' + str(self._opts[\"batch_size_train\"]) + \\\n '-tr' + TFOptimizers.create_id(self._opts[\"optimizer\"]) + \\\n '-gc' + get_clipping_id(self._opts[\"grad_clipping\"])\n\n if \"note\" in self._opts.keys():\n _id += '-N' + self._opts[\"note\"]\n super_id = super().create_id()\n _id += super_id\n return _id\n\n def __init__(self, opts, dirName, check_ops=False, gpu=-1, seed=0):\n\n super().__init__(opts, dirName, seed)\n\n self._check_ops = check_ops\n self._numerics_ops = None\n\n self._gpu = gpu\n\n self.sess = None\n self._saver = None\n self.global_step = None\n\n tf.compat.v1.set_random_seed(seed)\n\n # checkpoints\n self._checkpoint_dir = self.dirName + \"/saved_models/\"\n # tensorboard\n self._tensorboard_dir = self.dirName + \"/tensorboard/\"\n\n self.summary_keys = [tf.compat.v1.GraphKeys.SUMMARIES]\n self.summary_nodes = {ck: [] for ck in self.summary_keys}\n self.summary_writers = {ck: [] for ck in self.summary_keys}\n\n # this check ccan be removed at a certain point\n assert (\n \"stochastic\" not in self._opts), \"The stochastic parameter was moved to dataset, please remove it from your conf\"\n\n if \"rescale\" in self._opts:\n raise KeyError(\n \"the key `rescale` is not supported anymore. 
Rescaling is not allowed, remove it from the conf.\")\n\n self.batch_size = {}\n self.batch_size[\"train\"] = self._opts[\"batch_size_train\"]\n self.batch_size[\"eval\"] = self._opts[\"batch_size_eval\"]\n\n # important nodes\n self.x = None\n self.y = None\n self.x_shape = {}\n\n self.optimizer_tuple = self._opts[\"optimizer\"]\n\n self._grad_clipping_tuple = self._opts[\"grad_clipping\"]\n\n # important nodes\n self.loss = None\n self.regularizers = []\n\n # create regularizers\n if (\"regularizers\" not in self._opts) or (\n \"weights\" in self._opts[\"regularizers\"].keys() or \"bias\" in self._opts[\n \"regularizers\"].keys() or \"custom\" in self._opts[\"regularizers\"].keys()) or len(\n self._opts[\"regularizers\"].keys()) == 0:\n self.custom_regularizers = []\n else:\n self.custom_regularizers = {}\n for key in self._opts[\"regularizers\"].keys():\n self.custom_regularizers[key] = []\n\n self.update_ops = []\n # list of kl_losses on the weights in case of bayesian learning\n self.kl_losses = []\n\n self.datasets_initializers = {}\n self.datasets_handles_nodes = {}\n self.datasets_handles = {}\n\n # passed to ChechpoitSaverHook\n self._pb_output_nodes = None\n\n def init(self, dataset):\n\n self.binary = dataset.binary_input\n\n # TODO these two are probably useless... if you need the input shape just do tf.shape(self.raw_x) for some networks the input could change from train to eval\n # TODO if there is a way to avoid using explicitly the input dimension it is probably better...\n self.x_shape[\"train\"] = dataset.x_shape_train\n self.x_shape[\"eval\"] = dataset.x_shape_eval\n\n self.dataset = dataset\n\n self.create_feedable_placeholders()\n\n # create global steps\n self.create_global_steps(dataset.n_samples_train)\n\n self.create_input_nodes(dataset)\n\n # set optimizer\n self.set_optimizer()\n\n # self.create_is_training_node()\n\n self.create_network()\n\n # define self.loss and check it is finite\n self.create_loss()\n\n self.create_custom_regularizers()\n\n # define self.regularizers and self.update_ops\n self.create_regularizers_and_updates()\n\n # set the training operation for self.loss + self.regularizers + self.custom_regularizers\n self.set_training_op()\n\n # not used at the moment, could be useful at a certain point\n # self.create_random_update_op()\n\n # there are case in which multiple losses exit\n if isinstance(self.loss, dict):\n for k, v in self.loss.items():\n self.loss[k] = tf.debugging.check_numerics(v, \"self.loss\" + str(k) + \" is not finite\")\n else:\n self.loss = tf.check_numerics(self.loss, \"self.loss is not finite\")\n\n # session will be created after init\n\n def create_datasets_with_handles(self, dataset):\n datasets_nodes, handle, ds_initializers, ds_handles = dataset.get_dataset_with_handle(self.batch_size[\"train\"],\n self.batch_size[\"eval\"])\n self.datasets_initializers = ds_initializers\n self.datasets_handles_nodes = ds_handles\n self.ds_handle = handle\n self.datasets_nodes = datasets_nodes # this is needed, since ds_raw_x may be modified in create_input_nodes to remove the mask\n\n self.ds_raw_x = datasets_nodes[0][0]\n self.ds_aug_x = datasets_nodes[0][1]\n self.ds_perturb_x = datasets_nodes[0][2]\n\n # return datasets_nodes, handle, ds_initializers, ds_handles\n\n def create_feedable_placeholders(self):\n \"\"\"\n DO NOT USE FOR MODEL SPECIFIC PLACEHOLDERS (e.g. losses or samples..)\n Create feedables. 
This function is setting additional placeholder\n (it probably should never be used since placeholders should be set 3in the right places)\n\n Sets:\n feedable placeholders with general purpose\n\n \"\"\"\n\n self.is_training = tf.compat.v1.placeholder_with_default(False, shape=(), name=\"is_training\")\n\n # def create_is_training_node(self):\n # self._is_training = tf.compat.v1.placeholder_with_default(False, shape=(), name=\"is_training\")\n\n @abstractmethod\n def create_network(self):\n \"\"\"\n It gets the input nodes from the dataset and creates the network\n starting from the input nodes created by `create_input_nodes`\n\n Sets:\n network nodes depending on the specific child class\n \"\"\"\n pass\n\n @abstractmethod\n def create_input_nodes(self, dataset):\n \"\"\"\n create input nodes for the network\n starting from the dataset\n\n Sets:\n input nodes depending on the specific child class\n \"\"\"\n pass\n\n @abstractmethod\n def create_loss(self):\n \"\"\"create loss nodes for the network\n based on the nodes that create_networks has created,\n this method will create the loss nodes\n\n Sets:\n self.loss\n other additional loss nodes to be monitored during train can be set\n\n \"\"\"\n pass\n\n # create custom regularizers \n def create_custom_regularizers(self):\n\n if isinstance(self.custom_regularizers, list):\n self._create_custom_regularizers()\n elif isinstance(self.custom_regularizers, dict):\n for key in self.custom_regularizers.keys():\n # add regularizers for discriminator\n self._create_custom_regularizers(key)\n else:\n raise Exception(\"self.custom_regularizers should be a list or a dict\")\n\n def _create_custom_regularizers(self, network=None):\n if network is None:\n regularizers = self._opts[\"regularizers\"]\n custom_regularizers = self.custom_regularizers\n else:\n regularizers = self._opts[\"regularizers\"][network]\n custom_regularizers = self.custom_regularizers[network]\n\n if \"custom\" in regularizers.keys():\n\n for regularizer_tuple in regularizers[\"custom\"]:\n regularizer_name = regularizer_tuple[0]\n regularizer_tuple[1][\"model\"] = self\n custom_regularizer = Regularizers.instantiate_regularizer(regularizer_tuple, module_path=\"\")\n\n custom_regularizers.append(custom_regularizer)\n self.check_regularizers(regularizer_name, network)\n\n def check_regularizers(self, regularizer_name, network=None):\n pass\n\n # save in self.regularizers the regularizers of the model\n def create_regularizers_and_updates(self):\n\n wb_regularizers = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)\n # see keras_utils.py: activity_and_contractive_regularizers\n ac_regularizers = tf.compat.v1.get_collection(AC_REGULARIZATION)\n # if (not wb_regularizers) and (not ac_regularizers):\n # wb_regularizers = [tf.constant(0.)]\n\n # import pdb;pdb.set_trace()\n if len(wb_regularizers) > 0:\n self.regularizers += wb_regularizers\n if len(ac_regularizers) > 0:\n self.regularizers += ac_regularizers\n\n # self.regularizers += ([self.custom_regularizers[r] for r in self._opts[\"regularizers\"].keys() if len(self.custom_regularizers[r])>0])\n # we need to flatten the list if we have both custom regularizers and another type of regularizers\n # (weight/bias or contractive)\n self.regularizers += list(chain.from_iterable([self.custom_regularizers[r]\n for r in self._opts[\"regularizers\"].keys()\n if len(self.custom_regularizers[r]) > 0]))\n\n self.update_ops += tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)\n\n def 
create_global_steps(self, n_points_train_set):\n self.n_batches_per_epoch = np.ceil(n_points_train_set / self.batch_size[\"train\"])\n\n self.global_step = tf.compat.v1.train.get_or_create_global_step()\n self.global_epoch = tf.cast(tf.floor(tf.cast(self.global_step, tf.float32) /\n self.n_batches_per_epoch),\n tf.int64, \"global_epoch\")\n\n tf.compat.v1.add_to_collection(\"global_epoch\", self.global_epoch)\n\n # this creates an operation to add to all trainable variables a white noise of param\n def create_random_update_op(self):\n\n vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n update_opts = []\n for var in vars:\n _, variance = tf.nn.moments(tf.reshape(var, [-1]), axes=[0])\n\n normal = tf.distributions.Normal(loc=0.0, scale=tf.sqrt(variance) / 10)\n white_noise = normal.sample(var.get_shape())\n\n update_opts.append(var.assign(var + white_noise))\n\n self.random_update_op = tf.group(update_opts)\n\n # apply clipping\n def _clip_gradients(self, grads_and_vars, grad_clipping_tuple):\n\n clipping_method, clipping_kwargs = grad_clipping_tuple\n\n grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]\n grads = [g for (g, v) in grads_and_vars_not_none]\n\n self.grads = grads\n self.grads_norm = tf.linalg.global_norm(grads)\n\n # see https://www.tensorflow.org/api_docs/python/tf/train/Optimizer#processing_gradients_before_applying_them\n if clipping_method == \"clip_by_global_norm\":\n\n # clip_by_global_norm requires all the grads as argument, not only grad[i]\n grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]\n grads = [g for (g, v) in grads_and_vars_not_none]\n variables = [v for (g, v) in grads_and_vars_not_none]\n\n clip_value = clipping_kwargs[\"value\"]\n clipped_grads, global_norm = tf.clip_by_global_norm(grads, clip_value)\n clipped_grads_and_vars = [(clipped_grads[i], variables[i]) for i in range(len(grads))]\n\n elif clipping_method == \"clip_by_norm\":\n\n grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]\n\n grads = [g for (g, v) in grads_and_vars_not_none]\n variables = [v for (g, v) in grads_and_vars_not_none]\n\n # How t handle numerical issues\n # 1) set nan/inf to zero\n # grads = [tf.where(tf.is_finite(g), g, tf.zeros_like(g)) for (g, v) in grads_and_vars_not_none]\n # 2) set nan/inf to noisy gradient,\n # grads = [tf.where(tf.is_finite(g), g, tfd.Normal(loc=0.0, scale=tf.sqrt(tf.nn.moments(tf.reshape(v,[-1]),axes=[0])[1])/10 + 0.01).sample(g.get_shape())) for (g, v) in grads_and_vars_not_none]\n\n clip_value = clipping_kwargs[\"value\"]\n clipped_grads_and_vars = [(tf.clip_by_norm(g, clip_value), v) for (g, v) in zip(grads, variables)]\n\n elif clipping_method == \"clip_by_value\":\n\n clip_value = clipping_kwargs[\"value\"]\n clipped_grads_and_vars = [(tf.clip_by_value(g, -clip_value, clip_value), v) for (g, v) in grads_and_vars if\n g is not None]\n\n elif not clipping_method:\n\n grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]\n clipped_grads_and_vars = grads_and_vars_not_none\n\n else:\n raise Exception(\"clipping method not recognized: \" + clipping_method)\n\n return clipped_grads_and_vars\n\n def set_optimizer(self):\n\n with tf.compat.v1.variable_scope('optimizer'):\n self._optimizer, self._learning_rate = TFOptimizers.instantiate_optimizer(self, self.optimizer_tuple)\n\n def set_training_op(self):\n '''\n #########################################\n # Euclidean gradient computed in two steps, through the Jacobian\n 
#########################################\n '''\n\n total_loss = self.loss\n # add regularizers in case there are any\n if len(self.regularizers) > 0:\n total_loss += tf.add_n(self.regularizers, name=\"regularization\")\n\n # 1st part of minimize: compute_gradient\n self.grads_and_vars = self._optimizer.compute_gradients(total_loss)\n\n # clip gradients\n clipped_grads_and_vars = self._clip_gradients(self.grads_and_vars, self._grad_clipping_tuple)\n\n # compute norms in case they need to be logged\n self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars]\n self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars]\n # check that gradients are finite\n grads = [tf.check_numerics(g, \"grads is not finite\") for (g, v) in clipped_grads_and_vars]\n variables = [tf.check_numerics(v, \"grads is not finite\") for (g, v) in clipped_grads_and_vars]\n self.gradient_weight_global_norms = [tf.global_norm(grads), tf.global_norm(variables)]\n\n # 2nd part of minimize: apply_gradient\n optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)\n\n update_ops = tf.group(*self.update_ops)\n self.training_op = tf.group(update_ops, optimizer_step)\n\n def set_check_ops(self):\n self._check_ops = 1\n\n # TODO argo2 This is not working anymore with the new session\n # with self.sess.graph.as_default():\n self._numerics_ops = tf.add_check_numerics_ops()\n\n def release(self):\n super().release()\n self.sess.close()\n tf.reset_default_graph()\n\n def set_summaries(self):\n \"\"\"This function sets summaries and summaryFileWriters, it needs to be invoked before\n training to keep track of the summaries.\n (cannot be invoked in create_and_init_network because the FileWriter will corrupt data in the logfolder\n at each initialization)\n \"\"\"\n\n # I set up a filewriter for each summary node\n self.summary_nodes = {sk: tf.get_collection(sk) for sk in self.summary_keys}\n\n for sk in self.summary_keys:\n self.summary_writers[sk] = [tf.compat.v1.summary.FileWriter(self._tensorboard_dir + sn.name)\n for sn in self.summary_nodes[sk]]\n\n def create_hooks(self, config):\n\n hooks = []\n\n # get general arguments for the models hook\n self._time_reference_str = config[\"time_reference\"]\n self._check_time_reference(self._time_reference_str)\n self._plot_offset = config.get(\"plot_offset\", 0)\n self._default_model_hooks_kwargs = {\"time_reference\": self._time_reference_str}\n\n self._plot_model_hooks_kwargs = {\"time_reference\": self._time_reference_str,\n \"plot_offset\": self._plot_offset}\n\n self._n_steps_stats = self._get_steps(config[\"stats_period\"], self._time_reference_str)\n\n # stop hook\n tot_steps = int(self._opts['epochs'] + 1) * self.n_batches_per_epoch\n hooks.append(tf.estimator.StopAtStepHook(last_step=tot_steps))\n\n # general info hook (no average on validation but only on train loop)\n hooks.append(self._create_general_info_hook(config))\n\n # regularizers hook (no average on validation but only on train loop)\n hooks.append(self._create_regularizers_hook(config))\n\n # checkpoint hooks\n self._save_model = config[\"save_model\"]\n if self._save_model:\n max_to_keep = config.get(\"save_max_to_keep\", 5)\n self._init_session_saver(max_to_keep)\n self._checkpoint_basename = \"model.ckpt\"\n save_steps = self._get_steps(config[\"save_model_period\"], self._time_reference_str)\n\n hooks.append(CheckpointSaverHook(self._checkpoint_dir,\n save_steps=save_steps,\n saver=self._saver,\n 
checkpoint_basename=self._checkpoint_basename,\n pb_output_nodes=self._pb_output_nodes,\n save_pb_at_end=config.get(\"save_pb\", 0)\n ))\n\n # summary hook\n if config[\"save_summaries\"]:\n save_steps_summaries = self._get_steps(config[\"save_summaries_period\"], self._time_reference_str)\n\n self.set_summaries()\n\n summary_hooks = [tf.train.SummarySaverHook(save_steps=save_steps_summaries,\n output_dir=self._tensorboard_dir + sn.name,\n summary_op=sn,\n summary_writer=fw)\n for sk in self.summary_keys for sn, fw in\n zip(self.summary_nodes[sk], self.summary_writers[sk])]\n\n hooks += summary_hooks\n\n # images input hook\n kwargs = config.get(\"ImagesInputHook\", None)\n if kwargs:\n kwargs = {**self._default_model_hooks_kwargs,\n **kwargs}\n\n hooks.append(ImagesInputHook(model=self,\n dirName=self.dirName,\n **kwargs)\n )\n\n gradient_hook = self._create_gradient_hook(config)\n if gradient_hook is not None:\n hooks.append(gradient_hook)\n\n kwargs = config.get(\"FisherMatrixHook\", None)\n if kwargs and isinstance(self._optimizer, NaturalGradientOptimizer):\n kwargs = {**self._default_model_hooks_kwargs,\n # 'dataset_keys' : [TRAIN_LOOP],\n **kwargs}\n hooks.append(FisherMatrixHook(model=self,\n dirName=self.dirName,\n **kwargs\n )\n )\n\n return hooks\n\n def _create_gradient_hook(self, config):\n\n # gradienthook\n tensors_to_average = [\n [[self.gradient_weight_global_norms[0]],\n self.gradient_norms\n ],\n [[self.gradient_weight_global_norms[1]],\n self.weight_norms\n ],\n ]\n\n layer_names = np.array(list(range(len(self.gradient_norms))))\n layer_names = np.floor(layer_names / 2) + 1\n layer_names = [\"L\" + str(int(l)) for l in layer_names]\n\n tensors_to_average_names = [\n [[\"gradient_global_norms\"],\n layer_names\n ],\n [[\"weight_global_norms\"],\n layer_names\n ],\n ]\n\n tensors_to_average_plots = [\n [{\"fileName\": \"gradient_global_norms\", \"logscale-y\": 1, \"compose-label\": 0},\n {\"fileName\": \"gradient_norms\", \"logscale-y\": 1, \"compose-label\": 0}\n ],\n [{\"fileName\": \"weight_global_norms\", \"logscale-y\": 1, \"compose-label\": 0},\n {\"fileName\": \"weight_norms\", \"logscale-y\": 1, \"compose-label\": 0}\n ],\n ]\n\n kwargs = config.get(\"GradientsHook\", None)\n if kwargs:\n gradient_period = config[\"GradientsHook\"][\"period\"]\n gradient_steps = self._get_steps(gradient_period, self._time_reference_str)\n hook = LoggingMeanTensorsHook(model=self,\n fileName=\"gradient\",\n dirName=self.dirName,\n tensors_to_average=tensors_to_average,\n tensors_to_average_names=tensors_to_average_names,\n tensors_to_average_plots=tensors_to_average_plots,\n average_steps=gradient_steps,\n tensorboard_dir=self._tensorboard_dir,\n trigger_summaries=config[\"save_summaries\"],\n # trigger_plot=True,\n print_to_screen=False,\n plot_offset=self._plot_offset, # config.get(\"plot_start_epoch\", 1),\n train_loop_key=TRAIN_LOOP,\n dataset_keys=[],\n time_reference=self._time_reference_str\n )\n\n return hook\n else:\n return None\n\n # create custom regularizers id\n # passing the network equal to None support the possibility to use this function in presence\n # of multiple networks, used in gan and vae, not in hm\n def create_custom_regularizers_id(self, network=None):\n\n if network is None:\n regularizers = self._opts[\"regularizers\"]\n else:\n regularizers = self._opts[\"regularizers\"][network]\n\n ids = \"\"\n if \"custom\" in regularizers.keys():\n\n for regularizer_tuple in regularizers[\"custom\"]:\n\n regularizer_name = regularizer_tuple[0]\n\n try:\n base_path 
= '.'.join(__name__.split('.')[:-3])\n regularizer_module = load_module(\"Regularizers\", base_path=base_path)\n id = regularizer_module.create_id(regularizer_tuple)\n except Exception as e:\n # try to load from argo\n try:\n id = Regularizers.create_id(regularizer_tuple)\n except Exception as e:\n raise Exception(\"regularizer %s not found\" % regularizer_name) from e\n\n if ids == \"\":\n ids = id\n else:\n ids = ids + \"_\" + id\n\n return ids\n\n def _create_regularizers_hook(self, config):\n\n wb_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n # see keras_utils.py: activity_and_contractive_regularizers\n ac_regularizers = tf.get_collection(AC_REGULARIZATION)\n custom_regularizers = tf.get_collection(CUSTOM_REGULARIZATION)\n\n if wb_regularizers:\n wb_regularizers_names = [r.name for r in wb_regularizers]\n else:\n wb_regularizers = [tf.zeros([1])]\n wb_regularizers_names = [\"none\"]\n wb_regularizers_fileNames = {\"fileName\": \"wb_regularizers\"}\n\n if ac_regularizers:\n ac_regularizers_names = [r.name for r in ac_regularizers]\n else:\n ac_regularizers = [tf.zeros([1])]\n ac_regularizers_names = [\"none\"]\n ac_regularizers_fileNames = {\"fileName\": \"ac_regularizers\"}\n\n if custom_regularizers:\n custom_regularizers_names = [r.name for r in custom_regularizers]\n else:\n custom_regularizers = [tf.zeros([1])]\n custom_regularizers_names = [\"none\"]\n custom_regularizers_fileNames = {\"fileName\": \"custom_regularizers\"}\n\n # logging hooks\n tensors_to_average = [[wb_regularizers], [ac_regularizers, custom_regularizers]]\n tensors_to_average_names = [[wb_regularizers_names], [ac_regularizers_names, custom_regularizers_names]]\n tensors_to_average_plots = [[wb_regularizers_fileNames],\n [ac_regularizers_fileNames, custom_regularizers_fileNames]]\n\n hook = LoggingMeanTensorsHook(model=self,\n fileName=\"regularizers\",\n dirName=self.dirName,\n tensors_to_average=tensors_to_average,\n tensors_to_average_names=tensors_to_average_names,\n tensors_to_average_plots=tensors_to_average_plots,\n average_steps=self._n_steps_stats,\n tensorboard_dir=self._tensorboard_dir,\n trigger_summaries=config[\"save_summaries\"],\n print_to_screen=False,\n # trigger_plot = True,\n plot_offset=self._plot_offset,\n train_loop_key=TRAIN_LOOP,\n dataset_keys=[],\n time_reference=self._time_reference_str\n )\n return hook\n\n def _create_general_info_hook(self, config):\n # logging hooks\n tensors_to_average = [\n [[self._learning_rate]]\n ]\n tensors_to_average_names = [\n [[\"learning_rate\"]],\n ]\n tensors_to_average_plots = [\n [{\"fileName\": \"learning_rate\"}]\n ]\n\n hook = LoggingMeanTensorsHook(model=self,\n fileName=\"info\",\n dirName=self.dirName,\n tensors_to_average=tensors_to_average,\n tensors_to_average_names=tensors_to_average_names,\n tensors_to_average_plots=tensors_to_average_plots,\n average_steps=self._n_steps_stats,\n tensorboard_dir=self._tensorboard_dir,\n trigger_summaries=config[\"save_summaries\"],\n print_to_screen=False,\n # trigger_plot = True,\n plot_offset=self._plot_offset,\n train_loop_key=TRAIN_LOOP,\n dataset_keys=[]\n )\n return hook\n\n # why passing opt?\n def create_session(self, config, monitorSession=True):\n\n # save to set the right behavior in self.get_raw_session()\n self.monitorSession = monitorSession\n\n # set some important options\n if self._gpu == -1:\n sess_config = tf.compat.v1.ConfigProto(device_count={'GPU': 0},\n allow_soft_placement=True)\n else:\n # config = tf.ConfigProto(log_device_placement=True)\n sess_config = 
tf.compat.v1.ConfigProto(allow_soft_placement=True)\n\n sess_config.gpu_options.allow_growth = True\n\n # self.sess = tf.Session(config=config)\n # self.sess = tf.InteractiveSession()\n\n # not needed anymore, moved in hooks...\n # self.set_summaries()\n\n if self._check_ops:\n self.set_check_ops()\n\n self.hooks = self.create_hooks(config)\n\n # TODO-ARGO2 if we would use a SingularMonitoredSession, it is possible to directly pass it to a saver for custom user saving..\n # TODO-ARGO2 How to handle this with the more stable Monitored Session? Maybe a TFTrainableDeepLearningModel\n # TODO-ARGO2 by the way it is possible to define a custom Monitored session\n # TODO-ARGO2 (to handle only hooks without fancy session stuffs http://davideng.me/2017/10/11/designing-a-custom-monitored-training-session.html\n\n if monitorSession:\n # MonitoredSession\n # this will restore all the variables from the latest checkpoint if it exists\n self._fix_checkpoint_abs_to_rel(self._checkpoint_dir) # need to ensure checkpoint has relative path saved\n\n chiefsess_creator = tf.compat.v1.train.ChiefSessionCreator(config=sess_config,\n checkpoint_dir=self._checkpoint_dir)\n\n # this is restoring variables \n self.sess = tf.compat.v1.train.MonitoredSession(session_creator=chiefsess_creator, hooks=self.hooks)\n else:\n self.sess = tf.Session(config=sess_config)\n\n if self._save_model:\n self._save_graph()\n\n # I do not want to trigger hooks for this!!\n self.datasets_handles = self.get_raw_session().run(self.datasets_handles_nodes)\n\n # to get the raw session in MonitoredSession see\n # https://github.com/tensorflow/tensorflow/issues/8425\n # https://github.com/tensorflow/tensorflow/issues/11971\n def get_raw_session(self):\n if self.sess is None:\n raise Exception(\"The session is None\")\n\n if self.monitorSession:\n return self.sess._tf_sess()\n else:\n # suppose regular Session()\n return self.sess\n\n def train(self):\n for hook in self.hooks:\n before_training = getattr(hook, 'before_training', None)\n if before_training is not None:\n before_training(self.get_raw_session())\n\n print(\"id: \" + self.dirName) # + '/' + self.id)\n print(\"graph size: \" + str(self.graph_size))\n\n # loops over the batches\n while not self.sess.should_stop():\n # import pdb;pdb.set_trace()\n try:\n # loss must be evaluated and fetched to raise InvalidArgumentError if nan, see https://github.com/tensorflow/tensorflow/issues/11098\n _, _, global_epoch = self.sess.run([self.training_op, self.loss, self.global_epoch],\n feed_dict={self.ds_handle: self.datasets_handles[TRAIN_LOOP],\n self.is_training: True})\n\n\n except tf.errors.InvalidArgumentError:\n\n raise Exception(\"an error has occurred during training, check stack trace UP HERE\")\n\n def _init_session_saver(self, max_to_keep, variables=None):\n \"\"\" A saver with all the variables for the session is instantiated and set in self._saver, with variables,\n by default variables is None, all variables in the graph will be saved.\n It is probably a good idea since the whole session must be later be restored by the ChiefSession\n \"\"\"\n os.makedirs(self._checkpoint_dir, exist_ok=True)\n self._saver = tf.compat.v1.train.Saver(variables, max_to_keep=max_to_keep, save_relative_paths=True)\n\n def _save_graph(self):\n writer = tf.compat.v1.summary.FileWriter(logdir=self._checkpoint_dir,\n # graph=self.sess.graph,\n graph=tf.compat.v1.get_default_graph(),\n filename_suffix=\"-graph\"\n )\n writer.flush()\n\n def _assemble_checkpoint_name(self, checkpoint_dir):\n path = 
os.path.join(checkpoint_dir, \"model.ckpt\")\n return path\n\n def _latest_checkpoint(self, checkpoint_dir):\n with open(checkpoint_dir + 'checkpoint') as fs:\n potentiallyabsolutepath = fs.readline().split()[1]\n\n potentiallyabsolutepath = os.path.basename(potentiallyabsolutepath.strip('\"'))\n path = checkpoint_dir + os.path.basename(potentiallyabsolutepath)\n return path\n\n def _fix_checkpoint_abs_to_rel(self, checkpoint_dir):\n checkpointfilename = checkpoint_dir + 'checkpoint'\n exists = os.path.isfile(checkpointfilename)\n if exists:\n with open(checkpointfilename) as fs:\n lines = fs.readlines()\n\n fs = open(checkpointfilename, 'w')\n for line in lines:\n which_model, potentiallyabsolutepath = line.split()\n potentiallyabsolutepath = os.path.basename(potentiallyabsolutepath.strip('\"'))\n rel_path = '\\\"' + os.path.basename(potentiallyabsolutepath) + '\\\"'\n fs.write(\" \".join([which_model, rel_path]) + \"\\n\")\n\n fs.close()\n\n def checkpoint_name(self, global_step):\n if global_step:\n path = self._assemble_checkpoint_name(self._checkpoint_dir)\n path += \"-\" + str(global_step)\n else:\n path = self._latest_checkpoint(self._checkpoint_dir)\n\n if not path:\n raise Exception(\"could not find saved checkpoints in %s\" % self._checkpoint_dir)\n\n return path\n\n def save(self, global_step=None):\n if self._saver is None:\n raise Exception(\"saver must be initialized before attempt to save\")\n else:\n session = self.get_raw_session()\n path = self._assemble_checkpoint_name()\n self._saver.save(session, path, global_step=global_step)\n\n def restore(self, global_step=None):\n \"\"\"Restore the model variables.\n\n Args:\n global_step (type): the step from which to restore. By default it is None\n and the latest checkpoint in self.checkpoint_dir will be restored\n \"\"\"\n\n path = \"\"\n session = self.get_raw_session()\n\n if self._saver is None:\n raise Exception(\"saver must be initialized before attempt to restore\")\n else:\n path = self.checkpoint_name(global_step)\n self._saver.restore(session, path)\n\n @property\n def graph_size(self):\n return len([n.name for n in self.sess.graph.as_graph_def().node])\n\n def _check_time_reference(self, time_ref):\n time_choices = [EPOCHS, STEPS]\n if not time_ref in time_choices:\n raise ValueError(\"time_reference in the frequency tuple can only be in %s\" % time_choices)\n\n def _get_steps(self, n, time_reference):\n\n self._check_time_reference(time_reference)\n n = float(n)\n\n if time_reference == EPOCHS:\n n = n * self.n_batches_per_epoch\n\n return int(n)\n",
"import tensorflow as tf\n\nfrom ..utils.argo_utils import NUMTOL\n\n\ndef my_loss_full_logits(y, logits):\n\n n = logits.get_shape().as_list()[1]\n probabilities = tf.nn.softmax(logits)\n\n clipped_probabilities = tf.clip_by_value(probabilities, NUMTOL, 1 - NUMTOL)\n loss = tf.reduce_sum(-tf.one_hot(y,depth=n)*tf.log(clipped_probabilities),axis=1)\n\n return loss\n\n# TO BE REMOVED!\n'''\ndef my_loss(y, logits_add_last_node):\n\n n = logits_add_last_node.get_shape().as_list()[1]\n\n probabilities = tf.nn.softmax(logits_add_last_node)\n probabilities_sliced = tf.slice(probabilities, [0, 0], [-1, n-1])\n new_probabilities = tf.concat([probabilities_sliced,\n tf.reshape(1 - tf.reduce_sum(probabilities_sliced, axis=1),[-1,1])],\n 1)\n\n clipped_probabilies = tf.clip_by_value(new_probabilities, NUMTOL, 1-NUMTOL)\n loss = tf.reduce_sum(-tf.one_hot(y,depth=n)*tf.log(clipped_probabilies),axis=1)\n\n return loss\n'''\n\n",
"import os, re\nfrom glob import glob\nfrom pprint import pprint\nimport pandas as pd\nimport numpy as np\nfrom itertools import product\n\npd_csv_kwargs = {\n \"sep\" : \"\\t\"\n }\n\ndef all_same(items):\n return all(x == items[0] for x in items)\n\ndef check_matches(matches, base_dir):\n trimmed_matches = [match.split(base_dir)[1].strip('/') for match in matches]\n ds_strings, net_strings = zip(*[m.split(\"/\")[:2] for m in trimmed_matches])\n if not all_same(net_strings):\n raise Exception(\"net_strings are not all the same, check your regular expression, found: {:}\".format(set(net_strings)))\n return sorted(ds_strings), list(set(net_strings))[0]\n\ndef get_ds_field_value(match, pre_ds_dir, post_ds_dir):\n value = match.partition(pre_ds_dir)[2].partition(post_ds_dir)[0]\n return value\n\ndef read_value(path, col, val, field):\n df = read_argo_csv(path, **pd_csv_kwargs)\n row = df[df[col]==val]\n return row[field].values[0]\n\ndef read_max_value(path, field, col_ref):\n df = read_argo_csv(path, **pd_csv_kwargs)\n imax = df[field].idxmax()\n return df[col_ref][imax], df[field][imax]\n\ndef read_argo_csv(path, **pd_csv_kwargs):\n df = pd.read_csv(path, **pd_csv_kwargs)\n df = df.rename(columns={\"# epochs\": \"epoch\"})\n rename_dict = {col: col.strip() for col in df.columns}\n df = df.rename(columns=rename_dict)\n return df\n\ndef convert_alpha(alpha_str, limitfloat):\n if alpha_str == 'limit':\n return limitfloat\n else:\n return float(alpha_str)\n\ndef tryfloat(alpha_str):\n try:\n value = float(alpha_str)\n except ValueError:\n value = alpha_str\n\n return value\n\ndef get_limit_float(data):\n found_limit = False\n xs = [tryfloat(d[0]) for d in data]\n if \"limit\" in xs:\n found_limit = True\n\n xs = [x for x in xs if isinstance(x, float)]\n\n return min(xs)-1, found_limit\n\ndef key_sort(a):\n if a[0] == 'limit':\n return -np.inf\n\n return a[0]\n\n\ndef collect_data_across_ds_single_before(base_dir, ds_dir, net_dir, log_file, outdirname=\".\"):\n\n dir_list = [base_dir, ds_dir, net_dir, log_file]\n matches = glob(os.path.join(*dir_list))\n if len(matches)==0:\n raise ValueError(\"No file found matching the provided regexpr: `{:}`\".format(os.path.join(*dir_list)))\n\n ds_strings, net_string = check_matches(matches, base_dir)\n pprint(ds_strings)\n\n pre_ds_dir, _, post_ds_dir = ds_dir.partition(\"*\")\n ds_param_name = \"alpha\"\n log_name = os.path.splitext(log_file)[0]\n field_train = log_name+'_train'\n field_val = log_name+'_validation'\n field_test = log_name+'_test'\n\n all_fields = [ds_param_name, 'epoch', field_train, field_val, field_test]\n\n data = []\n for match in matches:\n x = get_ds_field_value(match, pre_ds_dir, post_ds_dir)\n epmax, y_val = read_max_value(match, field_val, 'epoch')\n y_train = read_value(match, 'epoch', epmax, field_train)\n y_test = read_value(match, 'epoch', epmax, field_test)\n data.append((tryfloat(x), epmax, y_train, y_val, y_test))\n\n sorted_data = sorted(data, key=key_sort)\n\n df = pd.DataFrame(sorted_data, columns=all_fields)\n\n outpath = os.path.join(outdirname, pre_ds_dir+'W'+post_ds_dir, net_string)\n os.makedirs(outpath, exist_ok=True)\n outpath = os.path.join(outpath, \"collected_\"+log_file)\n df.to_csv(outpath, index=False, float_format='%.6g', **pd_csv_kwargs)\n\n\ndef collect_data_across_ds_before(base_dir, ds_dirs, net_dirs, log_file, outdirname=\".\"):\n for ds_dir, net_dir in product(ds_dirs, net_dirs):\n collect_data_across_ds_single_before(base_dir, ds_dir, net_dir, log_file, outdirname=outdirname)\n\n\n# collect_dict = 
{\n# 'main_field_spec' : ('a',),\n#\n# 'plot' : {\n# 'main_field_value' : 'float',\n# 'plot_field_spec' : ('alpha', ('a','v')),\n# },\n#\n# 'table' : {\n# 'main_field_value' : 'limit',\n# 'table_field_spec' : [\n# ('ntop', ('a', 't')),\n# ('weighted', ('a', 'w')),\n# ('fraction', ('a', 'f')),\n# ],\n# }\n# }\n\ndef collect_data_across_ds_single(base_dir, ds_dir, net_dir, log_file, collect_dict, outdirname=\".\"):\n log_filename = log_file['filename']\n target_col = log_file['target_col']\n\n dir_list = [base_dir, ds_dir, net_dir, log_filename]\n\n matches = glob(os.path.join(*dir_list))\n if len(matches)==0:\n raise ValueError(\"No file found matching the provided regexpr: `{:}`\".format(os.path.join(*dir_list)))\n\n ds_strings, net_string = check_matches(matches, base_dir)\n pprint(ds_strings)\n\n mainfield_spec = collect_dict['main_field_spec']\n baseoutputname = name_with_wildcard(ds_dir, mainfield_spec)\n\n level = -3 # usually -3 for dataset in a split, basedir/dsdir/netdir/logfile\n # csv for plot\n\n conf_plot = collect_dict['plot']\n conf_table = collect_dict['table']\n\n # log_name = os.path.splitext(os.path.basename(log_filename))[0]\n\n outdirname = os.path.join(outdirname, baseoutputname, net_string)\n os.makedirs(outdirname, exist_ok=True)\n\n make_plot(matches, conf_plot, mainfield_spec, target_col, outdirname, level=level)\n make_table(matches, conf_table, mainfield_spec, target_col, outdirname, level=level)\n\n\n\ndef make_plot(matches, conf_plot, mainfield_spec, target_col, outdirname, level=-3):\n pv = conf_plot['main_field_value']\n matches_plot = [m for m in matches if check_where(m.split('/')[level], mainfield_spec, pv)]\n ds_param_name, plot_field_spec = conf_plot['plot_field_spec']\n field_train = target_col + '_train'\n field_val = target_col + '_validation'\n field_test = target_col + '_test'\n all_fields = [ds_param_name, 'epoch', field_train, field_val, field_test]\n data = []\n for match in matches_plot:\n x = get_field(match.split('/')[level], plot_field_spec)\n epmax, y_val = read_max_value(match, field_val, 'epoch')\n y_train = read_value(match, 'epoch', epmax, field_train)\n y_test = read_value(match, 'epoch', epmax, field_test)\n data.append((tryfloat(x), epmax, y_train, y_val, y_test))\n\n sorted_data = sorted(data)\n df = pd.DataFrame(sorted_data, columns=all_fields)\n outpath_plot = os.path.join(outdirname, mainfield_spec[0] + pv + \"_\" + target_col + \".txt\")\n df.to_csv(outpath_plot, index=False, float_format='%.6g', **pd_csv_kwargs)\n\n\ndef make_table(matches, conf_table, mainfield_spec, target_col, outdirname, level=-3):\n tv = conf_table['main_field_value']\n matches_table = [m for m in matches if check_where(m.split('/')[level], mainfield_spec, tv)]\n\n all_fields = [fn for fn, fs in conf_table['table_field_spec']]\n\n log_field_train = target_col + '_train'\n log_field_val = target_col + '_validation'\n log_field_test = target_col + '_test'\n all_fields += ['epoch', log_field_train, log_field_val, log_field_test]\n\n data = []\n for match in matches_table:\n # get values identifying this match as from specifications\n tables_field_values = [get_field(match.split('/')[level], tfspec, with_subfields=False) \\\n for tfn, tfspec in conf_table['table_field_spec']]\n\n epmax, y_val = read_max_value(match, log_field_val, 'epoch')\n y_train = read_value(match, 'epoch', epmax, log_field_train)\n y_test = read_value(match, 'epoch', epmax, log_field_test)\n\n data.append(tuple(tables_field_values)+(epmax, y_train, y_val, y_test))\n\n sorted_data = 
sorted(data)\n df = pd.DataFrame(sorted_data, columns=all_fields)\n outpath_table = os.path.join(outdirname, mainfield_spec[0] + tv + \"_\" + target_col + \".txt\")\n df.to_csv(outpath_table, index=False, float_format='%.6g', **pd_csv_kwargs)\n\n\ndef name_with_wildcard(ds_dir, field_spec):\n mainfield = field_spec[0]\n tag = '-' + mainfield + get_field(ds_dir, (mainfield,))\n pre_ds_dir, _, post_ds_dir = ds_dir.partition(tag)\n name_with_wildcard = pre_ds_dir + '-' + mainfield + 'W' + post_ds_dir\n return name_with_wildcard\n\n\ndef collect_data_across_ds(base_dir, ds_dirs, net_dirs, log_file, collect_dict, outdirname=\".\"):\n for ds_dir, net_dir in product(ds_dirs, net_dirs):\n collect_data_across_ds_single(base_dir, ds_dir, net_dir, log_file, collect_dict, outdirname=outdirname)\n\n\ndef get_field(string, field_spec, with_subfields=True):\n l = len(field_spec)\n\n if l == 0 or l > 2:\n raise ValueError(\"Not implemented tuple length `{:}`, found field spec `{:}`\".format(l, field_spec))\n\n m = re.split('(-|^)' + field_spec[0], string)\n try:\n after = m[2]\n ss1 = re.split('(-[a-zA-Z]|$)', after)[0]\n except:\n ss1 = ''\n\n if l == 1:\n if with_subfields:\n return ss1\n else:\n ss1val = re.split('(_|$)', ss1)[0]\n return ss1val\n\n m = re.search('(_|^)' + field_spec[1] + '([\\.\\-\\,A-Za-z0-9]+)' + '(_|$)', ss1)\n\n if m is None:\n ss2 = ''\n else:\n ss2 = m.group(2)\n\n return ss2\n\n\ndef check_where(string, field_spec, value):\n return get_field(string, field_spec, with_subfields=False) == value\n\n\n"
] | [
[
"tensorflow.layers.flatten",
"tensorflow.multiply",
"tensorflow.contrib.distributions.fill_triangular",
"tensorflow.linalg.set_diag",
"tensorflow.pad",
"tensorflow.initializers.constant",
"tensorflow.nn.softplus"
],
[
"numpy.square",
"tensorflow.tanh"
],
[
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.compat.v1.train.MonitoredSession",
"tensorflow.estimator.StopAtStepHook",
"tensorflow.compat.v1.train.Saver",
"tensorflow.add_n",
"tensorflow.group",
"tensorflow.compat.v1.placeholder_with_default",
"tensorflow.get_collection",
"tensorflow.check_numerics",
"tensorflow.compat.v1.train.get_or_create_global_step",
"numpy.ceil",
"tensorflow.linalg.global_norm",
"tensorflow.reset_default_graph",
"tensorflow.clip_by_norm",
"tensorflow.Session",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.train.ChiefSessionCreator",
"tensorflow.norm",
"tensorflow.compat.v1.get_collection",
"numpy.floor",
"tensorflow.add_check_numerics_ops",
"tensorflow.train.SummarySaverHook",
"tensorflow.global_norm",
"tensorflow.clip_by_value",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.reshape",
"tensorflow.compat.v1.add_to_collection",
"tensorflow.clip_by_global_norm",
"tensorflow.sqrt"
],
[
"tensorflow.clip_by_value",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.log"
],
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
cristhiandcl/AD-DL | [
"b7abb3fe619e736b269067033ba4aad1f03cf3b8"
] | [
"clinicadl/clinicadl/tools/tsv/tsv_utils.py"
] | [
"# coding: utf8\n\nfrom copy import copy\nimport numpy as np\nimport pandas as pd\nfrom os import path\n\n\ndef neighbour_session(session, session_list, neighbour):\n if session not in session_list:\n temp_list = session_list + [session]\n temp_list.sort()\n else:\n temp_list = copy(session_list)\n temp_list.sort()\n index_session = temp_list.index(session)\n\n if index_session + neighbour < 0 or index_session + neighbour >= len(temp_list):\n return None\n else:\n if temp_list[index_session + neighbour] < 10:\n return 'ses-M0' + str(temp_list[index_session + neighbour])\n else:\n return 'ses-M' + str(temp_list[index_session + neighbour])\n\n\ndef after_end_screening(session, session_list):\n if session in session_list:\n return False\n else:\n temp_list = session_list + [session]\n temp_list.sort()\n index_session = temp_list.index(session)\n return index_session == len(temp_list) - 1\n\n\ndef last_session(session_list):\n temp_list = copy(session_list)\n temp_list.sort()\n if temp_list[-1] < 10:\n return 'ses-M0' + str(temp_list[-1])\n else:\n return 'ses-M' + str(temp_list[-1])\n\n\ndef complementary_list(total_list, sub_list):\n result_list = []\n for element in total_list:\n if element not in sub_list:\n result_list.append(element)\n return result_list\n\n\ndef first_session(subject_df):\n session_list = [int(session[5:]) for _, session in subject_df.index.values]\n session_list.sort()\n first_session = session_list[0]\n if first_session < 10:\n return 'ses-M0' + str(first_session)\n else:\n return 'ses-M' + str(first_session)\n\n\ndef next_session(subject_df, session_orig):\n session_list = [int(session[5:]) for _, session in subject_df.index.values]\n session_list.sort()\n session_id_list = []\n for session in session_list:\n if session < 10:\n session_id_list.append('ses-M0' + str(session))\n else:\n session_id_list.append('ses-M' + str(session))\n index = session_id_list.index(session_orig)\n if index < len(session_id_list) - 1:\n return session_id_list[index + 1]\n else:\n raise ValueError('The argument session is the last session')\n\n\ndef extract_baseline(diagnosis_df, diagnosis, set_index=True):\n from copy import deepcopy\n\n if set_index:\n all_df = diagnosis_df.set_index(['participant_id', 'session_id'])\n else:\n all_df = deepcopy(diagnosis_df)\n\n result_df = pd.DataFrame()\n for subject, subject_df in all_df.groupby(level=0):\n baseline = first_session(subject_df)\n subject_baseline_df = pd.DataFrame(data=[[subject, baseline] +\n subject_df.loc[(subject, baseline)].tolist()],\n columns=[\"participant_id\", \"session_id\"] + subject_df.columns.values.tolist())\n result_df = pd.concat([result_df, subject_baseline_df])\n\n result_df[\"diagnosis\"] = [diagnosis] * len(result_df)\n result_df.reset_index(inplace=True, drop=True)\n\n return result_df\n\n\ndef chi2(x_test, x_train):\n from scipy.stats import chisquare\n\n # Look for chi2 computation\n total_categories = np.concatenate([x_test, x_train])\n unique_categories = np.unique(total_categories)\n f_obs = [(x_test == category).sum() / len(x_test) for category in unique_categories]\n f_exp = [(x_train == category).sum() / len(x_train) for category in unique_categories]\n\n T, p = chisquare(f_obs, f_exp)\n\n return T, p\n\n\ndef add_demographics(df, demographics_df, diagnosis):\n out_df = pd.DataFrame()\n tmp_demo_df = copy(demographics_df)\n tmp_demo_df.reset_index(inplace=True)\n for idx in df.index.values:\n participant = df.loc[idx, \"participant_id\"]\n session = df.loc[idx, \"session_id\"]\n row_df = 
tmp_demo_df[(tmp_demo_df.participant_id == participant) & (tmp_demo_df.session_id == session)]\n out_df = pd.concat([out_df, row_df])\n out_df.reset_index(inplace=True, drop=True)\n out_df.diagnosis = [diagnosis] * len(out_df)\n return out_df\n\n\ndef remove_unicity(values_list):\n \"\"\"Count the values of each class and label all the classes with only one label under the same label.\"\"\"\n unique_classes, counts = np.unique(values_list, return_counts=True)\n one_sub_classes = unique_classes[(counts == 1)]\n for class_element in one_sub_classes:\n values_list[values_list.index(class_element)] = unique_classes.min()\n\n return values_list\n\n\ndef category_conversion(values_list):\n values_np = np.array(values_list)\n unique_classes = np.unique(values_np)\n for index, unique_class in enumerate(unique_classes):\n values_np[values_np == unique_class] = index + 1\n\n return values_np.astype(int).tolist()\n\n\ndef find_label(labels_list, target_label):\n if target_label in labels_list:\n return target_label\n else:\n min_length = np.inf\n found_label = None\n for label in labels_list:\n if target_label.lower() in label.lower() and min_length > len(label):\n min_length = len(label)\n found_label = label\n if found_label is None:\n raise ValueError(f\"No label was found in {labels_list} for target label {target_label}.\")\n\n return found_label\n\n\ndef retrieve_longitudinal(df, diagnosis_df):\n final_df = pd.DataFrame()\n for idx in df.index.values:\n subject = df.loc[idx, 'participant_id']\n row_df = diagnosis_df[diagnosis_df.participant_id == subject]\n final_df = pd.concat([final_df, row_df])\n\n return final_df\n\n\ndef remove_sub_labels(diagnosis_df, sub_labels, diagnosis_df_paths, results_path,\n logger=None):\n\n from ..deep_learning.iotools import return_logger\n\n if logger is None:\n logger = return_logger(2, \"remove sub labels\")\n\n supplementary_diagnoses = []\n\n logger.debug('Before subjects removal')\n sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()\n logger.debug(f'{len(sub_df)} subjects, {len(diagnosis_df)} scans')\n\n for label in sub_labels:\n if f'{label}.tsv' in diagnosis_df_paths:\n sub_diag_df = pd.read_csv(path.join(results_path, f'{label}.tsv'), sep='\\t')\n sub_diag_baseline_df = extract_baseline(sub_diag_df, label)\n for idx in sub_diag_baseline_df.index.values:\n subject = sub_diag_baseline_df.loc[idx, 'participant_id']\n diagnosis_df.drop(subject, inplace=True, level=0)\n supplementary_diagnoses.append(label)\n\n logger.debug(f'Removed {len(sub_diag_baseline_df)} subjects based on {label} label')\n sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()\n logger.debug(f'{len(sub_df)} subjects, {len(diagnosis_df)} scans')\n\n return diagnosis_df, supplementary_diagnoses\n"
] | [
[
"pandas.concat",
"numpy.unique",
"pandas.DataFrame",
"numpy.concatenate",
"scipy.stats.chisquare",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
jeetbanik/Corona-Real-Time-Face-Mask-and-Keypoints-Detection | [
"3232f5d7b84fffcc61c2bb84d1b5109154bcc6bb"
] | [
"Testing Model Including Facial Keypoints.py"
] | [
"import numpy as np\nfrom PIL import Image\nimport cv2\nfrom model import Net\nimport torch\nfrom torchvision import transforms\nfrom mtcnn import MTCNN\n\ndef LoadModel(fpath):\n '''\n function to load saved model\n '''\n c = torch.load(fpath, map_location='cpu')\n model = c['model']\n model.load_state_dict(c['state_dict'])\n # as we've to perform testing, we don't need backpropagation so setting 'requires_grad' as false\n for parameter in model.parameters():\n parameter.requires_grad = False\n # model.eval() -> .eval() does not change any behaviour of gradient calculations , but are used to set specific layers\n # like dropout and batchnorm to evaluation mode i.e. dropout layer won't drop activations and \n # batchnorm will use running estimates instead of batch statistics.\n return model.eval()\n\ntrain_transforms = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))\n])\n\n# Initializing file paths for both the models\nfpath1 = 'Real-Time Face Mask Detection Model.pth'\nfpath2 = 'Facial Keypoints Model.pt'\n\n# Loading the models for testing\nmodel = LoadModel(fpath1)\nnet = Net()\nnet.load_state_dict(torch.load(fpath2))\nfor parameter in net.parameters():\n parameter.requires_grad = False\nnet.eval()\nmodel_lm = net\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ndetector = MTCNN()\n\n# Accessing the webcam\ncap = cv2.VideoCapture(0)\nf = cv2.FONT_HERSHEY_DUPLEX\nt = 2\nred = (0,0,255)\ngreen = (0,255,0)\nblue = (255,255,0)\nyellow = (0,155,255)\nwhile (cap.isOpened()):\n # getting the frame in 'frm' and a bool value in 'ret' which is true if a frame is returned\n ret, frm = cap.read()\n if ret == True:\n # converting into grayscale for feature reduction and grayscale images are less computation intensive to operate on\n gray = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)\n col = cv2.cvtColor(frm, cv2.COLOR_BGR2RGB)\n # detecting the faces in the frame returned, it will return the coords of bounding box along with its height and width\n result = detector.detect_faces(col)\n for box in result:\n x, y, w, h = box['box']\n keypoints = box['keypoints']\n # drawing the bounding box based on the coordinates provided by haar_cascade\n cv2.rectangle(frm, (x,y), (x+w,y+h), 2)\n # cropping the portion of image covered by the bounding box\n crp = Image.fromarray(frm,mode = 'RGB')\n #cropped_img = frm[y:y+h, x:x+w]\n cropped_img = crp.crop((x,y,x+w,y+h))\n s = (w*h)/(50000)\n if s<0.5:\n s=0.5\n pil_image = train_transforms(cropped_img)\n image = pil_image.unsqueeze(0)\n # feeding the test cropped image into the model\n result = model(image)\n img = np.array(image)\n img = img[:,0,:,:]\n img = img.reshape(img.shape[0], 1, img.shape[1], img.shape[2])\n result_lm = model_lm(torch.from_numpy(img))\n result_lm = np.array(result_lm)\n result_lm = result_lm*(0.19*h)\n result_lm = result_lm.reshape(68,2)\n result_lm[:,0] += x+(0.28*h)\n result_lm[:,1] += y+(0.49*w)\n _, maximum = torch.max(result.data, 1)\n pred = maximum.item()\n # displaying results based on classification\n if pred == 0:\n cv2.circle(frm, (keypoints['left_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['right_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['nose']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_left']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_right']), 2, yellow, 2)\n (lw,lh), bl = cv2.getTextSize(\"Correctly Masked\", f, s, t)\n cv2.putText(frm, \"Correctly Masked\", 
((int(((w+x)-x-lw)/2)+x),y-10), f, s, green, t)\n cv2.rectangle(frm, (x,y), (x+w,y+h), green, 2) # green colour rectangle if mask is worn correctly\n elif pred == 1:\n cv2.circle(frm, (keypoints['left_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['right_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['nose']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_left']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_right']), 2, yellow, 2)\n (lw,lh), bl = cv2.getTextSize(\"Unmasked\", f, s, t)\n cv2.putText(frm, \"Unmasked\", ((int(((w+x)-x-lw)/2)+x),y-10), f, s, red, t)\n cv2.rectangle(frm, (x,y), (x+w,y+h), red, 2) # red colour rectangle if mask is not being worn\n elif pred == 2:\n cv2.circle(frm, (keypoints['left_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['right_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['nose']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_left']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_right']), 2, yellow, 2)\n (lw,lh), bl = cv2.getTextSize(\"Incorrectly Masked\", f, s, t)\n cv2.putText(frm, \"Incorrectly Masked\", ((int(((w+x)-x-lw)/2)+x),y-10), f, s, blue, t)\n cv2.rectangle(frm, (x,y), (x+w,y+h), blue, 2) # blue colour rectangle if mask is not worn correctly\n cv2.imshow('frame',frm)\n if (cv2.waitKey(1) & 0xFF) == ord('q'): # press 'q' to exit\n break\n else:\n break\ncap.release()\ncv2.destroyAllWindows()"
] | [
[
"torch.max",
"torch.load",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KarthikKothareddy/AirFlow | [
"faaf0b8b4467bcf5bff4a5b49086a9e02cb9c112"
] | [
"tests/core.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport json\nimport unittest\n\nimport bleach\nimport doctest\nimport mock\nimport multiprocessing\nimport os\nimport re\nimport signal\nimport sqlalchemy\nimport tempfile\nimport warnings\nfrom datetime import timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom freezegun import freeze_time\nfrom numpy.testing import assert_array_almost_equal\nfrom six.moves.urllib.parse import urlencode\nfrom time import sleep\n\nfrom airflow import configuration\nfrom airflow.executors import SequentialExecutor\nfrom airflow.models import Variable\n\nconfiguration.load_test_config()\nfrom airflow import jobs, models, DAG, utils, macros, settings, exceptions\nfrom airflow.models import BaseOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.check_operator import CheckOperator, ValueCheckOperator\nfrom airflow.operators.dagrun_operator import TriggerDagRunOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.dummy_operator import DummyOperator\n\nfrom airflow.hooks.base_hook import BaseHook\nfrom airflow.hooks.sqlite_hook import SqliteHook\nfrom airflow.bin import cli\nfrom airflow.www import app as application\nfrom airflow.settings import Session\nfrom airflow.utils import timezone\nfrom airflow.utils.timezone import datetime\nfrom airflow.utils.state import State\nfrom airflow.utils.dates import infer_time_unit, round_time, scale_time_units\nfrom lxml import html\nfrom airflow.exceptions import AirflowException\nfrom airflow.configuration import AirflowConfigException, run_command\nfrom jinja2.sandbox import SecurityError\nfrom jinja2 import UndefinedError\n\nimport six\n\nNUM_EXAMPLE_DAGS = 19\nDEV_NULL = '/dev/null'\nTEST_DAG_FOLDER = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), 'dags')\nDEFAULT_DATE = datetime(2015, 1, 1)\nDEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()\nDEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]\nTEST_DAG_ID = 'unit_tests'\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n # Python 3\n import pickle\n\n\ndef reset(dag_id=TEST_DAG_ID):\n session = Session()\n tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)\n tis.delete()\n session.commit()\n session.close()\n\n\nreset()\n\n\nclass OperatorSubclass(BaseOperator):\n \"\"\"\n An operator to test template substitution\n \"\"\"\n template_fields = ['some_templated_field']\n\n def __init__(self, some_templated_field, *args, **kwargs):\n super(OperatorSubclass, self).__init__(*args, **kwargs)\n self.some_templated_field = some_templated_field\n\n def execute(*args, **kwargs):\n pass\n\n\nclass CoreTest(unittest.TestCase):\n # These defaults make the test faster to run\n default_scheduler_args = {\"file_process_interval\": 0,\n \"processor_poll_interval\": 0.5,\n \"num_runs\": 1}\n\n def setUp(self):\n 
configuration.load_test_config()\n self.dagbag = models.DagBag(\n dag_folder=DEV_NULL, include_examples=True)\n self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}\n self.dag = DAG(TEST_DAG_ID, default_args=self.args)\n self.dag_bash = self.dagbag.dags['example_bash_operator']\n self.runme_0 = self.dag_bash.get_task('runme_0')\n self.run_after_loop = self.dag_bash.get_task('run_after_loop')\n self.run_this_last = self.dag_bash.get_task('run_this_last')\n\n def test_schedule_dag_no_previous_runs(self):\n \"\"\"\n Tests scheduling a dag with no previous runs\n \"\"\"\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')\n dag.add_task(models.BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n\n dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n self.assertIsNotNone(dag_run)\n self.assertEqual(dag.dag_id, dag_run.dag_id)\n self.assertIsNotNone(dag_run.run_id)\n self.assertNotEqual('', dag_run.run_id)\n self.assertEqual(\n datetime(2015, 1, 2, 0, 0),\n dag_run.execution_date,\n msg='dag_run.execution_date did not match expectation: {0}'\n .format(dag_run.execution_date)\n )\n self.assertEqual(State.RUNNING, dag_run.state)\n self.assertFalse(dag_run.external_trigger)\n dag.clear()\n\n def test_schedule_dag_fake_scheduled_previous(self):\n \"\"\"\n Test scheduling a dag where there is a prior DagRun\n which has the same run_id as the next run should have\n \"\"\"\n delta = timedelta(hours=1)\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',\n schedule_interval=delta,\n start_date=DEFAULT_DATE)\n dag.add_task(models.BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=DEFAULT_DATE))\n\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),\n execution_date=DEFAULT_DATE,\n state=State.SUCCESS,\n external_trigger=True)\n dag_run = scheduler.create_dag_run(dag)\n self.assertIsNotNone(dag_run)\n self.assertEqual(dag.dag_id, dag_run.dag_id)\n self.assertIsNotNone(dag_run.run_id)\n self.assertNotEqual('', dag_run.run_id)\n self.assertEqual(\n DEFAULT_DATE + delta,\n dag_run.execution_date,\n msg='dag_run.execution_date did not match expectation: {0}'\n .format(dag_run.execution_date)\n )\n self.assertEqual(State.RUNNING, dag_run.state)\n self.assertFalse(dag_run.external_trigger)\n\n def test_schedule_dag_once(self):\n \"\"\"\n Tests scheduling a dag scheduled for @once - should be scheduled the first time\n it is called, and not scheduled the second.\n \"\"\"\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')\n dag.schedule_interval = '@once'\n dag.add_task(models.BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n\n self.assertIsNotNone(dag_run)\n self.assertIsNone(dag_run2)\n dag.clear()\n\n def test_fractional_seconds(self):\n \"\"\"\n Tests if fractional seconds are stored in the database\n \"\"\"\n dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')\n dag.schedule_interval = '@once'\n dag.add_task(models.BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n\n start_date = timezone.utcnow()\n\n run = dag.create_dagrun(\n run_id='test_' + start_date.isoformat(),\n execution_date=start_date,\n start_date=start_date,\n 
state=State.RUNNING,\n external_trigger=False\n )\n\n run.refresh_from_db()\n\n self.assertEqual(start_date, run.execution_date,\n \"dag run execution_date loses precision\")\n self.assertEqual(start_date, run.start_date,\n \"dag run start_date loses precision \")\n\n def test_schedule_dag_start_end_dates(self):\n \"\"\"\n Tests that an attempt to schedule a task after the Dag's end_date\n does not succeed.\n \"\"\"\n delta = timedelta(hours=1)\n runs = 3\n start_date = DEFAULT_DATE\n end_date = start_date + (runs - 1) * delta\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',\n start_date=start_date,\n end_date=end_date,\n schedule_interval=delta)\n dag.add_task(models.BaseOperator(task_id='faketastic',\n owner='Also fake'))\n\n # Create and schedule the dag runs\n dag_runs = []\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n for i in range(runs):\n dag_runs.append(scheduler.create_dag_run(dag))\n\n additional_dag_run = scheduler.create_dag_run(dag)\n\n for dag_run in dag_runs:\n self.assertIsNotNone(dag_run)\n\n self.assertIsNone(additional_dag_run)\n\n @freeze_time('2016-01-01')\n def test_schedule_dag_no_end_date_up_to_today_only(self):\n \"\"\"\n Tests that a Dag created without an end_date can only be scheduled up\n to and including the current datetime.\n\n For example, if today is 2016-01-01 and we are scheduling from a\n start_date of 2015-01-01, only jobs up to, but not including\n 2016-01-01 should be scheduled.\n \"\"\"\n session = settings.Session()\n delta = timedelta(days=1)\n start_date = DEFAULT_DATE\n runs = 365\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',\n start_date=start_date,\n schedule_interval=delta)\n dag.add_task(models.BaseOperator(task_id='faketastic',\n owner='Also fake'))\n\n dag_runs = []\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n for i in range(runs):\n dag_run = scheduler.create_dag_run(dag)\n dag_runs.append(dag_run)\n\n # Mark the DagRun as complete\n dag_run.state = State.SUCCESS\n session.merge(dag_run)\n session.commit()\n\n # Attempt to schedule an additional dag run (for 2016-01-01)\n additional_dag_run = scheduler.create_dag_run(dag)\n\n for dag_run in dag_runs:\n self.assertIsNotNone(dag_run)\n\n self.assertIsNone(additional_dag_run)\n\n def test_confirm_unittest_mod(self):\n self.assertTrue(configuration.get('core', 'unit_test_mode'))\n\n def test_pickling(self):\n dp = self.dag.pickle()\n self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)\n\n def test_rich_comparison_ops(self):\n\n class DAGsubclass(DAG):\n pass\n\n dag_eq = DAG(TEST_DAG_ID, default_args=self.args)\n\n dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)\n dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)\n\n dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)\n dag_subclass_diff_name = DAGsubclass(\n TEST_DAG_ID + '2', default_args=self.args)\n\n for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:\n d.last_loaded = self.dag.last_loaded\n\n # test identity equality\n self.assertEqual(self.dag, self.dag)\n\n # test dag (in)equality based on _comps\n self.assertEqual(dag_eq, self.dag)\n self.assertNotEqual(dag_diff_name, self.dag)\n self.assertNotEqual(dag_diff_load_time, self.dag)\n\n # test dag inequality based on type even if _comps happen to match\n self.assertNotEqual(dag_subclass, self.dag)\n\n # a dag should equal an unpickled version of itself\n d = pickle.dumps(self.dag)\n self.assertEqual(pickle.loads(d), self.dag)\n\n # dags are 
ordered based on dag_id no matter what the type is\n self.assertLess(self.dag, dag_diff_name)\n self.assertGreater(self.dag, dag_diff_load_time)\n self.assertLess(self.dag, dag_subclass_diff_name)\n\n # greater than should have been created automatically by functools\n self.assertGreater(dag_diff_name, self.dag)\n\n # hashes are non-random and match equality\n self.assertEqual(hash(self.dag), hash(self.dag))\n self.assertEqual(hash(dag_eq), hash(self.dag))\n self.assertNotEqual(hash(dag_diff_name), hash(self.dag))\n self.assertNotEqual(hash(dag_subclass), hash(self.dag))\n\n def test_check_operators(self):\n\n conn_id = \"sqlite_default\"\n\n captainHook = BaseHook.get_hook(conn_id=conn_id)\n captainHook.run(\"CREATE TABLE operator_test_table (a, b)\")\n captainHook.run(\"insert into operator_test_table values (1,2)\")\n\n t = CheckOperator(\n task_id='check',\n sql=\"select count(*) from operator_test_table\",\n conn_id=conn_id,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n t = ValueCheckOperator(\n task_id='value_check',\n pass_value=95,\n tolerance=0.1,\n conn_id=conn_id,\n sql=\"SELECT 100\",\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n captainHook.run(\"drop table operator_test_table\")\n\n def test_clear_api(self):\n task = self.dag_bash.tasks[0]\n task.clear(\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,\n upstream=True, downstream=True)\n ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)\n ti.are_dependents_done()\n\n def test_illegal_args(self):\n \"\"\"\n Tests that Operators reject illegal arguments\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n t = BashOperator(\n task_id='test_illegal_args',\n bash_command='echo success',\n dag=self.dag,\n illegal_argument_1234='hello?')\n self.assertTrue(\n issubclass(w[0].category, PendingDeprecationWarning))\n self.assertIn(\n 'Invalid arguments were passed to BashOperator.',\n w[0].message.args[0])\n\n def test_bash_operator(self):\n t = BashOperator(\n task_id='test_bash_operator',\n bash_command=\"echo success\",\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_bash_operator_multi_byte_output(self):\n t = BashOperator(\n task_id='test_multi_byte_bash_operator',\n bash_command=u\"echo \\u2600\",\n dag=self.dag,\n output_encoding='utf-8')\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_bash_operator_kill(self):\n import psutil\n sleep_time = \"100%d\" % os.getpid()\n t = BashOperator(\n task_id='test_bash_operator_kill',\n execution_timeout=timedelta(seconds=1),\n bash_command=\"/bin/bash -c 'sleep %s'\" % sleep_time,\n dag=self.dag)\n self.assertRaises(\n exceptions.AirflowTaskTimeout,\n t.run,\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n sleep(2)\n pid = -1\n for proc in psutil.process_iter():\n if proc.cmdline() == ['sleep', sleep_time]:\n pid = proc.pid\n if pid != -1:\n os.kill(pid, signal.SIGTERM)\n self.fail(\"BashOperator's subprocess still running after stopping on timeout!\")\n\n def test_trigger_dagrun(self):\n def trigga(context, obj):\n if True:\n return obj\n\n t = TriggerDagRunOperator(\n task_id='test_trigger_dagrun',\n trigger_dag_id='example_bash_operator',\n python_callable=trigga,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_dryrun(self):\n t = BashOperator(\n task_id='test_dryrun',\n bash_command=\"echo success\",\n 
dag=self.dag)\n t.dry_run()\n\n def test_sqlite(self):\n import airflow.operators.sqlite_operator\n t = airflow.operators.sqlite_operator.SqliteOperator(\n task_id='time_sqlite',\n sql=\"CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))\",\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_timeout(self):\n t = PythonOperator(\n task_id='test_timeout',\n execution_timeout=timedelta(seconds=1),\n python_callable=lambda: sleep(5),\n dag=self.dag)\n self.assertRaises(\n exceptions.AirflowTaskTimeout,\n t.run,\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_python_op(self):\n def test_py_op(templates_dict, ds, **kwargs):\n if not templates_dict['ds'] == ds:\n raise Exception(\"failure\")\n\n t = PythonOperator(\n task_id='test_py_op',\n provide_context=True,\n python_callable=test_py_op,\n templates_dict={'ds': \"{{ ds }}\"},\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_complex_template(self):\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field['bar'][1], context['ds'])\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field={\n 'foo': '123',\n 'bar': ['baz', '{{ ds }}']\n },\n on_success_callback=verify_templated_field,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_template_with_variable(self):\n \"\"\"\n Test the availability of variables in templates\n \"\"\"\n val = {\n 'success': False,\n 'test_value': 'a test value'\n }\n Variable.set(\"a_variable\", val['test_value'])\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n val['test_value'])\n val['success'] = True\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.value.a_variable }}',\n on_success_callback=verify_templated_field,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n self.assertTrue(val['success'])\n\n def test_template_with_json_variable(self):\n \"\"\"\n Test the availability of variables (serialized as JSON) in templates\n \"\"\"\n val = {\n 'success': False,\n 'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}\n }\n Variable.set(\"a_variable\", val['test_value'], serialize_json=True)\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n val['test_value']['obj']['v2'])\n val['success'] = True\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.json.a_variable.obj.v2 }}',\n on_success_callback=verify_templated_field,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n self.assertTrue(val['success'])\n\n def test_template_with_json_variable_as_value(self):\n \"\"\"\n Test the availability of variables (serialized as JSON) in templates, but\n accessed as a value\n \"\"\"\n val = {\n 'success': False,\n 'test_value': {'foo': 'bar'}\n }\n Variable.set(\"a_variable\", val['test_value'], serialize_json=True)\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n u'{\"foo\": \"bar\"}')\n val['success'] = True\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.value.a_variable }}',\n on_success_callback=verify_templated_field,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, 
end_date=DEFAULT_DATE, ignore_ti_state=True)\n self.assertTrue(val['success'])\n\n def test_template_non_bool(self):\n \"\"\"\n Test templates can handle objects with no sense of truthiness\n \"\"\"\n\n class NonBoolObject(object):\n def __len__(self):\n return NotImplemented\n\n def __bool__(self):\n return NotImplemented\n\n t = OperatorSubclass(\n task_id='test_bad_template_obj',\n some_templated_field=NonBoolObject(),\n dag=self.dag)\n t.resolve_template_files()\n\n def test_import_examples(self):\n self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)\n\n def test_local_task_job(self):\n TI = models.TaskInstance\n ti = TI(\n task=self.runme_0, execution_date=DEFAULT_DATE)\n job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)\n job.run()\n\n def test_raw_job(self):\n TI = models.TaskInstance\n ti = TI(\n task=self.runme_0, execution_date=DEFAULT_DATE)\n ti.dag = self.dag_bash\n ti.run(ignore_ti_state=True)\n\n def test_doctests(self):\n modules = [utils, macros]\n for mod in modules:\n failed, tests = doctest.testmod(mod)\n if failed:\n raise Exception(\"Failed a doctest\")\n\n def test_variable_set_get_round_trip(self):\n Variable.set(\"tested_var_set_id\", \"Monday morning breakfast\")\n self.assertEqual(\"Monday morning breakfast\", Variable.get(\"tested_var_set_id\"))\n\n def test_variable_set_get_round_trip_json(self):\n value = {\"a\": 17, \"b\": 47}\n Variable.set(\"tested_var_set_id\", value, serialize_json=True)\n self.assertEqual(value, Variable.get(\"tested_var_set_id\", deserialize_json=True))\n\n def test_get_non_existing_var_should_return_default(self):\n default_value = \"some default val\"\n self.assertEqual(default_value, Variable.get(\"thisIdDoesNotExist\",\n default_var=default_value))\n\n def test_get_non_existing_var_should_not_deserialize_json_default(self):\n default_value = \"}{ this is a non JSON default }{\"\n self.assertEqual(default_value, Variable.get(\"thisIdDoesNotExist\",\n default_var=default_value,\n deserialize_json=True))\n\n def test_variable_setdefault_round_trip(self):\n key = \"tested_var_setdefault_1_id\"\n value = \"Monday morning breakfast in Paris\"\n Variable.setdefault(key, value)\n self.assertEqual(value, Variable.get(key))\n\n def test_variable_setdefault_round_trip_json(self):\n key = \"tested_var_setdefault_2_id\"\n value = {\"city\": 'Paris', \"Hapiness\": True}\n Variable.setdefault(key, value, deserialize_json=True)\n self.assertEqual(value, Variable.get(key, deserialize_json=True))\n\n def test_variable_setdefault_existing_json(self):\n key = \"tested_var_setdefault_2_id\"\n value = {\"city\": 'Paris', \"Hapiness\": True}\n Variable.set(key, value, serialize_json=True)\n val = Variable.setdefault(key, value, deserialize_json=True)\n # Check the returned value, and the stored value are handled correctly.\n self.assertEqual(value, val)\n self.assertEqual(value, Variable.get(key, deserialize_json=True))\n\n def test_parameterized_config_gen(self):\n\n cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)\n\n # making sure some basic building blocks are present:\n self.assertIn(\"[core]\", cfg)\n self.assertIn(\"dags_folder\", cfg)\n self.assertIn(\"sql_alchemy_conn\", cfg)\n self.assertIn(\"fernet_key\", cfg)\n\n # making sure replacement actually happened\n self.assertNotIn(\"{AIRFLOW_HOME}\", cfg)\n self.assertNotIn(\"{FERNET_KEY}\", cfg)\n\n def test_config_use_original_when_original_and_fallback_are_present(self):\n self.assertTrue(configuration.has_option(\"core\", \"FERNET_KEY\"))\n 
self.assertFalse(configuration.has_option(\"core\", \"FERNET_KEY_CMD\"))\n\n FERNET_KEY = configuration.get('core', 'FERNET_KEY')\n\n configuration.set(\"core\", \"FERNET_KEY_CMD\", \"printf HELLO\")\n\n FALLBACK_FERNET_KEY = configuration.get(\n \"core\",\n \"FERNET_KEY\"\n )\n\n self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)\n\n # restore the conf back to the original state\n configuration.remove_option(\"core\", \"FERNET_KEY_CMD\")\n\n def test_config_throw_error_when_original_and_fallback_is_absent(self):\n self.assertTrue(configuration.has_option(\"core\", \"FERNET_KEY\"))\n self.assertFalse(configuration.has_option(\"core\", \"FERNET_KEY_CMD\"))\n\n FERNET_KEY = configuration.get(\"core\", \"FERNET_KEY\")\n configuration.remove_option(\"core\", \"FERNET_KEY\")\n\n with self.assertRaises(AirflowConfigException) as cm:\n configuration.get(\"core\", \"FERNET_KEY\")\n\n exception = str(cm.exception)\n message = \"section/key [core/fernet_key] not found in config\"\n self.assertEqual(message, exception)\n\n # restore the conf back to the original state\n configuration.set(\"core\", \"FERNET_KEY\", FERNET_KEY)\n self.assertTrue(configuration.has_option(\"core\", \"FERNET_KEY\"))\n\n def test_config_override_original_when_non_empty_envvar_is_provided(self):\n key = \"AIRFLOW__CORE__FERNET_KEY\"\n value = \"some value\"\n self.assertNotIn(key, os.environ)\n\n os.environ[key] = value\n FERNET_KEY = configuration.get('core', 'FERNET_KEY')\n self.assertEqual(value, FERNET_KEY)\n\n # restore the envvar back to the original state\n del os.environ[key]\n\n def test_config_override_original_when_empty_envvar_is_provided(self):\n key = \"AIRFLOW__CORE__FERNET_KEY\"\n value = \"\"\n self.assertNotIn(key, os.environ)\n\n os.environ[key] = value\n FERNET_KEY = configuration.get('core', 'FERNET_KEY')\n self.assertEqual(value, FERNET_KEY)\n\n # restore the envvar back to the original state\n del os.environ[key]\n\n def test_round_time(self):\n\n rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))\n self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)\n\n rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))\n self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)\n\n rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)\n\n rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)\n\n rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)\n\n rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)\n\n def test_infer_time_unit(self):\n\n self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))\n\n self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))\n\n self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))\n\n self.assertEqual('days', infer_time_unit([200000, 100000]))\n\n def test_scale_time_units(self):\n\n # use assert_almost_equal from numpy.testing since we are comparing\n # floating point arrays\n arr1 = scale_time_units([130, 5400, 10], 'minutes')\n assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)\n\n arr2 = scale_time_units([110, 50, 10, 100], 'seconds')\n assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)\n\n arr3 = scale_time_units([100000, 50000, 10000, 
20000], 'hours')\n assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],\n decimal=3)\n\n arr4 = scale_time_units([200000, 100000], 'days')\n assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)\n\n def test_duplicate_dependencies(self):\n\n regexp = \"Dependency (.*)runme_0(.*)run_after_loop(.*) \" \\\n \"already registered\"\n\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.runme_0.set_downstream(self.run_after_loop)\n\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.run_after_loop.set_upstream(self.runme_0)\n\n def test_cyclic_dependencies_1(self):\n\n regexp = \"Cycle detected in DAG. (.*)runme_0(.*)\"\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.runme_0.set_upstream(self.run_after_loop)\n\n def test_cyclic_dependencies_2(self):\n regexp = \"Cycle detected in DAG. (.*)run_after_loop(.*)\"\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.run_after_loop.set_downstream(self.runme_0)\n\n def test_cyclic_dependencies_3(self):\n regexp = \"Cycle detected in DAG. (.*)run_this_last(.*)\"\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.run_this_last.set_downstream(self.runme_0)\n\n def test_bad_trigger_rule(self):\n with self.assertRaises(AirflowException):\n DummyOperator(\n task_id='test_bad_trigger',\n trigger_rule=\"non_existant\",\n dag=self.dag)\n\n def test_terminate_task(self):\n \"\"\"If a task instance's db state get deleted, it should fail\"\"\"\n TI = models.TaskInstance\n dag = self.dagbag.dags.get('test_utils')\n task = dag.task_dict.get('sleeps_forever')\n\n ti = TI(task=task, execution_date=DEFAULT_DATE)\n job = jobs.LocalTaskJob(\n task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())\n\n # Running task instance asynchronously\n p = multiprocessing.Process(target=job.run)\n p.start()\n sleep(5)\n settings.engine.dispose()\n session = settings.Session()\n ti.refresh_from_db(session=session)\n # making sure it's actually running\n self.assertEqual(State.RUNNING, ti.state)\n ti = session.query(TI).filter_by(\n dag_id=task.dag_id,\n task_id=task.task_id,\n execution_date=DEFAULT_DATE\n ).one()\n\n # deleting the instance should result in a failure\n session.delete(ti)\n session.commit()\n # waiting for the async task to finish\n p.join()\n\n # making sure that the task ended up as failed\n ti.refresh_from_db(session=session)\n self.assertEqual(State.FAILED, ti.state)\n session.close()\n\n def test_task_fail_duration(self):\n \"\"\"If a task fails, the duration should be recorded in TaskFail\"\"\"\n\n p = BashOperator(\n task_id='pass_sleepy',\n bash_command='sleep 3',\n dag=self.dag)\n f = BashOperator(\n task_id='fail_sleepy',\n bash_command='sleep 5',\n execution_timeout=timedelta(seconds=3),\n retry_delay=timedelta(seconds=0),\n dag=self.dag)\n session = settings.Session()\n try:\n p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n except:\n pass\n try:\n f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n except:\n pass\n p_fails = session.query(models.TaskFail).filter_by(\n task_id='pass_sleepy',\n dag_id=self.dag.dag_id,\n execution_date=DEFAULT_DATE).all()\n f_fails = session.query(models.TaskFail).filter_by(\n task_id='fail_sleepy',\n dag_id=self.dag.dag_id,\n execution_date=DEFAULT_DATE).all()\n print(f_fails)\n self.assertEqual(0, len(p_fails))\n self.assertEqual(1, len(f_fails))\n # C\n self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)\n\n def test_dag_stats(self):\n \"\"\"Correctly 
sets/dirties/cleans rows of DagStat table\"\"\"\n\n session = settings.Session()\n\n session.query(models.DagRun).delete()\n session.query(models.DagStat).delete()\n session.commit()\n\n models.DagStat.update([], session=session)\n\n run1 = self.dag_bash.create_dagrun(\n run_id=\"run1\",\n execution_date=DEFAULT_DATE,\n state=State.RUNNING)\n\n models.DagStat.update([self.dag_bash.dag_id], session=session)\n\n qry = session.query(models.DagStat).all()\n\n self.assertEqual(3, len(qry))\n self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)\n for stats in qry:\n if stats.state == State.RUNNING:\n self.assertEqual(stats.count, 1)\n else:\n self.assertEqual(stats.count, 0)\n self.assertFalse(stats.dirty)\n\n run2 = self.dag_bash.create_dagrun(\n run_id=\"run2\",\n execution_date=DEFAULT_DATE + timedelta(days=1),\n state=State.RUNNING)\n\n models.DagStat.update([self.dag_bash.dag_id], session=session)\n\n qry = session.query(models.DagStat).all()\n\n self.assertEqual(3, len(qry))\n self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)\n for stats in qry:\n if stats.state == State.RUNNING:\n self.assertEqual(stats.count, 2)\n else:\n self.assertEqual(stats.count, 0)\n self.assertFalse(stats.dirty)\n\n session.query(models.DagRun).first().state = State.SUCCESS\n session.commit()\n\n models.DagStat.update([self.dag_bash.dag_id], session=session)\n\n qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()\n self.assertEqual(1, len(qry))\n self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)\n self.assertEqual(State.SUCCESS, qry[0].state)\n self.assertEqual(1, qry[0].count)\n self.assertFalse(qry[0].dirty)\n\n qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()\n self.assertEqual(1, len(qry))\n self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)\n self.assertEqual(State.RUNNING, qry[0].state)\n self.assertEqual(1, qry[0].count)\n self.assertFalse(qry[0].dirty)\n\n session.query(models.DagRun).delete()\n session.query(models.DagStat).delete()\n session.commit()\n session.close()\n\n def test_run_command(self):\n if six.PY3:\n write = r'sys.stdout.buffer.write(\"\\u1000foo\".encode(\"utf8\"))'\n else:\n write = r'sys.stdout.write(u\"\\u1000foo\".encode(\"utf8\"))'\n\n cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)\n\n self.assertEqual(run_command(\"python -c '{0}'\".format(cmd)),\n u'\\u1000foo' if six.PY3 else 'foo')\n\n self.assertEqual(run_command('echo \"foo bar\"'), u'foo bar\\n')\n self.assertRaises(AirflowConfigException, run_command, 'bash -c \"exit 1\"')\n\n\nclass CliTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(CliTests, cls).setUpClass()\n cls._cleanup()\n\n def setUp(self):\n super(CliTests, self).setUp()\n configuration.load_test_config()\n app = application.create_app()\n app.config['TESTING'] = True\n self.parser = cli.CLIFactory.get_parser()\n self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)\n self.session = Session()\n\n def tearDown(self):\n self._cleanup(session=self.session)\n super(CliTests, self).tearDown()\n\n @staticmethod\n def _cleanup(session=None):\n if session is None:\n session = Session()\n\n session.query(models.Pool).delete()\n session.query(models.Variable).delete()\n session.commit()\n session.close()\n\n def test_cli_list_dags(self):\n args = self.parser.parse_args(['list_dags', '--report'])\n cli.list_dags(args)\n\n def test_cli_list_tasks(self):\n for dag_id in self.dagbag.dags.keys():\n args = self.parser.parse_args(['list_tasks', 
dag_id])\n cli.list_tasks(args)\n\n args = self.parser.parse_args([\n 'list_tasks', 'example_bash_operator', '--tree'])\n cli.list_tasks(args)\n\n @mock.patch(\"airflow.bin.cli.db_utils.initdb\")\n def test_cli_initdb(self, initdb_mock):\n cli.initdb(self.parser.parse_args(['initdb']))\n\n initdb_mock.assert_called_once_with()\n\n @mock.patch(\"airflow.bin.cli.db_utils.resetdb\")\n def test_cli_resetdb(self, resetdb_mock):\n cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))\n\n resetdb_mock.assert_called_once_with()\n\n def test_cli_connections_list(self):\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(['connections', '--list']))\n stdout = mock_stdout.getvalue()\n conns = [[x.strip(\"'\") for x in re.findall(\"'\\w+'\", line)[:2]]\n for ii, line in enumerate(stdout.split('\\n'))\n if ii % 2 == 1]\n conns = [conn for conn in conns if len(conn) > 0]\n\n # Assert that some of the connections are present in the output as\n # expected:\n self.assertIn(['aws_default', 'aws'], conns)\n self.assertIn(['beeline_default', 'beeline'], conns)\n self.assertIn(['bigquery_default', 'bigquery'], conns)\n self.assertIn(['emr_default', 'emr'], conns)\n self.assertIn(['mssql_default', 'mssql'], conns)\n self.assertIn(['mysql_default', 'mysql'], conns)\n self.assertIn(['postgres_default', 'postgres'], conns)\n self.assertIn(['wasb_default', 'wasb'], conns)\n\n # Attempt to list connections with invalid cli args\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',\n '--conn_type=fake-type', '--conn_host=fake_host',\n '--conn_login=fake_login', '--conn_password=fake_password',\n '--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))\n stdout = mock_stdout.getvalue()\n\n # Check list attempt stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tThe following args are not compatible with the \" +\n \"--list flag: ['conn_id', 'conn_uri', 'conn_extra', 'conn_type', 'conn_host', 'conn_login', 'conn_password', 'conn_schema', 'conn_port']\"),\n ])\n\n def test_cli_connections_add_delete(self):\n # Add connections:\n uri = 'postgresql://airflow:airflow@host:5432/airflow'\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new1',\n '--conn_uri=%s' % uri]))\n cli.connections(self.parser.parse_args(\n ['connections', '-a', '--conn_id=new2',\n '--conn_uri=%s' % uri]))\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new3',\n '--conn_uri=%s' % uri, '--conn_extra', \"{'extra': 'yes'}\"]))\n cli.connections(self.parser.parse_args(\n ['connections', '-a', '--conn_id=new4',\n '--conn_uri=%s' % uri, '--conn_extra', \"{'extra': 'yes'}\"]))\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new5',\n '--conn_type=hive_metastore', '--conn_login=airflow',\n '--conn_password=airflow', '--conn_host=host',\n '--conn_port=9083', '--conn_schema=airflow']))\n cli.connections(self.parser.parse_args(\n ['connections', '-a', '--conn_id=new6',\n '--conn_uri', \"\", '--conn_type=google_cloud_platform', '--conn_extra', \"{'extra': 'yes'}\"]))\n stdout = mock_stdout.getvalue()\n\n # Check addition stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n 
(\"\\tSuccessfully added `conn_id`=new1 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new2 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new3 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new4 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new5 : \" +\n \"hive_metastore://airflow:airflow@host:9083/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new6 : \" +\n \"google_cloud_platform://:@:\")\n ])\n\n # Attempt to add duplicate\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new1',\n '--conn_uri=%s' % uri]))\n stdout = mock_stdout.getvalue()\n\n # Check stdout for addition attempt\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tA connection with `conn_id`=new1 already exists\",\n ])\n\n # Attempt to add without providing conn_id\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_uri=%s' % uri]))\n stdout = mock_stdout.getvalue()\n\n # Check stdout for addition attempt\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tThe following args are required to add a connection:\" +\n \" ['conn_id']\"),\n ])\n\n # Attempt to add without providing conn_uri\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new']))\n stdout = mock_stdout.getvalue()\n\n # Check stdout for addition attempt\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tThe following args are required to add a connection:\" +\n \" ['conn_uri or conn_type']\"),\n ])\n\n # Prepare to add connections\n session = settings.Session()\n extra = {'new1': None,\n 'new2': None,\n 'new3': \"{'extra': 'yes'}\",\n 'new4': \"{'extra': 'yes'}\"}\n\n # Add connections\n for index in range(1, 6):\n conn_id = 'new%s' % index\n result = (session\n .query(models.Connection)\n .filter(models.Connection.conn_id == conn_id)\n .first())\n result = (result.conn_id, result.conn_type, result.host,\n result.port, result.get_extra())\n if conn_id in ['new1', 'new2', 'new3', 'new4']:\n self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,\n extra[conn_id]))\n elif conn_id == 'new5':\n self.assertEqual(result, (conn_id, 'hive_metastore', 'host',\n 9083, None))\n elif conn_id == 'new6':\n self.assertEqual(result, (conn_id, 'google_cloud_platform',\n None, None, \"{'extra': 'yes'}\"))\n\n # Delete connections\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new1']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new2']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new3']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new4']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new5']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new6']))\n stdout = mock_stdout.getvalue()\n\n # Check deletion stdout\n lines = [l for 
l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tSuccessfully deleted `conn_id`=new1\",\n \"\\tSuccessfully deleted `conn_id`=new2\",\n \"\\tSuccessfully deleted `conn_id`=new3\",\n \"\\tSuccessfully deleted `conn_id`=new4\",\n \"\\tSuccessfully deleted `conn_id`=new5\",\n \"\\tSuccessfully deleted `conn_id`=new6\"\n ])\n\n # Check deletions\n for index in range(1, 7):\n conn_id = 'new%s' % index\n result = (session.query(models.Connection)\n .filter(models.Connection.conn_id == conn_id)\n .first())\n\n self.assertTrue(result is None)\n\n # Attempt to delete a non-existing connnection\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=fake']))\n stdout = mock_stdout.getvalue()\n\n # Check deletion attempt stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tDid not find a connection with `conn_id`=fake\",\n ])\n\n # Attempt to delete with invalid cli args\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=fake',\n '--conn_uri=%s' % uri, '--conn_type=fake-type']))\n stdout = mock_stdout.getvalue()\n\n # Check deletion attempt stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tThe following args are not compatible with the \" +\n \"--delete flag: ['conn_uri', 'conn_type']\"),\n ])\n\n session.close()\n\n def test_cli_test(self):\n cli.test(self.parser.parse_args([\n 'test', 'example_bash_operator', 'runme_0',\n DEFAULT_DATE.isoformat()]))\n cli.test(self.parser.parse_args([\n 'test', 'example_bash_operator', 'runme_0', '--dry_run',\n DEFAULT_DATE.isoformat()]))\n\n def test_cli_test_with_params(self):\n cli.test(self.parser.parse_args([\n 'test', 'example_passing_params_via_test_command', 'run_this',\n '-tp', '{\"foo\":\"bar\"}', DEFAULT_DATE.isoformat()]))\n cli.test(self.parser.parse_args([\n 'test', 'example_passing_params_via_test_command', 'also_run_this',\n '-tp', '{\"foo\":\"bar\"}', DEFAULT_DATE.isoformat()]))\n\n def test_cli_run(self):\n cli.run(self.parser.parse_args([\n 'run', 'example_bash_operator', 'runme_0', '-l',\n DEFAULT_DATE.isoformat()]))\n\n def test_task_state(self):\n cli.task_state(self.parser.parse_args([\n 'task_state', 'example_bash_operator', 'runme_0',\n DEFAULT_DATE.isoformat()]))\n\n def test_dag_state(self):\n self.assertEqual(None, cli.dag_state(self.parser.parse_args([\n 'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))\n\n def test_pause(self):\n args = self.parser.parse_args([\n 'pause', 'example_bash_operator'])\n cli.pause(args)\n self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])\n\n args = self.parser.parse_args([\n 'unpause', 'example_bash_operator'])\n cli.unpause(args)\n self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])\n\n def test_subdag_clear(self):\n args = self.parser.parse_args([\n 'clear', 'example_subdag_operator', '--no_confirm'])\n cli.clear(args)\n args = self.parser.parse_args([\n 'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])\n cli.clear(args)\n\n def test_get_dags(self):\n dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))\n self.assertEqual(len(dags), 1)\n\n dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))\n 
self.assertGreater(len(dags), 1)\n\n with self.assertRaises(AirflowException):\n cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))\n\n def test_backfill(self):\n cli.backfill(self.parser.parse_args([\n 'backfill', 'example_bash_operator',\n '-s', DEFAULT_DATE.isoformat()]))\n\n cli.backfill(self.parser.parse_args([\n 'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',\n '-s', DEFAULT_DATE.isoformat()]))\n\n cli.backfill(self.parser.parse_args([\n 'backfill', 'example_bash_operator', '--dry_run',\n '-s', DEFAULT_DATE.isoformat()]))\n\n cli.backfill(self.parser.parse_args([\n 'backfill', 'example_bash_operator', '-l',\n '-s', DEFAULT_DATE.isoformat()]))\n\n def test_process_subdir_path_with_placeholder(self):\n self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))\n\n def test_trigger_dag(self):\n cli.trigger_dag(self.parser.parse_args([\n 'trigger_dag', 'example_bash_operator',\n '-c', '{\"foo\": \"bar\"}']))\n self.assertRaises(\n ValueError,\n cli.trigger_dag,\n self.parser.parse_args([\n 'trigger_dag', 'example_bash_operator',\n '--run_id', 'trigger_dag_xxx',\n '-c', 'NOT JSON'])\n )\n\n def test_delete_dag(self):\n DM = models.DagModel\n key = \"my_dag_id\"\n session = settings.Session()\n session.add(DM(dag_id=key))\n session.commit()\n cli.delete_dag(self.parser.parse_args([\n 'delete_dag', key, '--yes']))\n self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)\n self.assertRaises(\n AirflowException,\n cli.delete_dag,\n self.parser.parse_args([\n 'delete_dag',\n 'does_not_exist_dag',\n '--yes'])\n )\n\n def test_pool_create(self):\n cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))\n self.assertEqual(self.session.query(models.Pool).count(), 1)\n\n def test_pool_get(self):\n cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))\n try:\n cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))\n except Exception as e:\n self.fail(\"The 'pool -g foo' command raised unexpectedly: %s\" % e)\n\n def test_pool_delete(self):\n cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))\n cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))\n self.assertEqual(self.session.query(models.Pool).count(), 0)\n\n def test_pool_no_args(self):\n try:\n cli.pool(self.parser.parse_args(['pool']))\n except Exception as e:\n self.fail(\"The 'pool' command raised unexpectedly: %s\" % e)\n\n def test_variables(self):\n # Checks if all subcommands are properly received\n cli.variables(self.parser.parse_args([\n 'variables', '-s', 'foo', '{\"foo\":\"bar\"}']))\n cli.variables(self.parser.parse_args([\n 'variables', '-g', 'foo']))\n cli.variables(self.parser.parse_args([\n 'variables', '-g', 'baz', '-d', 'bar']))\n cli.variables(self.parser.parse_args([\n 'variables']))\n cli.variables(self.parser.parse_args([\n 'variables', '-x', 'bar']))\n cli.variables(self.parser.parse_args([\n 'variables', '-i', DEV_NULL]))\n cli.variables(self.parser.parse_args([\n 'variables', '-e', DEV_NULL]))\n\n cli.variables(self.parser.parse_args([\n 'variables', '-s', 'bar', 'original']))\n # First export\n cli.variables(self.parser.parse_args([\n 'variables', '-e', 'variables1.json']))\n\n first_exp = open('variables1.json', 'r')\n\n cli.variables(self.parser.parse_args([\n 'variables', '-s', 'bar', 'updated']))\n cli.variables(self.parser.parse_args([\n 'variables', '-s', 'foo', '{\"foo\":\"oops\"}']))\n cli.variables(self.parser.parse_args([\n 'variables', '-x', 'foo']))\n # First import\n 
cli.variables(self.parser.parse_args([\n 'variables', '-i', 'variables1.json']))\n\n self.assertEqual('original', models.Variable.get('bar'))\n self.assertEqual('{\"foo\": \"bar\"}', models.Variable.get('foo'))\n # Second export\n cli.variables(self.parser.parse_args([\n 'variables', '-e', 'variables2.json']))\n\n second_exp = open('variables2.json', 'r')\n self.assertEqual(first_exp.read(), second_exp.read())\n second_exp.close()\n first_exp.close()\n # Second import\n cli.variables(self.parser.parse_args([\n 'variables', '-i', 'variables2.json']))\n\n self.assertEqual('original', models.Variable.get('bar'))\n self.assertEqual('{\"foo\": \"bar\"}', models.Variable.get('foo'))\n\n os.remove('variables1.json')\n os.remove('variables2.json')\n\n def _wait_pidfile(self, pidfile):\n while True:\n try:\n with open(pidfile) as f:\n return int(f.read())\n except:\n sleep(1)\n\n def test_cli_webserver_foreground(self):\n import subprocess\n\n # Confirm that webserver hasn't been launched.\n # pgrep returns exit status 1 if no process matched.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Run webserver in foreground and terminate it.\n p = subprocess.Popen([\"airflow\", \"webserver\"])\n p.terminate()\n p.wait()\n\n # Assert that no process remains.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n @unittest.skipIf(\"TRAVIS\" in os.environ and bool(os.environ[\"TRAVIS\"]),\n \"Skipping test due to lack of required file permission\")\n def test_cli_webserver_foreground_with_pid(self):\n import subprocess\n\n # Run webserver in foreground with --pid option\n pidfile = tempfile.mkstemp()[1]\n p = subprocess.Popen([\"airflow\", \"webserver\", \"--pid\", pidfile])\n\n # Check the file specified by --pid option exists\n self._wait_pidfile(pidfile)\n\n # Terminate webserver\n p.terminate()\n p.wait()\n\n @unittest.skipIf(\"TRAVIS\" in os.environ and bool(os.environ[\"TRAVIS\"]),\n \"Skipping test due to lack of required file permission\")\n def test_cli_webserver_background(self):\n import subprocess\n import psutil\n\n # Confirm that webserver hasn't been launched.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Run webserver in background.\n subprocess.Popen([\"airflow\", \"webserver\", \"-D\"])\n pidfile = cli.setup_locations(\"webserver\")[0]\n self._wait_pidfile(pidfile)\n\n # Assert that gunicorn and its monitor are launched.\n self.assertEqual(0, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(0, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Terminate monitor process.\n pidfile = cli.setup_locations(\"webserver-monitor\")[0]\n pid = self._wait_pidfile(pidfile)\n p = psutil.Process(pid)\n p.terminate()\n p.wait()\n\n # Assert that no process remains.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n\nclass SecurityTests(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n configuration.conf.set(\"webserver\", \"expose_config\", \"True\")\n app = application.create_app()\n 
app.config['TESTING'] = True\n self.app = app.test_client()\n\n self.dagbag = models.DagBag(\n dag_folder=DEV_NULL, include_examples=True)\n self.dag_bash = self.dagbag.dags['example_bash_operator']\n self.runme_0 = self.dag_bash.get_task('runme_0')\n\n def get_csrf(self, response):\n tree = html.fromstring(response.data)\n form = tree.find('.//form')\n\n return form.find('.//input[@name=\"_csrf_token\"]').value\n\n def test_csrf_rejection(self):\n endpoints = ([\n \"/admin/queryview/\",\n \"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false\",\n ])\n for endpoint in endpoints:\n response = self.app.post(endpoint)\n self.assertIn('CSRF token is missing', response.data.decode('utf-8'))\n\n def test_csrf_acceptance(self):\n response = self.app.get(\"/admin/queryview/\")\n csrf = self.get_csrf(response)\n response = self.app.post(\"/admin/queryview/\", data=dict(csrf_token=csrf))\n self.assertEqual(200, response.status_code)\n\n def test_xss(self):\n try:\n self.app.get(\"/admin/airflow/tree?dag_id=<script>alert(123456)</script>\")\n except:\n # exception is expected here since dag doesnt exist\n pass\n response = self.app.get(\"/admin/log\", follow_redirects=True)\n self.assertIn(bleach.clean(\"<script>alert(123456)</script>\"), response.data.decode('UTF-8'))\n\n def test_chart_data_template(self):\n \"\"\"Protect chart_data from being able to do RCE.\"\"\"\n session = settings.Session()\n Chart = models.Chart\n chart1 = Chart(\n label='insecure_chart',\n conn_id='airflow_db',\n chart_type='bar',\n sql=\"SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}\"\n )\n chart2 = Chart(\n label=\"{{ ''.__class__.__mro__[1].__subclasses__() }}\",\n conn_id='airflow_db',\n chart_type='bar',\n sql=\"SELECT 1\"\n )\n chart3 = Chart(\n label=\"{{ subprocess.check_output('ls') }}\",\n conn_id='airflow_db',\n chart_type='bar',\n sql=\"SELECT 1\"\n )\n session.add(chart1)\n session.add(chart2)\n session.add(chart3)\n session.commit()\n chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()\n with self.assertRaises(SecurityError):\n self.app.get(\"/admin/airflow/chart_data?chart_id={}\".format(chart1.id))\n\n chart2 = session.query(Chart).filter(\n Chart.label == \"{{ ''.__class__.__mro__[1].__subclasses__() }}\"\n ).first()\n with self.assertRaises(SecurityError):\n self.app.get(\"/admin/airflow/chart_data?chart_id={}\".format(chart2.id))\n\n chart3 = session.query(Chart).filter(\n Chart.label == \"{{ subprocess.check_output('ls') }}\"\n ).first()\n with self.assertRaises(UndefinedError):\n self.app.get(\"/admin/airflow/chart_data?chart_id={}\".format(chart3.id))\n\n def tearDown(self):\n configuration.conf.set(\"webserver\", \"expose_config\", \"False\")\n self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())\n\n\nclass WebUiTests(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n configuration.conf.set(\"webserver\", \"expose_config\", \"True\")\n app = application.create_app()\n app.config['TESTING'] = True\n app.config['WTF_CSRF_METHODS'] = []\n self.app = app.test_client()\n\n self.dagbag = models.DagBag(include_examples=True)\n self.dag_bash = self.dagbag.dags['example_bash_operator']\n self.dag_bash2 = self.dagbag.dags['test_example_bash_operator']\n self.sub_dag = self.dagbag.dags['example_subdag_operator']\n self.runme_0 = self.dag_bash.get_task('runme_0')\n self.example_xcom = self.dagbag.dags['example_xcom']\n\n self.dagrun_bash2 = 
self.dag_bash2.create_dagrun(\n run_id=\"test_{}\".format(models.DagRun.id_for_date(timezone.utcnow())),\n execution_date=DEFAULT_DATE,\n start_date=timezone.utcnow(),\n state=State.RUNNING\n )\n\n self.sub_dag.create_dagrun(\n run_id=\"test_{}\".format(models.DagRun.id_for_date(timezone.utcnow())),\n execution_date=DEFAULT_DATE,\n start_date=timezone.utcnow(),\n state=State.RUNNING\n )\n\n self.example_xcom.create_dagrun(\n run_id=\"test_{}\".format(models.DagRun.id_for_date(timezone.utcnow())),\n execution_date=DEFAULT_DATE,\n start_date=timezone.utcnow(),\n state=State.RUNNING\n )\n\n def test_index(self):\n response = self.app.get('/', follow_redirects=True)\n resp_html = response.data.decode('utf-8')\n self.assertIn(\"DAGs\", resp_html)\n self.assertIn(\"example_bash_operator\", resp_html)\n\n # The HTML should contain data for the last-run. A link to the specific run, and the text of\n # the date.\n url = \"/admin/airflow/graph?\" + urlencode({\n \"dag_id\": self.dag_bash2.dag_id,\n \"execution_date\": self.dagrun_bash2.execution_date,\n }).replace(\"&\", \"&\")\n self.assertIn(url, resp_html)\n self.assertIn(self.dagrun_bash2.execution_date.strftime(\"%Y-%m-%d %H:%M\"), resp_html)\n\n def test_query(self):\n response = self.app.get('/admin/queryview/')\n self.assertIn(\"Ad Hoc Query\", response.data.decode('utf-8'))\n response = self.app.post(\n \"/admin/queryview/\", data=dict(\n conn_id=\"airflow_db\",\n sql=\"SELECT+COUNT%281%29+as+TEST+FROM+task_instance\"))\n self.assertIn(\"TEST\", response.data.decode('utf-8'))\n\n def test_health(self):\n response = self.app.get('/health')\n self.assertIn('The server is healthy!', response.data.decode('utf-8'))\n\n def test_noaccess(self):\n response = self.app.get('/admin/airflow/noaccess')\n self.assertIn(\"You don't seem to have access.\", response.data.decode('utf-8'))\n\n def test_pickle_info(self):\n response = self.app.get('/admin/airflow/pickle_info')\n self.assertIn('{', response.data.decode('utf-8'))\n\n def test_dag_views(self):\n response = self.app.get(\n '/admin/airflow/graph?dag_id=example_bash_operator')\n self.assertIn(\"runme_0\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')\n self.assertIn(\"runme_0\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/duration?days=30&dag_id=example_bash_operator')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/tries?days=30&dag_id=example_bash_operator')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/landing_times?'\n 'days=30&dag_id=test_example_bash_operator')\n self.assertIn(\"test_example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/landing_times?'\n 'days=30&dag_id=example_xcom')\n self.assertIn(\"example_xcom\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/gantt?dag_id=example_bash_operator')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/code?dag_id=example_bash_operator')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/blocked')\n response = self.app.get(\n '/admin/configurationview/')\n self.assertIn(\"Airflow Configuration\", response.data.decode('utf-8'))\n self.assertIn(\"Running Configuration\", 
response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/rendered?'\n 'task_id=runme_1&dag_id=example_bash_operator&'\n 'execution_date={}'.format(DEFAULT_DATE_ISO))\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/log?task_id=run_this_last&'\n 'dag_id=example_bash_operator&execution_date={}'\n ''.format(DEFAULT_DATE_ISO))\n self.assertIn(\"run_this_last\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/task?'\n 'task_id=runme_0&dag_id=example_bash_operator&'\n 'execution_date={}'.format(DEFAULT_DATE_DS))\n self.assertIn(\"Attributes\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/dag_stats')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/task_stats')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n url = (\n \"/admin/airflow/success?task_id=run_this_last&\"\n \"dag_id=test_example_bash_operator&upstream=false&downstream=false&\"\n \"future=false&past=false&execution_date={}&\"\n \"origin=/admin\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n self.assertIn(\"Wait a minute\", response.data.decode('utf-8'))\n response = self.app.get(url + \"&confirmed=true\")\n response = self.app.get(\n '/admin/airflow/clear?task_id=run_this_last&'\n 'dag_id=test_example_bash_operator&future=true&past=false&'\n 'upstream=true&downstream=false&'\n 'execution_date={}&'\n 'origin=/admin'.format(DEFAULT_DATE_DS))\n self.assertIn(\"Wait a minute\", response.data.decode('utf-8'))\n url = (\n \"/admin/airflow/success?task_id=section-1&\"\n \"dag_id=example_subdag_operator&upstream=true&downstream=true&\"\n \"future=false&past=false&execution_date={}&\"\n \"origin=/admin\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n self.assertIn(\"Wait a minute\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-1\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-2\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-3\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-4\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-5\", response.data.decode('utf-8'))\n response = self.app.get(url + \"&confirmed=true\")\n url = (\n \"/admin/airflow/clear?task_id=runme_1&\"\n \"dag_id=test_example_bash_operator&future=false&past=false&\"\n \"upstream=false&downstream=true&\"\n \"execution_date={}&\"\n \"origin=/admin\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n self.assertIn(\"Wait a minute\", response.data.decode('utf-8'))\n response = self.app.get(url + \"&confirmed=true\")\n url = (\n \"/admin/airflow/run?task_id=runme_0&\"\n \"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&\"\n \"ignore_task_deps=true&execution_date={}&\"\n \"origin=/admin\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n response = self.app.get(\n \"/admin/airflow/refresh?dag_id=example_bash_operator\")\n response = self.app.get(\"/admin/airflow/refresh_all\")\n response = self.app.post(\n \"/admin/airflow/paused?\"\n \"dag_id=example_python_operator&is_paused=false\")\n self.assertIn(\"OK\", response.data.decode('utf-8'))\n response = self.app.get(\"/admin/xcom\", follow_redirects=True)\n self.assertIn(\"Xcoms\", response.data.decode('utf-8'))\n\n def test_charts(self):\n session = Session()\n chart_label = \"Airflow task instance by type\"\n chart = session.query(\n 
models.Chart).filter(models.Chart.label == chart_label).first()\n chart_id = chart.id\n session.close()\n response = self.app.get(\n '/admin/airflow/chart'\n '?chart_id={}&iteration_no=1'.format(chart_id))\n self.assertIn(\"Airflow task instance by type\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/chart_data'\n '?chart_id={}&iteration_no=1'.format(chart_id))\n self.assertIn(\"example\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/dag_details?dag_id=example_branch_operator')\n self.assertIn(\"run_this_first\", response.data.decode('utf-8'))\n\n def test_fetch_task_instance(self):\n url = (\n \"/admin/airflow/object/task_instances?\"\n \"dag_id=test_example_bash_operator&\"\n \"execution_date={}\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n self.assertIn(\"run_this_last\", response.data.decode('utf-8'))\n\n def tearDown(self):\n configuration.conf.set(\"webserver\", \"expose_config\", \"False\")\n self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())\n session = Session()\n session.query(models.DagRun).delete()\n session.query(models.TaskInstance).delete()\n session.commit()\n session.close()\n\n\nclass SecureModeWebUiTests(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n configuration.conf.set(\"core\", \"secure_mode\", \"True\")\n app = application.create_app()\n app.config['TESTING'] = True\n self.app = app.test_client()\n\n def test_query(self):\n response = self.app.get('/admin/queryview/')\n self.assertEqual(response.status_code, 404)\n\n def test_charts(self):\n response = self.app.get('/admin/chart/')\n self.assertEqual(response.status_code, 404)\n\n def tearDown(self):\n configuration.remove_option(\"core\", \"SECURE_MODE\")\n\n\nclass WebPasswordAuthTest(unittest.TestCase):\n def setUp(self):\n configuration.conf.set(\"webserver\", \"authenticate\", \"True\")\n configuration.conf.set(\"webserver\", \"auth_backend\", \"airflow.contrib.auth.backends.password_auth\")\n\n app = application.create_app()\n app.config['TESTING'] = True\n self.app = app.test_client()\n from airflow.contrib.auth.backends.password_auth import PasswordUser\n\n session = Session()\n user = models.User()\n password_user = PasswordUser(user)\n password_user.username = 'airflow_passwordauth'\n password_user.password = 'password'\n print(password_user._password)\n session.add(password_user)\n session.commit()\n session.close()\n\n def get_csrf(self, response):\n tree = html.fromstring(response.data)\n form = tree.find('.//form')\n\n return form.find('.//input[@name=\"_csrf_token\"]').value\n\n def login(self, username, password):\n response = self.app.get('/admin/airflow/login')\n csrf_token = self.get_csrf(response)\n\n return self.app.post('/admin/airflow/login', data=dict(\n username=username,\n password=password,\n csrf_token=csrf_token\n ), follow_redirects=True)\n\n def logout(self):\n return self.app.get('/admin/airflow/logout', follow_redirects=True)\n\n def test_login_logout_password_auth(self):\n self.assertTrue(configuration.getboolean('webserver', 'authenticate'))\n\n response = self.login('user1', 'whatever')\n self.assertIn('Incorrect login details', response.data.decode('utf-8'))\n\n response = self.login('airflow_passwordauth', 'wrongpassword')\n self.assertIn('Incorrect login details', response.data.decode('utf-8'))\n\n response = self.login('airflow_passwordauth', 'password')\n self.assertIn('Data 
Profiling', response.data.decode('utf-8'))\n\n response = self.logout()\n self.assertIn('form-signin', response.data.decode('utf-8'))\n\n def test_unauthorized_password_auth(self):\n response = self.app.get(\"/admin/airflow/landing_times\")\n self.assertEqual(response.status_code, 302)\n\n def tearDown(self):\n configuration.load_test_config()\n session = Session()\n session.query(models.User).delete()\n session.commit()\n session.close()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n\n\nclass WebLdapAuthTest(unittest.TestCase):\n def setUp(self):\n configuration.conf.set(\"webserver\", \"authenticate\", \"True\")\n configuration.conf.set(\"webserver\", \"auth_backend\", \"airflow.contrib.auth.backends.ldap_auth\")\n try:\n configuration.conf.add_section(\"ldap\")\n except:\n pass\n configuration.conf.set(\"ldap\", \"uri\", \"ldap://localhost:3890\")\n configuration.conf.set(\"ldap\", \"user_filter\", \"objectClass=*\")\n configuration.conf.set(\"ldap\", \"user_name_attr\", \"uid\")\n configuration.conf.set(\"ldap\", \"bind_user\", \"cn=Manager,dc=example,dc=com\")\n configuration.conf.set(\"ldap\", \"bind_password\", \"insecure\")\n configuration.conf.set(\"ldap\", \"basedn\", \"dc=example,dc=com\")\n configuration.conf.set(\"ldap\", \"cacert\", \"\")\n\n app = application.create_app()\n app.config['TESTING'] = True\n self.app = app.test_client()\n\n def get_csrf(self, response):\n tree = html.fromstring(response.data)\n form = tree.find('.//form')\n\n return form.find('.//input[@name=\"_csrf_token\"]').value\n\n def login(self, username, password):\n response = self.app.get('/admin/airflow/login')\n csrf_token = self.get_csrf(response)\n\n return self.app.post('/admin/airflow/login', data=dict(\n username=username,\n password=password,\n csrf_token=csrf_token\n ), follow_redirects=True)\n\n def logout(self):\n return self.app.get('/admin/airflow/logout', follow_redirects=True)\n\n def test_login_logout_ldap(self):\n self.assertTrue(configuration.getboolean('webserver', 'authenticate'))\n\n response = self.login('user1', 'userx')\n self.assertIn('Incorrect login details', response.data.decode('utf-8'))\n\n response = self.login('userz', 'user1')\n self.assertIn('Incorrect login details', response.data.decode('utf-8'))\n\n response = self.login('user1', 'user1')\n self.assertIn('Data Profiling', response.data.decode('utf-8'))\n\n response = self.logout()\n self.assertIn('form-signin', response.data.decode('utf-8'))\n\n def test_unauthorized(self):\n response = self.app.get(\"/admin/airflow/landing_times\")\n self.assertEqual(response.status_code, 302)\n\n def test_no_filter(self):\n response = self.login('user1', 'user1')\n self.assertIn('Data Profiling', response.data.decode('utf-8'))\n self.assertIn('Connections', response.data.decode('utf-8'))\n\n def test_with_filters(self):\n configuration.conf.set('ldap', 'superuser_filter',\n 'description=superuser')\n configuration.conf.set('ldap', 'data_profiler_filter',\n 'description=dataprofiler')\n\n response = self.login('dataprofiler', 'dataprofiler')\n self.assertIn('Data Profiling', response.data.decode('utf-8'))\n\n response = self.login('superuser', 'superuser')\n self.assertIn('Connections', response.data.decode('utf-8'))\n\n def tearDown(self):\n configuration.load_test_config()\n session = Session()\n session.query(models.User).delete()\n session.commit()\n session.close()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n\n\nclass LdapGroupTest(unittest.TestCase):\n def setUp(self):\n 
configuration.conf.set(\"webserver\", \"authenticate\", \"True\")\n configuration.conf.set(\"webserver\", \"auth_backend\", \"airflow.contrib.auth.backends.ldap_auth\")\n try:\n configuration.conf.add_section(\"ldap\")\n except:\n pass\n configuration.conf.set(\"ldap\", \"uri\", \"ldap://localhost:3890\")\n configuration.conf.set(\"ldap\", \"user_filter\", \"objectClass=*\")\n configuration.conf.set(\"ldap\", \"user_name_attr\", \"uid\")\n configuration.conf.set(\"ldap\", \"bind_user\", \"cn=Manager,dc=example,dc=com\")\n configuration.conf.set(\"ldap\", \"bind_password\", \"insecure\")\n configuration.conf.set(\"ldap\", \"basedn\", \"dc=example,dc=com\")\n configuration.conf.set(\"ldap\", \"cacert\", \"\")\n\n def test_group_belonging(self):\n from airflow.contrib.auth.backends.ldap_auth import LdapUser\n users = {\"user1\": [\"group1\", \"group3\"],\n \"user2\": [\"group2\"]\n }\n for user in users:\n mu = models.User(username=user,\n is_superuser=False)\n auth = LdapUser(mu)\n self.assertEqual(set(users[user]), set(auth.ldap_groups))\n\n def tearDown(self):\n configuration.load_test_config()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n\n\nclass FakeWebHDFSHook(object):\n def __init__(self, conn_id):\n self.conn_id = conn_id\n\n def get_conn(self):\n return self.conn_id\n\n def check_for_path(self, hdfs_path):\n return hdfs_path\n\n\nclass FakeSnakeBiteClientException(Exception):\n pass\n\n\nclass FakeSnakeBiteClient(object):\n\n def __init__(self):\n self.started = True\n\n def ls(self, path, include_toplevel=False):\n \"\"\"\n the fake snakebite client\n :param path: the array of path to test\n :param include_toplevel: to return the toplevel directory info\n :return: a list for path for the matching queries\n \"\"\"\n if path[0] == '/datadirectory/empty_directory' and not include_toplevel:\n return []\n elif path[0] == '/datadirectory/datafile':\n return [{\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/datafile'\n }]\n elif path[0] == '/datadirectory/empty_directory' and include_toplevel:\n return [{\n 'group': u'supergroup',\n 'permission': 493,\n 'file_type': 'd',\n 'access_time': 0,\n 'block_replication': 0,\n 'modification_time': 1481132141540,\n 'length': 0,\n 'blocksize': 0,\n 'owner': u'hdfs',\n 'path': '/datadirectory/empty_directory'\n }]\n elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:\n return [{\n 'group': u'supergroup',\n 'permission': 493,\n 'file_type': 'd',\n 'access_time': 0,\n 'block_replication': 0,\n 'modification_time': 1481132141540,\n 'length': 0,\n 'blocksize': 0,\n 'owner': u'hdfs',\n 'path': '/datadirectory/empty_directory'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/not_empty_directory/test_file'\n }]\n elif path[0] == '/datadirectory/not_empty_directory':\n return [{\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/not_empty_directory/test_file'\n }]\n elif path[0] == 
'/datadirectory/not_existing_file_or_directory':\n raise FakeSnakeBiteClientException\n elif path[0] == '/datadirectory/regex_dir':\n return [{\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862, 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/test1file'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/test2file'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/test3file'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'\n }]\n else:\n raise FakeSnakeBiteClientException\n\n\nclass FakeHDFSHook(object):\n def __init__(self, conn_id=None):\n self.conn_id = conn_id\n\n def get_conn(self):\n client = FakeSnakeBiteClient()\n return client\n\n\nclass ConnectionTest(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n utils.db.initdb()\n os.environ['AIRFLOW_CONN_TEST_URI'] = (\n 'postgres://username:[email protected]:5432/the_database')\n os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (\n 'postgres://ec2.compute.com/the_database')\n\n def tearDown(self):\n env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']\n for ev in env_vars:\n if ev in os.environ:\n del os.environ[ev]\n\n def test_using_env_var(self):\n c = SqliteHook.get_connection(conn_id='test_uri')\n self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertEqual('username', c.login)\n self.assertEqual('password', c.password)\n self.assertEqual(5432, c.port)\n\n def test_using_unix_socket_env_var(self):\n c = SqliteHook.get_connection(conn_id='test_uri_no_creds')\n self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertIsNone(c.login)\n self.assertIsNone(c.password)\n self.assertIsNone(c.port)\n\n def test_param_setup(self):\n c = models.Connection(conn_id='local_mysql', conn_type='mysql',\n host='localhost', login='airflow',\n password='airflow', schema='airflow')\n self.assertEqual('localhost', c.host)\n self.assertEqual('airflow', c.schema)\n self.assertEqual('airflow', c.login)\n self.assertEqual('airflow', c.password)\n self.assertIsNone(c.port)\n\n def test_env_var_priority(self):\n c = SqliteHook.get_connection(conn_id='airflow_db')\n self.assertNotEqual('ec2.compute.com', c.host)\n\n os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \\\n 'postgres://username:[email protected]:5432/the_database'\n c = SqliteHook.get_connection(conn_id='airflow_db')\n 
self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertEqual('username', c.login)\n self.assertEqual('password', c.password)\n self.assertEqual(5432, c.port)\n del os.environ['AIRFLOW_CONN_AIRFLOW_DB']\n\n def test_dbapi_get_uri(self):\n conn = BaseHook.get_connection(conn_id='test_uri')\n hook = conn.get_hook()\n self.assertEqual('postgres://username:[email protected]:5432/the_database', hook.get_uri())\n conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')\n hook2 = conn2.get_hook()\n self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())\n\n def test_dbapi_get_sqlalchemy_engine(self):\n conn = BaseHook.get_connection(conn_id='test_uri')\n hook = conn.get_hook()\n engine = hook.get_sqlalchemy_engine()\n self.assertIsInstance(engine, sqlalchemy.engine.Engine)\n self.assertEqual('postgres://username:[email protected]:5432/the_database', str(engine.url))\n\n def test_get_connections_env_var(self):\n conns = SqliteHook.get_connections(conn_id='test_uri')\n assert len(conns) == 1\n assert conns[0].host == 'ec2.compute.com'\n assert conns[0].schema == 'the_database'\n assert conns[0].login == 'username'\n assert conns[0].password == 'password'\n assert conns[0].port == 5432\n\n def test_get_connections_db(self):\n conns = BaseHook.get_connections(conn_id='airflow_db')\n assert len(conns) == 1\n assert conns[0].host == 'localhost'\n assert conns[0].schema == 'airflow'\n assert conns[0].login == 'root'\n\n\nclass WebHDFSHookTest(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n\n def test_simple_init(self):\n from airflow.hooks.webhdfs_hook import WebHDFSHook\n c = WebHDFSHook()\n self.assertIsNone(c.proxy_user)\n\n def test_init_proxy_user(self):\n from airflow.hooks.webhdfs_hook import WebHDFSHook\n c = WebHDFSHook(proxy_user='someone')\n self.assertEqual('someone', c.proxy_user)\n\n\ntry:\n from airflow.hooks.hdfs_hook import HDFSHook\n import snakebite\nexcept ImportError:\n HDFSHook = None\n\n\[email protected](HDFSHook is None,\n \"Skipping test because HDFSHook is not installed\")\nclass HDFSHookTest(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')\n\n def test_get_client(self):\n client = HDFSHook(proxy_user='foo').get_conn()\n self.assertIsInstance(client, snakebite.client.Client)\n self.assertEqual('localhost', client.host)\n self.assertEqual(8020, client.port)\n self.assertEqual('foo', client.service.channel.effective_user)\n\n @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')\n @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')\n def test_get_autoconfig_client(self, mock_get_connections,\n MockAutoConfigClient):\n c = models.Connection(conn_id='hdfs', conn_type='hdfs',\n host='localhost', port=8020, login='foo',\n extra=json.dumps({'autoconfig': True}))\n mock_get_connections.return_value = [c]\n HDFSHook(hdfs_conn_id='hdfs').get_conn()\n MockAutoConfigClient.assert_called_once_with(effective_user='foo',\n use_sasl=False)\n\n @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')\n def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):\n HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()\n MockAutoConfigClient.assert_called_once_with(effective_user=None,\n use_sasl=False)\n\n @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')\n def test_get_ha_client(self, mock_get_connections):\n c1 = models.Connection(conn_id='hdfs_default', 
conn_type='hdfs',\n host='localhost', port=8020)\n c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',\n host='localhost2', port=8020)\n mock_get_connections.return_value = [c1, c2]\n client = HDFSHook().get_conn()\n self.assertIsInstance(client, snakebite.client.HAClient)\n\n\ntry:\n from airflow.hooks.http_hook import HttpHook\nexcept ImportError:\n HttpHook = None\n\n\[email protected](HttpHook is None,\n \"Skipping test because HttpHook is not installed\")\nclass HttpHookTest(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n\n @mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')\n def test_http_connection(self, mock_get_connection):\n c = models.Connection(conn_id='http_default', conn_type='http',\n host='localhost', schema='http')\n mock_get_connection.return_value = c\n hook = HttpHook()\n hook.get_conn({})\n self.assertEqual(hook.base_url, 'http://localhost')\n\n @mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')\n def test_https_connection(self, mock_get_connection):\n c = models.Connection(conn_id='http_default', conn_type='http',\n host='localhost', schema='https')\n mock_get_connection.return_value = c\n hook = HttpHook()\n hook.get_conn({})\n self.assertEqual(hook.base_url, 'https://localhost')\n\n @mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')\n def test_host_encoded_http_connection(self, mock_get_connection):\n c = models.Connection(conn_id='http_default', conn_type='http',\n host='http://localhost')\n mock_get_connection.return_value = c\n hook = HttpHook()\n hook.get_conn({})\n self.assertEqual(hook.base_url, 'http://localhost')\n\n @mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')\n def test_host_encoded_https_connection(self, mock_get_connection):\n c = models.Connection(conn_id='http_default', conn_type='http',\n host='https://localhost')\n mock_get_connection.return_value = c\n hook = HttpHook()\n hook.get_conn({})\n self.assertEqual(hook.base_url, 'https://localhost')\n\n\nsend_email_test = mock.Mock()\n\n\nclass EmailTest(unittest.TestCase):\n def setUp(self):\n configuration.remove_option('email', 'EMAIL_BACKEND')\n\n @mock.patch('airflow.utils.email.send_email')\n def test_default_backend(self, mock_send_email):\n res = utils.email.send_email('to', 'subject', 'content')\n mock_send_email.assert_called_with('to', 'subject', 'content')\n self.assertEqual(mock_send_email.return_value, res)\n\n @mock.patch('airflow.utils.email.send_email_smtp')\n def test_custom_backend(self, mock_send_email):\n configuration.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')\n utils.email.send_email('to', 'subject', 'content')\n send_email_test.assert_called_with(\n 'to', 'subject', 'content', files=None, dryrun=False,\n cc=None, bcc=None, mime_subtype='mixed'\n )\n self.assertFalse(mock_send_email.called)\n\n\nclass EmailSmtpTest(unittest.TestCase):\n def setUp(self):\n configuration.set('smtp', 'SMTP_SSL', 'False')\n\n @mock.patch('airflow.utils.email.send_MIME_email')\n def test_send_smtp(self, mock_send_mime):\n attachment = tempfile.NamedTemporaryFile()\n attachment.write(b'attachment')\n attachment.seek(0)\n utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])\n self.assertTrue(mock_send_mime.called)\n call_args = mock_send_mime.call_args[0]\n self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])\n self.assertEqual(['to'], call_args[1])\n msg = call_args[2]\n self.assertEqual('subject', msg['Subject'])\n 
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])\n self.assertEqual(2, len(msg.get_payload()))\n self.assertEqual(u'attachment; filename=\"' + os.path.basename(attachment.name) + '\"',\n msg.get_payload()[-1].get(u'Content-Disposition'))\n mimeapp = MIMEApplication('attachment')\n self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())\n\n @mock.patch('airflow.utils.email.send_MIME_email')\n def test_send_bcc_smtp(self, mock_send_mime):\n attachment = tempfile.NamedTemporaryFile()\n attachment.write(b'attachment')\n attachment.seek(0)\n utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')\n self.assertTrue(mock_send_mime.called)\n call_args = mock_send_mime.call_args[0]\n self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])\n self.assertEqual(['to', 'cc', 'bcc'], call_args[1])\n msg = call_args[2]\n self.assertEqual('subject', msg['Subject'])\n self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])\n self.assertEqual(2, len(msg.get_payload()))\n self.assertEqual(u'attachment; filename=\"' + os.path.basename(attachment.name) + '\"',\n msg.get_payload()[-1].get(u'Content-Disposition'))\n mimeapp = MIMEApplication('attachment')\n self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime(self, mock_smtp, mock_smtp_ssl):\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n msg = MIMEMultipart()\n utils.email.send_MIME_email('from', 'to', msg, dryrun=False)\n mock_smtp.assert_called_with(\n configuration.get('smtp', 'SMTP_HOST'),\n configuration.getint('smtp', 'SMTP_PORT'),\n )\n self.assertTrue(mock_smtp.return_value.starttls.called)\n mock_smtp.return_value.login.assert_called_with(\n configuration.get('smtp', 'SMTP_USER'),\n configuration.get('smtp', 'SMTP_PASSWORD'),\n )\n mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())\n self.assertTrue(mock_smtp.return_value.quit.called)\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):\n configuration.set('smtp', 'SMTP_SSL', 'True')\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)\n self.assertFalse(mock_smtp.called)\n mock_smtp_ssl.assert_called_with(\n configuration.get('smtp', 'SMTP_HOST'),\n configuration.getint('smtp', 'SMTP_PORT'),\n )\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):\n configuration.conf.remove_option('smtp', 'SMTP_USER')\n configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)\n self.assertFalse(mock_smtp_ssl.called)\n mock_smtp.assert_called_with(\n configuration.get('smtp', 'SMTP_HOST'),\n configuration.getint('smtp', 'SMTP_PORT'),\n )\n self.assertFalse(mock_smtp.login.called)\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)\n self.assertFalse(mock_smtp.called)\n self.assertFalse(mock_smtp_ssl.called)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
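The Airflow test row above exercises utils.email.send_email dispatching to whichever callable the [email] EMAIL_BACKEND option names (see EmailTest.test_custom_backend). A minimal, hedged sketch of that pluggable-backend pattern in general terms — this is not Airflow's implementation, and the default dotted path below is made up:

    import importlib

    def resolve_backend(dotted_path):
        # "tests.core.send_email_test" -> module "tests.core", attribute "send_email_test"
        module_path, attr = dotted_path.rsplit('.', 1)
        return getattr(importlib.import_module(module_path), attr)

    def send_email(to, subject, content, backend_path='myproject.email.send_smtp', **kwargs):
        # Resolve the configured backend at call time and forward all arguments,
        # which is what lets a test swap in a Mock by changing the configured path.
        backend = resolve_backend(backend_path)
        return backend(to, subject, content, **kwargs)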
fheyen/ClaVis | [
"528ca85dd05606d39761b5a00d755500cf1cd2f6"
] | [
"backend/modules/classifiers/cifar10_cnn/__init__.py"
] | [
"import numpy\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import EarlyStopping\nimport os\nfrom ...tools import check_argument as check\nfrom termcolor import cprint\n\n\"\"\"\nFrom https://keras.io/examples/cifar10_cnn/\n\"\"\"\n\nCLF_INFO = {\n 'name': 'cifar10_cnn',\n 'short': 'C10 CNN',\n 'description': 'Cifar10 Convolutional Neural Network',\n 'parameters': [\n {\n 'name': 'save_model',\n 'description': 'Save model to file',\n 'type': 'boolean',\n 'default_value': False\n },\n {\n 'name': 'data_augmentation',\n 'description': 'Data augmentation',\n 'type': 'boolean',\n 'default_value': True\n },\n {\n 'name': 'early_stopping',\n 'description': 'Early stopping',\n 'type': 'boolean',\n 'default_value': True\n },\n {\n 'name': 'early_stopping_patience',\n 'description': 'Early stopping patience',\n 'type': 'integer',\n 'range': [0, 1000000],\n 'default_value': 10\n },\n {\n 'name': 'epochs',\n 'description': 'Epochs',\n 'type': 'integer',\n 'range': [0, 10000],\n 'default_value': 20\n },\n {\n 'name': 'steps_per_epoch',\n 'description': 'Steps per epoch',\n 'type': 'integer',\n 'range': [0, 1000000],\n 'default_value': 1000\n },\n {\n 'name': 'batch_size',\n 'description': 'Batch size',\n 'type': 'integer',\n 'range': [0, 100000],\n 'default_value': 32\n },\n {\n 'name': 'random_state',\n 'description': 'Random seed',\n 'type': 'integer',\n 'range': [0, 1000000],\n 'default_value': 0\n }\n ]\n}\n\n\ndef get_clf(args, data_specs):\n return Classifier(args, data_specs)\n\n\ndef get_info():\n return CLF_INFO\n\n\nclass Classifier():\n def __init__(self, args, data_specs):\n self.title = args['title']\n\n # check all params and save them\n self.save_model = check('save_model', args, CLF_INFO)\n\n self.data_augmentation = check('data_augmentation', args, CLF_INFO)\n\n self.epochs = check('epochs', args, CLF_INFO)\n self.steps_per_epoch = check('steps_per_epoch', args, CLF_INFO)\n self.early_stopping = check('early_stopping', args, CLF_INFO)\n self.early_stopping_patience = check(\n 'early_stopping_patience', args, CLF_INFO)\n\n self.batch_size = check('batch_size', args, CLF_INFO)\n self.random_state = check('random_state', args, CLF_INFO)\n\n # save data parameters\n self.job_title = data_specs['job_title']\n self.dataset_name = data_specs['dataset_name']\n self.num_classes = data_specs['num_classes']\n self.original_shape = data_specs['original_shape']\n if len(self.original_shape) == 3:\n # Conv layer needs 4 dimensions\n self.original_shape += (1,)\n\n def get_info(self):\n return CLF_INFO\n\n def fit(self, X_train, y_train):\n numpy.random.seed(self.random_state)\n print('Number of classes: {}'.format(self.num_classes))\n\n # Reshape data back to original\n new_shape = (len(X_train),) + self.original_shape[1:]\n cprint('Scaling and reshaping to {}'.format(new_shape))\n X_train = X_train.astype('float32')\n X_train /= 255\n X_train = X_train.reshape(new_shape)\n\n # Convert class vectors to binary class matrices.\n y_train = keras.utils.to_categorical(y_train, self.num_classes)\n\n # model architecture\n model = Sequential()\n model.add(Conv2D(32, (3, 3), padding='same',\n input_shape=X_train.shape[1:]))\n model.add(Activation('relu'))\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, (3, 
3), padding='same'))\n model.add(Activation('relu'))\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(self.num_classes))\n model.add(Activation('softmax'))\n\n # optimizer\n opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)\n\n # compile model\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\n if not self.data_augmentation:\n cprint('Not using data augmentation (as specified in args).', 'cyan')\n\n # early stopping\n callbacks = None\n if self.early_stopping:\n cprint(\n f'Using early stopping with patience {self.early_stopping_patience}', 'green')\n early_stopping = EarlyStopping(monitor='val_loss',\n patience=self.early_stopping_patience,\n restore_best_weights=True,\n verbose=2)\n callbacks = [early_stopping]\n else:\n cprint('Not using early stopping', 'yellow')\n\n # train model\n history = model.fit(X_train, y_train,\n batch_size=self.batch_size,\n epochs=self.epochs,\n validation_split=0.2,\n shuffle=True,\n callbacks=callbacks)\n else:\n cprint('Using real-time data augmentation.', 'cyan')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n zca_epsilon=1e-06, # epsilon for ZCA whitening\n # randomly rotate images in the range (degrees, 0 to 180)\n rotation_range=0,\n # randomly shift images horizontally (fraction of total width)\n width_shift_range=0.1,\n # randomly shift images vertically (fraction of total height)\n height_shift_range=0.1,\n shear_range=0., # set range for random shear\n zoom_range=0., # set range for random zoom\n channel_shift_range=0., # set range for random channel shifts\n # set mode for filling points outside the input boundaries\n fill_mode='nearest',\n cval=0., # value used for fill_mode = \"constant\"\n horizontal_flip=True, # randomly flip images\n vertical_flip=False, # randomly flip images\n # set rescaling factor (applied before any other transformation)\n rescale=None,\n # set function that will be applied on each input\n preprocessing_function=None,\n # image data format, either \"channels_first\" or \"channels_last\"\n data_format=None,\n # fraction of images reserved for validation (strictly between 0 and 1)\n validation_split=0.1)\n\n # Compute quantities required for feature-wise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(X_train)\n\n # Fit the model on the batches generated by datagen.flow().\n history = model.fit_generator(\n datagen.flow(X_train, y_train, batch_size=self.batch_size),\n epochs=self.epochs, steps_per_epoch=self.steps_per_epoch, workers=8\n )\n\n self.clf = model\n self.history = history\n\n # save model and weights\n if self.save_model:\n save_dir = os.path.join(\n os.getcwd(), 'saved_models', self.job_title)\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n model_path = os.path.join(save_dir, f'{self.title}.h5')\n model.save(model_path)\n cprint('Saved trained model at {}'.format(model_path), 'green')\n\n def get_history(self):\n return 
self.history.history\n\n def predict(self, X_test, y_test):\n # same format as X_train\n X_test = X_test.astype('float32')\n X_test /= 255\n\n # adapt shape for test set\n new_shape = (len(X_test),) + self.original_shape[1:]\n X_test = X_test.reshape(new_shape)\n\n return self.clf.predict_classes(X_test)\n\n def predict_proba(self, X_test, y_test):\n # same format as X_train\n X_test = X_test.astype('float32')\n X_test /= 255\n\n # adapt shape for test set\n new_shape = (len(X_test),) + self.original_shape[1:]\n X_test = X_test.reshape(new_shape)\n\n return self.clf.predict_proba(X_test)\n"
] | [
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
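Every hyperparameter in the cifar10_cnn classifier above is validated through check('name', args, CLF_INFO) against the CLF_INFO['parameters'] spec. The real helper (backend/modules/tools.check_argument) is not included in this row, so the following is only a hedged sketch of what such a lookup-plus-validation step could look like:

    def check(name, args, clf_info):
        # Find the parameter spec, fall back to its declared default,
        # and enforce the declared type / integer range.
        spec = next(p for p in clf_info['parameters'] if p['name'] == name)
        value = args.get(name, spec['default_value'])
        if spec['type'] == 'integer':
            value = int(value)
            rng = spec.get('range')
            if rng and not (rng[0] <= value <= rng[1]):
                raise ValueError('{}={} outside allowed range {}'.format(name, value, rng))
            return value
        if spec['type'] == 'boolean':
            return bool(value)
        return value

    # Example: check('epochs', {'title': 'demo', 'epochs': 30}, CLF_INFO) -> 30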
Tianxiaomo/ROI | [
"8422716605f846c6f4276051a9738cb6c162611d"
] | [
"roi/layers/nms.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\nfrom torchvision.ops import boxes as box_ops\nfrom torchvision.ops import nms # BC-compat\n\n\ndef batched_nms(boxes, scores, idxs, iou_threshold):\n \"\"\"\n Same as torchvision.ops.boxes.batched_nms, but safer.\n \"\"\"\n assert boxes.shape[-1] == 4\n # TODO may need better strategy.\n # Investigate after having a fully-cuda NMS op.\n if len(boxes) < 40000:\n return box_ops.batched_nms(boxes, scores, idxs, iou_threshold)\n\n result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)\n for id in torch.unique(idxs).cpu().tolist():\n mask = (idxs == id).nonzero().view(-1)\n keep = nms(boxes[mask], scores[mask], iou_threshold)\n result_mask[mask[keep]] = True\n keep = result_mask.nonzero().view(-1)\n keep = keep[scores[keep].argsort(descending=True)]\n return keep\n\n\n# Note: this function (nms_rotated) might be moved into\n# torchvision/ops/boxes.py in the future\ndef nms_rotated(boxes, scores, iou_threshold):\n \"\"\"\n Performs non-maximum suppression (NMS) on the rotated boxes according\n to their intersection-over-union (IoU).\n\n Rotated NMS iteratively removes lower scoring rotated boxes which have an\n IoU greater than iou_threshold with another (higher scoring) rotated box.\n\n Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as\n RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they\n can be representing completely different objects in certain tasks, e.g., OCR.\n\n As for the question of whether rotated-NMS should treat them as faraway boxes\n even though their IOU is 1, it depends on the application and/or ground truth annotation.\n\n As an extreme example, consider a single character v and the square box around it.\n\n If the angle is 0 degree, the object (text) would be read as 'v';\n\n If the angle is 90 degrees, the object (text) would become '>';\n\n If the angle is 180 degrees, the object (text) would become '^';\n\n If the angle is 270/-90 degrees, the object (text) would become '<'\n\n All of these cases have IoU of 1 to each other, and rotated NMS that only\n uses IoU as criterion would only keep one of them with the highest score -\n which, practically, still makes sense in most cases because typically\n only one of theses orientations is the correct one. Also, it does not matter\n as much if the box is only used to classify the object (instead of transcribing\n them with a sequential OCR recognition model) later.\n\n On the other hand, when we use IoU to filter proposals that are close to the\n ground truth during training, we should definitely take the angle into account if\n we know the ground truth is labeled with the strictly correct orientation (as in,\n upside-down words are annotated with -180 degrees even though they can be covered\n with a 0/90/-90 degree box, etc.)\n\n The way the original dataset is annotated also matters. For example, if the dataset\n is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,\n we can estimate a minimum rotated bounding box to this polygon, but there's no way\n we can tell the correct angle with 100% confidence (as shown above, there could be 4 different\n rotated boxes, with angles differed by 90 degrees to each other, covering the exactly\n same region). 
In that case we have to just use IoU to determine the box\n proximity (as many detection benchmarks (even for text) do) unless there're other\n assumptions we can make (like width is always larger than height, or the object is not\n rotated by more than 90 degrees CCW/CW, etc.)\n\n In summary, not considering angles in rotated NMS seems to be a good option for now,\n but we should be aware of its implications.\n\n Args:\n boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in\n (x_center, y_center, width, height, angle_degrees) format.\n scores (Tensor[N]): Scores for each one of the rotated boxes\n iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold\n\n Returns:\n keep (Tensor): int64 tensor with the indices of the elements that have been kept\n by Rotated NMS, sorted in decreasing order of scores\n \"\"\"\n from roi import _C\n\n return _C.nms_rotated(boxes, scores, iou_threshold)\n\n\n# Note: this function (batched_nms_rotated) might be moved into\n# torchvision/ops/boxes.py in the future\ndef batched_nms_rotated(boxes, scores, idxs, iou_threshold):\n \"\"\"\n Performs non-maximum suppression in a batched fashion.\n\n Each index value correspond to a category, and NMS\n will not be applied between elements of different categories.\n\n Args:\n boxes (Tensor[N, 5]):\n boxes where NMS will be performed. They\n are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format\n scores (Tensor[N]):\n scores for each one of the boxes\n idxs (Tensor[N]):\n indices of the categories for each one of the boxes.\n iou_threshold (float):\n discards all overlapping boxes\n with IoU < iou_threshold\n\n Returns:\n Tensor:\n int64 tensor with the indices of the elements that have been kept\n by NMS, sorted in decreasing order of scores\n \"\"\"\n assert boxes.shape[-1] == 5\n\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=boxes.device)\n # Strategy: in order to perform NMS independently per class,\n # we add an offset to all the boxes. The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n\n # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,\n # which won't handle negative coordinates correctly.\n # Here by using min_coordinate we can make sure the negative coordinates are\n # correctly handled.\n max_coordinate = (\n torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2\n ).max()\n min_coordinate = (\n torch.min(boxes[:, 0], boxes[:, 1]) - torch.min(boxes[:, 2], boxes[:, 3]) / 2\n ).min()\n offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)\n boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes\n boxes_for_nms[:, :2] += offsets[:, None]\n keep = nms_rotated(boxes_for_nms, scores, iou_threshold)\n return keep\n"
] | [
[
"torch.min",
"torch.max",
"torch.unique",
"torch.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
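batched_nms_rotated above documents its strategy: add a class-dependent offset to every box so that boxes from different classes can never overlap, then run a single NMS pass. A small, hedged illustration of the same offset trick for ordinary axis-aligned boxes (non-negative coordinates assumed for brevity; the rotated version also uses a min_coordinate term to handle negative centers):

    import torch
    from torchvision.ops import nms

    def batched_nms_via_offsets(boxes, scores, idxs, iou_threshold):
        # boxes: (N, 4) as (x1, y1, x2, y2); idxs: (N,) integer class ids.
        if boxes.numel() == 0:
            return torch.empty((0,), dtype=torch.int64, device=boxes.device)
        max_coordinate = boxes.max()
        # One disjoint coordinate band per class id.
        offsets = idxs.to(boxes) * (max_coordinate + 1)
        # Translating a box does not change IoU within its class,
        # while boxes from different classes end up with zero overlap.
        shifted = boxes + offsets[:, None]
        return nms(shifted, scores, iou_threshold)

    # keep = batched_nms_via_offsets(
    #     torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]]),  # heavy overlap
    #     torch.tensor([0.9, 0.8]),
    #     torch.tensor([0, 1]),        # different classes -> both survive
    #     iou_threshold=0.5)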
xmengxin/MFGR | [
"ba807d0f52c0eb00d330eaa9bcef56c1343d2588"
] | [
"models/dcgan_conv.py"
] | [
"import torch\nimport torch.nn as nn\n\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n\nclass Generator(nn.Module):\n def __init__(self, latent_dim, img_size=32):\n super(Generator, self).__init__()\n\n self.init_size = img_size // 4\n self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))\n\n self.conv_blocks = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, 3, 3, stride=1, padding=1),\n nn.Tanh(),\n )\n\n def forward(self, z):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n return img\n\n\nclass Discriminator(nn.Module):\n def __init__(self, img_size=32):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.conv_blocks = nn.Sequential(\n *discriminator_block(3, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = img_size // 2 ** 4\n self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())\n\n def forward(self, img):\n out = self.conv_blocks(img)\n out = torch.flatten(out, 1)\n validity = self.adv_layer(out)\n\n return validity\n\n\ndef test():\n n_class, nz = 10, 100\n netG = Generator(n_class)\n netD = Discriminator()\n noise = torch.randn([32, nz])\n label = torch.randint(0, n_class, [32])\n img = netG(noise, label)\n valid, output = netD(img)\n pass\n\n# test()\n"
] | [
[
"torch.randint",
"torch.nn.Dropout2d",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.flatten",
"torch.nn.LeakyReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
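Note that the commented-out test() helper in dcgan_conv.py above passes a label into the generator and unpacks two values from the discriminator, which does not match the forward() signatures defined in the same file (Generator.forward takes only z; Discriminator.forward returns a single validity tensor). A hedged smoke test consistent with those signatures, assuming the Generator, Discriminator and weights_init definitions above are importable:

    import torch

    def smoke_test(latent_dim=100, batch=8, img_size=32):
        netG = Generator(latent_dim, img_size=img_size)
        netD = Discriminator(img_size=img_size)
        netG.apply(weights_init)
        netD.apply(weights_init)
        z = torch.randn(batch, latent_dim)
        img = netG(z)           # (batch, 3, img_size, img_size), values in (-1, 1) from Tanh
        validity = netD(img)    # (batch, 1), values in (0, 1) from Sigmoid
        assert img.shape == (batch, 3, img_size, img_size)
        assert validity.shape == (batch, 1)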
hiagopinacio/ross | [
"1bc84061f23df455d9e37cb11b244ac795c836ad",
"1bc84061f23df455d9e37cb11b244ac795c836ad"
] | [
"ross/api_report.py",
"ross/tests/test_rotor_assembly.py"
] | [
"# fmt: off\nfrom copy import copy, deepcopy\n\nimport numpy as np\nimport pandas as pd\nfrom plotly import express as px\nfrom plotly import graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import argrelextrema\n\nfrom ross.bearing_seal_element import BearingElement, SealElement\nfrom ross.disk_element import DiskElement\nfrom ross.materials import steel\nfrom ross.rotor_assembly import Rotor\nfrom ross.shaft_element import ShaftElement\n\n# fmt: on\n\n# set Plotly palette of colors\ncolors1 = px.colors.qualitative.Dark24\ncolors2 = px.colors.sequential.PuBu\n\n__all__ = [\"Report\", \"report_example\"]\n\n\nclass Report:\n \"\"\"Report according to standard analysis.\n\n - Perform unbalance response\n - Perform Stability_level1 analysis\n - Apply Level 1 Screening Criteria\n - Perform Stability_level2 analysis\n\n Parameters\n ----------\n rotor : object\n A rotor built from rotor_assembly.\n speed_range : tuple\n Tuple with (min, max) for speed range.\n tripspeed : float\n Machine trip speed.\n bearing_stiffness_range : tuple, optional\n Tuple with (start, end) bearing stiffness range.\n Argument to calculate the Undamped Critical Speed Map.\n bearing_clearance_lists : list of lists, optional\n List with two bearing elements lists:\n The first bearing list is set for minimum clearance.\n The second bearing list it set for maximum clearance.\n machine_type : str\n Machine type analyzed. Options: compressor, turbine or axial_flow.\n If other option is given, it will be treated as a compressor\n Default is compressor\n speed_units : str\n String defining the unit for rotor speed.\n Default is \"rpm\".\n tag : str\n String to name the rotor model\n Default is the Rotor.tag attribute\n\n Attributes\n ----------\n rotor_type: str\n Defines if the rotor is between bearings or overhung\n disk_nodes: list\n List of disk between bearings or overhung (depending on the\n rotor type)\n\n Returns\n -------\n A Report object\n\n Examples\n --------\n >>> import ross as rs\n >>> rotor = rs.rotor_example()\n >>>\n >>> # coefficients for minimum clearance\n >>> stfx = [0.7e7, 0.8e7, 0.9e7, 1.0e7]\n >>> damp = [2.0e3, 1.9e3, 1.8e3, 1.7e3]\n >>> freq = [400, 800, 1200, 1600]\n >>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=damp, frequency=freq)\n >>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=damp, frequency=freq)\n >>> min_clearance_brg = [bearing0, bearing1]\n >>>\n >>> # coefficients for maximum clearance\n >>> stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]\n >>> damp = [2.8e3, 2.7e3, 2.6e3, 2.5e3]\n >>> freq = [400, 800, 1200, 1600]\n >>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=damp, frequency=freq)\n >>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=damp, frequency=freq)\n >>> max_clearance_brg = [bearing0, bearing1]\n >>>\n >>> bearings = [min_clearance_brg, max_clearance_brg]\n >>> report = rs.Report(rotor=rotor,\n ... speed_range=(400, 1000),\n ... tripspeed=1200,\n ... bearing_stiffness_range=(5,8),\n ... bearing_clearance_lists=bearings,\n ... 
speed_units=\"rad/s\")\n >>> report.rotor_type\n 'between_bearings'\n \"\"\"\n\n def __init__(\n self,\n rotor,\n speed_range,\n tripspeed,\n bearing_stiffness_range=None,\n bearing_clearance_lists=None,\n machine_type=\"compressor\",\n speed_units=\"rpm\",\n tag=None,\n ):\n self.rotor = rotor\n self.speed_units = speed_units\n self.speed_range = speed_range\n\n if speed_units == \"rpm\":\n self.minspeed = speed_range[0] * np.pi / 30\n self.maxspeed = speed_range[1] * np.pi / 30\n self.tripspeed = tripspeed * np.pi / 30\n if speed_units == \"rad/s\":\n self.minspeed = speed_range[0]\n self.maxspeed = speed_range[1]\n self.tripspeed = tripspeed\n\n self.bearing_stiffness_range = bearing_stiffness_range\n self.bearing_clearance_lists = bearing_clearance_lists\n\n # check if rotor is between bearings, single or double overhung\n # fmt: off\n if(\n all(i > min(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"]) and\n all(i < max(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"])\n ):\n rotor_type = \"between_bearings\"\n disk_nodes = [\n i for i in rotor.df_disks[\"n\"] if(\n i > min(rotor.df_bearings[\"n\"]) and\n i < max(rotor.df_bearings[\"n\"])\n )\n ]\n elif(\n any(i < min(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"]) and\n all(i < max(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"])\n ):\n rotor_type = \"single_overhung_l\"\n disk_nodes = [\n i for i in rotor.df_disks[\"n\"] if i < min(rotor.df_bearings[\"n\"])\n ]\n elif(\n all(i > min(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"]) and\n any(i > max(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"])\n ):\n rotor_type = \"single_overhung_r\"\n disk_nodes = [\n i for i in rotor.df_disks[\"n\"] if i > max(rotor.df_bearings[\"n\"])\n ]\n elif(\n any(i < min(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"]) and\n any(i > max(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"])\n ):\n rotor_type = \"double_overhung\"\n disk_nodes = [\n i for i in rotor.df_disks[\"n\"] if(\n i < min(rotor.df_bearings[\"n\"]) or\n i > max(rotor.df_bearings[\"n\"])\n )\n ]\n # fmt: on\n\n self.rotor_type = rotor_type\n self.disk_nodes = disk_nodes\n\n machine_options = [\"compressor\", \"turbine\", \"axial_flow\"]\n if machine_type not in machine_options:\n machine_type = \"compressor\"\n self.machine_type = machine_type\n\n if tag is None:\n self.tag = rotor.tag\n else:\n self.tag = tag\n\n # Multiplicative factor of the speed range - according to API 684\n self.speed_factor = 1.25\n\n # list of attributes\n self.Q0 = None\n self.Qa = None\n self.log_dec_a = None\n self.CSR = None\n self.Qratio = None\n self.crit_speed = None\n self.MCS = None\n self.RHO_gas = None\n self.condition = None\n self.node_min = None\n self.node_max = None\n self.U_force = None\n\n @classmethod\n def from_saved_rotors(\n cls,\n path,\n speed_range,\n tripspeed,\n bearing_stiffness_range=None,\n bearing_clearance_lists=None,\n machine_type=\"compressor\",\n speed_units=\"rpm\",\n tag=None,\n ):\n \"\"\"Instantiate a rotor from a previously saved rotor model.\n\n Parameters\n ----------\n path : str\n File name\n maxspeed : float\n Maximum operation speed.\n minspeed : float\n Minimum operation speed.\n tripspeed : float\n Machine trip speed.\n stiffness_range : tuple, optional\n Tuple with (start, end) for stiffness range. Argument to calculate\n the Undamped Critical Speed Map\n machine_type : str\n Machine type analyzed. 
Options: compressor, turbine or axial_flow.\n If other option is given, it will be treated as a compressor\n Default is compressor\n speed_units : str\n String defining the unit for rotor speed.\n Default is \"rpm\".\n\n Returns\n -------\n A Report object\n \"\"\"\n rotor = Rotor.load(path)\n return cls(\n rotor,\n speed_range,\n tripspeed,\n bearing_stiffness_range,\n bearing_clearance_lists,\n machine_type,\n speed_units,\n tag,\n )\n\n def rotor_instance(self, rotor, bearing_list):\n \"\"\"Build an instance of an auxiliary rotor with different bearing clearances.\n\n Parameters\n ----------\n rotor : object\n A rotor built from rotor_assembly.\n bearing_list : list\n List with the bearing elements.\n\n Returns\n -------\n aux_rotor : Rotor.object\n Returns a rotor object copy with different bearing clearance.\n\n Example\n -------\n >>> import ross as rs\n >>> stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]\n >>> damp = [2.8e3, 2.7e3, 2.6e3, 2.5e3]\n >>> freq = [400, 800, 1200, 1600]\n >>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=damp, frequency=freq)\n >>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=damp, frequency=freq)\n >>> bearings = [bearing0, bearing1]\n >>> rotor = rs.rotor_example()\n >>> report = rs.report_example()\n >>> aux_rotor = report.rotor_instance(rotor, bearings)\n \"\"\"\n sh_elm = rotor.shaft_elements\n dk_elm = rotor.disk_elements\n pm_elm = rotor.point_mass_elements\n min_w = rotor.min_w\n max_w = rotor.max_w\n rated_w = rotor.rated_w\n tag = rotor.tag\n\n aux_rotor = Rotor(\n sh_elm, dk_elm, bearing_list, pm_elm, min_w, max_w, rated_w, tag\n )\n\n return aux_rotor\n\n def run(self, D, H, HP, oper_speed, RHO_ratio, RHOs, RHOd, unit=\"m\"):\n \"\"\"Run API report.\n\n This method runs the API analysis and prepare the results to\n generate the PDF report.\n\n Parameters\n ----------\n D: list\n Impeller diameter, m (in.),\n Blade pitch diameter, m (in.),\n H: list\n Minimum diffuser width per impeller, m (in.),\n Effective blade height, m (in.),\n HP: list\n Rated power per stage/impeller, W (HP),\n oper_speed: float\n Operating speed, rpm,\n RHO_ratio: list\n Density ratio between the discharge gas density and the suction\n gas density per impeller (RHO_discharge / RHO_suction),\n kg/m3 (lbm/in.3),\n RHOs: float\n Suction gas density in the first stage, kg/m3 (lbm/in.3).\n RHOd: float\n Discharge gas density in the last stage, kg/m3 (lbm/in.3),\n unit: str, optional\n Adopted unit system. Options are \"m\" (meter) and \"in\" (inch)\n Default is \"m\"\n\n Returns\n -------\n fig_ucs : list\n List with undamped critical speed map figures.\n fig_mode_shape : list\n List with mode shape figures.\n fig_unbalance : list\n List with unbalance response figures.\n df_unbalance : dataframe\n Dataframe for the unbalance response informations.\n fig_a_lvl1 : list\n List with \"Applied Cross-Coupled Stiffness\" (stability level 1) figures.\n fig_b_lvl1 : list\n List with \"CSR vs. 
Mean Gas Density\" (stability level 1) figures.\n df_lvl2 : dataframe\n Dataframe for the stability level 2 informations.\n summaries : pd.Dataframe\n Dataframes with a summary of stability level 1 and 2 analyses.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> D = [0.35, 0.35]\n >>> H = [0.08, 0.08]\n >>> HP = [10000, 10000]\n >>> RHO_ratio = [1.11, 1.14]\n >>> RHOd = 30.45\n >>> RHOs = 37.65\n >>> oper_speed = 1000.0\n >>> # to run the API report analysis, use:\n >>> # report.run(D, H, HP, oper_speed, RHO_ratio, RHOs, RHOd)\n \"\"\"\n fig_ucs = []\n fig_mode_shape = []\n fig_unbalance = []\n fig_a_lvl1 = []\n fig_b_lvl1 = []\n df_unbalance = []\n summaries = []\n\n rotor0 = self.rotor\n\n for bearings in self.bearing_clearance_lists:\n self.rotor = self.rotor_instance(rotor0, bearings)\n\n # undamped critical speed map\n fig_ucs.append(self.plot_ucs(stiffness_range=self.bearing_stiffness_range))\n\n for i, mode in enumerate([0, 2]):\n # mode shape figures\n fig_mode_shape.append(self.mode_shape(mode))\n\n # unbalance response figures and dataframe\n fig, _dict = self.unbalance_response(mode)\n fig_unbalance.append(fig)\n df = pd.DataFrame(_dict).astype(object)\n df_unbalance.append(df)\n\n # stability level 1 figures\n figs = self.stability_level_1(D, H, HP, oper_speed, RHO_ratio, RHOs, RHOd)\n fig_a_lvl1.append(figs[0])\n fig_b_lvl1.append(figs[1])\n\n # stability level 2 dataframe\n df_lvl2 = self.stability_level_2()\n\n # API summary tables\n summaries.append(self.summary())\n\n df_unbalance = pd.concat(df_unbalance)\n\n self.rotor = rotor0\n\n return (\n fig_ucs,\n fig_mode_shape,\n fig_unbalance,\n df_unbalance,\n fig_a_lvl1,\n fig_b_lvl1,\n df_lvl2,\n summaries,\n )\n\n def plot_ucs(self, stiffness_range=None, num=20):\n \"\"\"Plot undamped critical speed map.\n\n This method will plot the undamped critical speed map for a given range\n of stiffness values. 
If the range is not provided, the bearing\n stiffness at rated speed will be used to create a range.\n\n Parameters\n ----------\n stiffness_range : tuple, optional\n Tuple with (start, end) for stiffness range.\n num : int\n Number of steps in the range.\n Default is 20.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> fig = report.plot_ucs(stiffness_range=(5, 8))\n \"\"\"\n if stiffness_range is None:\n if self.rotor.rated_w is not None:\n bearing = self.rotor.bearing_elements[0]\n k = bearing.kxx.interpolated(self.rotor.rated_w)\n k = int(np.log10(k))\n stiffness_range = (k - 3, k + 3)\n else:\n stiffness_range = (6, 11)\n\n stiffness_log = np.logspace(*stiffness_range, num=num)\n rotor_wn = np.zeros((4, len(stiffness_log)))\n\n bearings_elements = [] # exclude the seals\n for bearing in self.rotor.bearing_elements:\n if not isinstance(bearing, SealElement):\n bearings_elements.append(bearing)\n\n for i, k in enumerate(stiffness_log):\n bearings = [BearingElement(b.n, kxx=k, cxx=0) for b in bearings_elements]\n rotor = self.rotor.__class__(\n self.rotor.shaft_elements, self.rotor.disk_elements, bearings\n )\n modal = rotor.run_modal(speed=0, num_modes=16)\n rotor_wn[:, i] = modal.wn[:8:2]\n\n bearing0 = bearings_elements[0]\n\n fig = go.Figure()\n\n fig.add_trace(\n go.Scatter(\n x=bearing0.kxx.interpolated(bearing0.frequency),\n y=bearing0.frequency,\n mode=\"markers\",\n marker=dict(size=10, symbol=\"circle\", color=\"#888844\"),\n name=\"Kxx\",\n hovertemplate=(\"Kxx: %{x:.2e}<br>\" + \"Frequency: %{y:.2f}\"),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=bearing0.kyy.interpolated(bearing0.frequency),\n y=bearing0.frequency,\n mode=\"markers\",\n marker=dict(size=10, symbol=\"square\", color=\"#888844\"),\n name=\"Kyy\",\n hovertemplate=(\"Kyy: %{x:.2e}<br>\" + \"Frequency: %{y:.2f}\"),\n )\n )\n\n # Speeds References\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=[self.maxspeed] * num,\n mode=\"lines\",\n line=dict(dash=\"dot\", width=4, color=colors2[8]),\n name=\"MCS Speed\",\n hoverinfo=\"none\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=[self.minspeed] * num,\n mode=\"lines\",\n line=dict(dash=\"dash\", width=4, color=colors2[8]),\n name=\"MOS Speed\",\n hoverinfo=\"none\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=[self.tripspeed] * num,\n mode=\"lines\",\n line=dict(dash=\"dashdot\", width=4, color=colors2[8]),\n name=\"Trip Speed\",\n hoverinfo=\"none\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=[self.speed_factor * self.tripspeed] * num,\n mode=\"lines\",\n line=dict(dash=\"longdash\", width=4, color=colors2[8]),\n name=\"{}% Trip Speed\".format(100 * self.speed_factor),\n hoverinfo=\"none\",\n )\n )\n for j in range(rotor_wn.T.shape[1]):\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=np.transpose(rotor_wn.T)[j],\n mode=\"lines\",\n line=dict(width=4, color=colors1[j]),\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n fig.update_xaxes(\n title_text=\"<b>Bearing Stiffness</b>\",\n title_font=dict(size=16),\n tickfont=dict(size=14),\n gridcolor=\"lightgray\",\n showline=True,\n linewidth=2.5,\n linecolor=\"black\",\n mirror=True,\n type=\"log\",\n exponentformat=\"power\",\n )\n fig.update_yaxes(\n title_text=\"<b>Critical Speed</b>\",\n title_font=dict(size=16),\n tickfont=dict(size=14),\n gridcolor=\"lightgray\",\n showline=True,\n linewidth=2.5,\n linecolor=\"black\",\n 
mirror=True,\n type=\"log\",\n exponentformat=\"power\",\n )\n fig.update_layout(\n width=800,\n height=600,\n plot_bgcolor=\"white\",\n legend=dict(\n font=dict(family=\"sans-serif\", size=14),\n bgcolor=\"white\",\n bordercolor=\"black\",\n borderwidth=2,\n ),\n title=dict(text=\"<b>Undamped Critical Speed Map</b>\", font=dict(size=16)),\n )\n\n return fig\n\n def static_forces(self):\n \"\"\"Calculate the bearing reaction forces.\n\n Returns\n -------\n Fb : list\n Bearing reaction forces.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> report.static_forces()\n array([44.09320349, 44.09320349])\n \"\"\"\n # get reaction forces on bearings\n self.rotor.run_static()\n Fb = list(self.rotor.bearing_forces_nodal.values())\n Fb = np.array(Fb) / 9.8065\n\n return Fb\n\n def unbalance_forces(self, mode):\n \"\"\"Calculate the unbalance forces.\n\n The unbalance forces are calculated base on the rotor type:\n between_bearings :\n The unbalance forces derives from the reaction bearing forces.\n single_overung_l :\n The unbalance forces derives from the disk's masses on the\n shaft left end.\n single_overung_r :\n The unbalance forces derives from the disk's masses on the\n shaft right end.\n double_overung :\n The unbalance forces derives from the disk's masses on the\n shaft left and right ends.\n\n Parameters\n ----------\n mode : int\n n'th mode shape.\n\n Returns\n -------\n U : list\n Unbalancing forces.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> report.unbalance_forces(mode=0)\n [58.641354289961676]\n \"\"\"\n if mode > 3:\n raise ValueError(\n \"This module calculates only the response for the first \"\n \"two backward and forward modes. \"\n )\n\n N = 60 * self.maxspeed / (2 * np.pi)\n\n # get reaction forces on bearings\n if self.rotor_type == \"between_bearings\":\n Fb = self.static_forces()\n if mode == 0 or mode == 1:\n U_force = [max(6350 * np.sum(Fb) / N, 254e-6 * np.sum(Fb))]\n\n if mode == 2 or mode == 3:\n U_force = [max(6350 * f / N, 254e-6 * f) for f in Fb]\n\n # get disk masses\n elif self.rotor_type == \"single_overhung_l\":\n Wd = [\n disk.m\n for disk in self.rotor.disk_elements\n if disk.n < min(self.rotor.df_bearings[\"n\"])\n ]\n Ws = [\n sh.m\n for sh in self.rotor.shaft_elements\n if sh.n_l < min(self.rotor.df_bearings[\"n\"])\n ]\n W3 = np.sum(Wd + Ws)\n\n U_force = [6350 * W3 / N]\n\n elif self.rotor_type == \"single_overhung_r\":\n Wd = [\n disk.m\n for disk in self.rotor.disk_elements\n if disk.n > max(self.rotor.df_bearings[\"n\"])\n ]\n Ws = [\n sh.m\n for sh in self.rotor.shaft_elements\n if sh.n_r > max(self.rotor.df_bearings[\"n\"])\n ]\n W3 = np.sum(Wd + Ws)\n\n U_force = [6350 * W3 / N]\n\n elif self.rotor_type == \"double_overhung\":\n Wd_l = [\n disk.m\n for disk in self.rotor.disk_elements\n if disk.n < min(self.rotor.df_bearings[\"n\"])\n ]\n Ws_l = [\n sh.m\n for sh in self.rotor.shaft_elements\n if sh.n_l < min(self.rotor.df_bearings[\"n\"])\n ]\n Wd_r = [\n disk.m\n for disk in self.rotor.disk_elements\n if disk.n > max(self.rotor.df_bearings[\"n\"])\n ]\n Ws_r = [\n sh.m\n for sh in self.rotor.shaft_elements\n if sh.n_r > max(self.rotor.df_bearings[\"n\"])\n ]\n W3 = np.array([np.sum(Wd_l + Ws_l), np.sum(Wd_r + Ws_r)])\n\n U_force = 6350 * W3 / N\n\n self.U_force = U_force\n\n return U_force\n\n def unbalance_response(self, mode, samples=201):\n \"\"\"Evaluate the unbalance response for the rotor.\n\n This analysis takes the critical speeds of interest, calculates 
the\n position and weight of the required unbalance and performs the analysis\n including:\n - Check if vibration at MCS is below the limit with the applied weight;\n - Check if the clearances are ok if the vibration deteriorate to the\n limit level;\n\n Parameters\n ----------\n mode : int\n n'th mode shape.\n samples : int\n Number of samples to generate de frequency range.\n\n Returns\n -------\n subplots : Plotly graph_objects.make_subplots()\n Plotly figure with Amplitude vs Frequency and Phase vs Frequency plots.\n unbalance_dict : dict\n A dictionary with information about simulation parameters to be\n displayed in the report. The dictionary contains:\n - Mode number;\n - Critical frequencies;\n - Amplification factors;\n - Separation margins (actual and required);\n - Unbalance stations;\n - Unbalance weights;\n - Unbalance phases;\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> fig, unbalance_dict = report.unbalance_response(mode=0)\n \"\"\"\n maxspeed = self.maxspeed\n minspeed = self.minspeed\n freq_range = np.linspace(0, self.speed_factor * maxspeed, 201)\n\n # returns de nodes where forces will be applied\n self.mode_shape(mode)\n node_min = self.node_min\n node_max = self.node_max\n nodes = [int(node) for sub_nodes in [node_min, node_max] for node in sub_nodes]\n\n force = self.unbalance_forces(mode)\n\n phase = []\n phase_angle = 0\n for node in nodes:\n phase.append(phase_angle)\n phase_angle += np.pi\n\n unbalance_dict = {\n \"Mode\": mode + 1,\n \"Frequency\": [],\n \"Amplification factor\": [],\n \"Separation margin - ACTUAL\": [],\n \"Separation margin - REQUIRED\": [],\n \"Unbalance station(s)\": [nodes],\n \"Unbalance weight(s)\": [force],\n \"Unbalance phase(s)\": [phase],\n }\n\n response = self.rotor.run_unbalance_response(nodes, force, phase, freq_range)\n mag = response.magnitude\n\n for node in nodes:\n dof = 4 * node + 1\n mag_plot = response.plot_magnitude([(node, np.pi / 2)])\n phs_plot = response.plot_phase([(node, np.pi / 2)])\n\n magnitude = mag[dof]\n idx_max = argrelextrema(magnitude, np.greater)[0].tolist()\n wn = freq_range[idx_max]\n\n for i, peak in enumerate(magnitude[idx_max]):\n peak_n = 0.707 * peak\n peak_aux = np.linspace(peak_n, peak_n, len(freq_range))\n\n idx = np.argwhere(np.diff(np.sign(peak_aux - magnitude))).flatten()\n idx = np.sort(np.append(idx, idx_max[i]))\n\n # if speed range is not long enough to catch the magnitudes\n try:\n idx_aux = [\n list(idx).index(idx_max[i]) - 1,\n list(idx).index(idx_max[i]) + 1,\n ]\n idx = idx[idx_aux]\n except IndexError:\n idx = [list(idx).index(idx_max[i]) - 1, len(freq_range) - 1]\n\n # Amplification Factor (AF) - API684 - SP6.8.2.1\n AF = wn[i] / (freq_range[idx[1]] - freq_range[idx[0]])\n\n # Separation Margin (SM) - API684 - SP6.8.2.10\n if AF > 2.5 and wn[i] < minspeed:\n SM = min([16, 17 * (1 - 1 / (AF - 1.5))]) / 100\n SMspeed = wn[i] * (1 + SM)\n SM_ref = (minspeed - wn[i]) / wn[i]\n\n hovertemplate = (\n f\"<b>Critical Speed: {wn[i]:.2f}<b><br>\"\n + f\"<b>Speed at 0.707 x amplitude peak: {SMspeed:.2f}<b><br>\"\n )\n mag_plot.add_trace(\n go.Scatter(\n x=[wn[i], SMspeed, SMspeed, wn[i], wn[i]],\n y=[0, 0, max(magnitude[idx_max]), max(magnitude[idx_max]), 0],\n text=hovertemplate,\n mode=\"lines\",\n opacity=0.3,\n fill=\"toself\",\n fillcolor=colors1[3],\n line=dict(width=1.5, color=colors1[3]),\n showlegend=True if i == 0 else False,\n name=\"Separation Margin\",\n legendgroup=\"Separation Margin\",\n hoveron=\"points+fills\",\n hoverinfo=\"text\",\n 
hovertemplate=hovertemplate,\n hoverlabel=dict(bgcolor=colors1[3]),\n )\n )\n\n elif AF > 2.5 and wn[i] > maxspeed:\n SM = min([26, 10 + 17 * (1 - 1 / (AF - 1.5))]) / 100\n SMspeed = wn[i] * (1 - SM)\n SM_ref = (wn[i] - maxspeed) / maxspeed\n\n hovertemplate = (\n f\"<b>Critical Speed: {wn[i]:.2f}<b><br>\"\n + f\"<b>Speed at 0.707 x amplitude peak: {SMspeed:.2f}<b><br>\"\n )\n mag_plot.add_trace(\n go.Scatter(\n x=[SMspeed, wn[i], wn[i], SMspeed, SMspeed],\n y=[0, 0, max(magnitude[idx_max]), max(magnitude[idx_max]), 0],\n text=hovertemplate,\n mode=\"lines\",\n opacity=0.3,\n fill=\"toself\",\n fillcolor=colors1[3],\n line=dict(width=1.5, color=colors1[3]),\n showlegend=True if i == 0 else False,\n name=\"Separation Margin\",\n legendgroup=\"Separation Margin\",\n hoveron=\"points+fills\",\n hoverinfo=\"text\",\n hovertemplate=hovertemplate,\n hoverlabel=dict(bgcolor=colors1[3]),\n )\n )\n\n else:\n SM = None\n SM_ref = None\n SMspeed = None\n\n unbalance_dict[\"Amplification factor\"].append(AF)\n unbalance_dict[\"Separation margin - ACTUAL\"].append(SM)\n unbalance_dict[\"Separation margin - REQUIRED\"].append(SM_ref)\n unbalance_dict[\"Frequency\"].append(wn[i])\n\n # amplitude limit in micrometers (A1) - API684 - SP6.8.2.11\n A1 = 25.4 * np.sqrt(12000 / (30 * maxspeed / np.pi))\n\n Amax = max(mag[dof])\n\n # Scale Factor (Scc) - API684 - SP6.8.2.11 / API617 - 4.8.2.11\n Scc = max(A1 / Amax, 0.5)\n Scc = min(Scc, 6.0)\n\n mag_plot.add_trace(\n go.Scatter(\n x=[minspeed, maxspeed, maxspeed, minspeed, minspeed],\n y=[0, 0, max(mag[dof]), max(mag[dof]), 0],\n text=\"Operation Speed Range\",\n mode=\"lines\",\n opacity=0.3,\n fill=\"toself\",\n fillcolor=colors1[2],\n line=dict(width=1.5, color=colors1[2]),\n name=\"Operation Speed Range\",\n legendgroup=\"Operation Speed Range\",\n hoveron=\"points+fills\",\n hoverinfo=\"text\",\n hoverlabel=dict(bgcolor=colors1[2]),\n )\n )\n mag_plot.add_trace(\n go.Scatter(\n x=[minspeed, maxspeed],\n y=[A1, A1],\n mode=\"lines\",\n line=dict(width=2.0, color=colors1[5], dash=\"dashdot\"),\n name=\"Av1 - Mechanical test vibration limit\",\n hoverinfo=\"none\",\n )\n )\n mag_plot.add_annotation(\n x=(minspeed + maxspeed) / 2,\n y=A1,\n axref=\"x\",\n ayref=\"y\",\n xshift=0,\n yshift=10,\n text=\"<b>Av1</b>\",\n font=dict(size=18),\n showarrow=False,\n )\n mag_plot[\"data\"][0][\"line\"] = dict(width=4.0, color=colors1[5])\n phs_plot[\"data\"][0][\"line\"] = dict(width=4.0, color=colors1[5])\n\n subplots = make_subplots(rows=2, cols=1)\n for data in mag_plot[\"data\"]:\n subplots.add_trace(data, row=1, col=1)\n for data in phs_plot[\"data\"]:\n subplots.add_trace(data, row=2, col=1)\n\n subplots.update_xaxes(mag_plot.layout.xaxis, row=1, col=1)\n subplots.update_yaxes(mag_plot.layout.yaxis, row=1, col=1)\n subplots.update_xaxes(phs_plot.layout.xaxis, row=2, col=1)\n subplots.update_yaxes(phs_plot.layout.yaxis, row=2, col=1)\n subplots.update_layout(\n width=1800,\n height=900,\n plot_bgcolor=\"white\",\n hoverlabel_align=\"right\",\n legend=dict(\n itemsizing=\"constant\",\n bgcolor=\"white\",\n borderwidth=2,\n font=dict(size=14),\n ),\n )\n\n return subplots, unbalance_dict\n\n def mode_shape(self, mode):\n \"\"\"Evaluate the mode shapes for the rotor.\n\n This analysis presents the vibration mode for each critical speed.\n The importance is to locate the critical node, where the displacement\n is the greatest, then apply loads for unbalance response (stability\n level 1)\n\n Parameters\n ----------\n mode : int\n the n'th vibration mode\n\n 
Attributes\n ----------\n node_min : int\n Nodes where the maximum displacements occur\n node_max : int\n Nodes where the minimum displacements occur\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> fig = report.mode_shape(mode=0)\n >>> report.node_min\n array([], dtype=float64)\n >>> report.node_max\n array([3.])\n \"\"\"\n nodes_pos = self.rotor.nodes_pos\n df_bearings = self.rotor.df_bearings\n df_disks = self.rotor.df_disks\n\n modal = self.rotor.run_modal(speed=self.maxspeed)\n xn, yn, zn, xc, yc, zc_pos, nn = modal.calc_mode_shape(mode=mode)\n\n # reduce 3D view to 2D view\n vn = np.zeros(len(zn))\n for i in range(len(zn)):\n theta = np.arctan(xn[i] / yn[i])\n vn[i] = xn[i] * np.sin(theta) + yn[i] * np.cos(theta)\n\n # remove repetitive values from zn and vn\n idx_remove = []\n for i in range(1, len(zn)):\n if zn[i] == zn[i - 1]:\n idx_remove.append(i)\n zn = np.delete(zn, idx_remove)\n vn = np.delete(vn, idx_remove)\n\n node_min = np.array([])\n node_max = np.array([])\n\n if self.rotor_type == \"between_bearings\":\n\n aux_idx_max = argrelextrema(vn, np.greater)[0].tolist()\n aux_idx_min = argrelextrema(vn, np.less)[0].tolist()\n\n # verification of rigid modes\n if len(aux_idx_max) == 0 and len(aux_idx_min) == 0:\n idx_max = np.argmax(vn)\n idx_min = np.argmin(vn)\n\n # corrects the index by the removed points\n for i in idx_remove:\n if idx_min > i:\n idx_min += 1\n if idx_max > i:\n idx_max += 1\n node_max = np.round(np.array([idx_max]) / nn)\n node_min = np.round(np.array([idx_min]) / nn)\n\n if len(aux_idx_min) != 0:\n idx_min = np.where(vn == min(vn[aux_idx_min]))[0].tolist()\n\n # corrects the index by the removed points\n for i in idx_remove:\n if idx_min[0] > i:\n idx_min[0] += 1\n node_min = np.round(np.array(idx_min) / nn)\n\n if len(aux_idx_max) != 0:\n idx_max = np.where(vn == max(vn[aux_idx_max]))[0].tolist()\n\n # corrects the index by the removed points\n for i in idx_remove:\n if idx_max[0] > i:\n idx_max[0] += 1\n node_max = np.round(np.array(idx_max) / nn)\n\n elif self.rotor_type == \"double_overhung\":\n node_max = [max(df_disks[\"n\"])]\n node_min = [min(df_disks[\"n\"])]\n\n elif self.rotor_type == \"single_overhung_l\":\n node_min = [min(df_disks[\"n\"])]\n\n elif self.rotor_type == \"single_overhung_r\":\n node_max = [max(df_disks[\"n\"])]\n\n nodes_pos = np.array(nodes_pos)\n rpm_speed = (30 / np.pi) * modal.wn[mode]\n\n self.node_min = node_min\n self.node_max = node_max\n\n fig = go.Figure()\n fig.add_trace(\n go.Scatter(\n x=zn,\n y=vn,\n mode=\"lines\",\n line=dict(width=4, color=colors1[3]),\n name=\"<b>Mode {}</b><br><b>Speed = {:.1f} RPM</b>\".format(\n mode, rpm_speed\n ),\n hovertemplate=\"Axial position: %{x:.2f}<br>Deformation: %{y:.2f}\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=np.zeros(len(nodes_pos)),\n mode=\"lines\",\n line=dict(width=4, color=colors1[5], dash=\"dashdot\"),\n name=\"centerline\",\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n fig.add_trace(\n go.Scatter(\n x=nodes_pos[df_bearings[\"n\"]],\n y=np.zeros(len(df_bearings)),\n mode=\"markers\",\n marker=dict(size=12, color=colors1[5]),\n name=\"bearing_node\",\n showlegend=False,\n hovertemplate=\"Bearing Position: %{x:.2f}\",\n )\n )\n\n pos0 = nodes_pos[min(df_bearings[\"n\"])]\n pos1 = nodes_pos[max(df_bearings[\"n\"])]\n fig.add_annotation(\n x=np.mean(nodes_pos[df_bearings[\"n\"]]),\n y=0,\n axref=\"x\",\n ayref=\"y\",\n 
xshift=0,\n yshift=20,\n text=\"<b>Bearing Span = {:.2f}</b>\".format(pos1 - pos0),\n font=dict(size=18),\n showarrow=False,\n )\n\n for node in nodes_pos[df_bearings[\"n\"]]:\n fig.add_trace(\n go.Scatter(\n x=[node, node],\n y=[-2, 2],\n mode=\"lines\",\n line=dict(width=2.5, color=colors1[5], dash=\"dash\"),\n name=\"Span\",\n legendgroup=\"Span\",\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n\n fig.update_xaxes(\n title_text=\"<b>Rotor lenght</b>\",\n title_font=dict(family=\"Arial\", size=20),\n tickfont=dict(size=16),\n gridcolor=\"lightgray\",\n showline=True,\n linewidth=2.5,\n linecolor=\"black\",\n mirror=True,\n )\n fig.update_yaxes(\n title_text=\"<b>Non dimensional deformation</b>\",\n title_font=dict(family=\"Arial\", size=20),\n tickfont=dict(size=16),\n range=[-2, 2],\n gridcolor=\"lightgray\",\n showline=True,\n linewidth=2.5,\n linecolor=\"black\",\n mirror=True,\n )\n fig.update_layout(\n width=1200,\n height=900,\n plot_bgcolor=\"white\",\n hoverlabel_align=\"right\",\n title=dict(\n text=\"<b>Undamped Mode Shape</b>\".format(node), font=dict(size=20)\n ),\n )\n\n return fig\n\n def stability_level_1(self, D, H, HP, oper_speed, RHO_ratio, RHOs, RHOd, unit=\"m\"):\n \"\"\"Stability analysis level 1.\n\n This analysis consider a anticipated cross coupling QA based on\n conditions at the normal operating point and the cross-coupling\n required to produce a zero log decrement, Q0.\n\n Components such as seals and impellers are not considered in this\n analysis.\n\n Parameters\n ----------\n D: list\n Impeller diameter, m (in.),\n Blade pitch diameter, m (in.),\n H: list\n Minimum diffuser width per impeller, m (in.),\n Effective blade height, m (in.),\n HP: list\n Rated power per stage/impeller, W (HP),\n oper_speed: float\n Operating speed, rpm,\n RHO_ratio: list\n Density ratio between the discharge gas density and the suction\n gas density per impeller (RHO_discharge / RHO_suction),\n kg/m3 (lbm/in.3),\n RHOs: float\n Suction gas density in the first stage, kg/m3 (lbm/in.3).\n RHOd: float\n Discharge gas density in the last stage, kg/m3 (lbm/in.3),\n unit: str, optional\n Adopted unit system. Options are \"m\" (meter) and \"in\" (inch)\n Default is \"m\"\n\n Attributes\n ----------\n condition: bool\n False: Stability Level 1 satisfies the analysis;\n True: Stability Level 2 is required.\n\n Return\n ------\n fig1 : Plotly graph_objects.Figure()\n Applied Cross-Coupled Stiffness vs. Log Decrement plot.\n fig2 : Plotly graph_objects.Figure()\n CSR vs. Mean Gas Density plot.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> fig1, fig2 = report.stability_level_1(D=[0.35, 0.35],\n ... H=[0.08, 0.08],\n ... HP=[10000, 10000],\n ... RHO_ratio=[1.11, 1.14],\n ... RHOd=30.45,\n ... RHOs=37.65,\n ... 
oper_speed=1000.0)\n >>> report.Qa\n 23022.32142857143\n \"\"\"\n steps = 11\n if unit == \"m\":\n C = 9.55\n elif unit == \"in\":\n C = 63.0\n else:\n raise TypeError(\"choose between meters (m) or inches (in)\")\n\n if len(D) != len(H):\n raise Exception(\"length of D must be the same of H\")\n\n Qa = 0.0\n cross_coupled_array = np.array([])\n # Qa - Anticipated cross-coupling for compressors - API 684 - SP6.8.5.6\n if self.machine_type == \"compressor\":\n Bc = 3.0\n Dc, Hc = D, H\n for i, disk in enumerate(self.rotor.disk_elements):\n if disk.n in self.disk_nodes:\n qi = HP[i] * Bc * C * RHO_ratio[i] / (Dc[i] * Hc[i] * oper_speed)\n Qi = np.linspace(0, 10 * qi, steps)\n cross_coupled_array = np.append(cross_coupled_array, Qi)\n Qa += qi\n\n # Qa - Anticipated cross-coupling for turbines - API 684 - SP6.8.5.6\n if self.machine_type == \"turbine\" or self.machine_type == \"axial_flow\":\n Bt = 1.5\n Dt, Ht = D, H\n for i, disk in enumerate(self.rotor.disk_elements):\n if disk.n in self.disk_nodes:\n qi = (HP[i] * Bt * C) / (Dt[i] * Ht[i] * oper_speed)\n Qi = np.linspace(0, 10 * qi, steps)\n cross_coupled_array = np.append(cross_coupled_array, Qi)\n Qa += qi\n\n # Defining cross-coupling range to 10*Qa - API 684 - SP6.8.5.8\n Qi = np.linspace(0, 10 * Qa, steps)\n cross_coupled_array = np.append(cross_coupled_array, Qi)\n cross_coupled_array = cross_coupled_array.reshape(\n [len(self.disk_nodes) + 1, steps]\n ).T\n\n log_dec = np.zeros(len(cross_coupled_array))\n\n # remove disks and seals from the rotor model\n bearing_list = [\n copy(b)\n for b in self.rotor.bearing_elements\n if not isinstance(b, SealElement)\n ]\n\n # Applying cross-coupling on rotor mid-span\n if self.rotor_type == \"between_bearings\":\n for i, Q in enumerate(cross_coupled_array[:, -1]):\n bearings = [copy(b) for b in bearing_list]\n\n # cross-coupling introduced at the rotor mid-span\n n = np.round(np.mean(self.rotor.nodes))\n cross_coupling = BearingElement(n=int(n), kxx=0, cxx=0, kxy=Q, kyx=-Q)\n bearings.append(cross_coupling)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[],\n bearing_elements=bearings,\n rated_w=self.rotor.rated_w,\n )\n modal = aux_rotor.run_modal(speed=oper_speed * np.pi / 30)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec[i] = modal.log_dec[non_backward][0]\n\n # Applying cross-coupling for each disk - API 684 - SP6.8.5.9\n else:\n for i, Q in enumerate(cross_coupled_array[:, :-1]):\n bearings = [copy(b) for b in bearing_list]\n # cross-coupling introduced at overhung disks\n for n, q in zip(self.disk_nodes, Q):\n cross_coupling = BearingElement(n=n, kxx=0, cxx=0, kxy=q, kyx=-q)\n bearings.append(cross_coupling)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[],\n bearing_elements=bearings,\n rated_w=self.rotor.rated_w,\n )\n modal = aux_rotor.run_modal(speed=oper_speed * np.pi / 30)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec[i] = modal.log_dec[non_backward][0]\n\n # verifies if log dec is greater than zero to begin extrapolation\n cross_coupled_Qa = cross_coupled_array[:, -1]\n if log_dec[-1] >= 0:\n g = interp1d(\n cross_coupled_Qa, log_dec, fill_value=\"extrapolate\", kind=\"linear\"\n )\n stiff = cross_coupled_Qa[-1] * (1 + 1 / (len(cross_coupled_Qa)))\n while g(stiff) > 0:\n log_dec = np.append(log_dec, g(stiff))\n cross_coupled_Qa = np.append(cross_coupled_Qa, stiff)\n stiff += cross_coupled_Qa[-1] / (len(cross_coupled_Qa))\n Q0 = cross_coupled_Qa[-1]\n\n else:\n idx = 
min(range(len(log_dec)), key=lambda i: abs(log_dec[i]))\n Q0 = cross_coupled_Qa[idx]\n\n # Find value for log_dec corresponding to Qa\n log_dec_a = log_dec[np.where(cross_coupled_Qa == Qa)][0]\n\n # CSR - Critical Speed Ratio\n crit_speed = self.rotor.run_modal(speed=self.maxspeed).wn[0]\n CSR = self.maxspeed / crit_speed\n\n # RHO_mean - Average gas density\n RHO_mean = (RHOd + RHOs) / 2\n RHO = np.linspace(0, RHO_mean * 5, 501)\n\n # CSR_boundary - function to define the CSR boundaries\n CSR_boundary = np.piecewise(\n RHO,\n [RHO <= 16.53, RHO > 16.53, RHO == 60, RHO > 60],\n [2.5, lambda RHO: (-0.0115 * RHO + 2.69), 2.0, 0.0],\n )\n\n # Plotting area\n\n fig1 = go.Figure()\n\n fig1.add_trace(\n go.Scatter(\n x=cross_coupled_Qa,\n y=log_dec,\n mode=\"lines\",\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n fig1.add_trace(\n go.Scatter(\n x=[Qa],\n y=[log_dec_a],\n mode=\"markers\",\n name=\"<b>Qa: Anticipated cross-coupling</b>\",\n hoverinfo=\"none\",\n )\n )\n fig1.add_annotation(\n x=Qa,\n y=log_dec_a,\n axref=\"x\",\n ayref=\"y\",\n xshift=15,\n yshift=15,\n text=\"<b>Qa</b>\",\n showarrow=False,\n )\n fig1.update_xaxes(\n title_text=\"<b>Applied Cross-Coupled Stiffness, Q (N/m)</b>\",\n rangemode=\"nonnegative\",\n )\n fig1.update_yaxes(title_text=\"<b>Log Dec</b>\", rangemode=\"nonnegative\")\n fig1.update_layout(\n title=dict(\n text=(\n \"<b>Applied Cross-Coupled Stiffness vs. Log Decrement</b><br>\"\n + \"<b>(API 684 - SP 6.8.5.10)</b>\"\n )\n )\n )\n\n fig2 = go.Figure()\n fig2.add_annotation(\n x=RHO_mean,\n y=CSR,\n axref=\"x\",\n ayref=\"y\",\n xshift=40,\n yshift=0,\n text=\"<b>{}</b>\".format(self.tag),\n showarrow=False,\n )\n\n for text, x, y in zip([\"Region A\", \"Region B\"], [30, 60], [1.20, 2.75]):\n fig2.add_annotation(\n x=x,\n y=y,\n axref=\"x\",\n ayref=\"y\",\n xshift=0,\n yshift=0,\n text=f\"<b>{text}</b>\",\n opacity=0.4,\n showarrow=False,\n )\n\n fig2.add_trace(\n go.Scatter(\n x=RHO,\n y=CSR_boundary,\n mode=\"lines\",\n showlegend=False,\n hoverinfo=\"none\",\n xaxis=\"x\",\n )\n )\n fig2.add_trace(\n go.Scatter(\n x=0.062428 * RHO,\n y=CSR_boundary,\n mode=\"lines\",\n showlegend=False,\n hoverinfo=\"none\",\n xaxis=\"x2\",\n )\n )\n fig2.add_trace(\n go.Scatter(\n x=[RHO_mean],\n y=[CSR],\n mode=\"markers\",\n name=\"<b>CSR: Critical Speed Ratio</b>\",\n hoverinfo=\"none\",\n xaxis=\"x\",\n )\n )\n\n fig2.update_xaxes(mirror=True)\n fig2.update_yaxes(\n title_text=\"<b>Maximum Critical Speed Ratio</b>\",\n rangemode=\"nonnegative\",\n domain=[0.1, 1],\n )\n fig2.update_layout(\n xaxis=dict(\n title_text=\"<b>kg/m³</b>\",\n rangemode=\"nonnegative\",\n overlaying=\"x2\",\n anchor=\"y\",\n ),\n xaxis2=dict(\n title_text=\"<b>lb/ft³</b>\",\n rangemode=\"nonnegative\",\n anchor=\"free\",\n side=\"bottom\",\n position=0,\n ),\n title=dict(\n text=(\n \"<b>CSR vs. 
Mean Gas Density</b><br>\"\n + \"<b>(API 684 - SP 6.8.5.10)</b>\"\n )\n ),\n )\n\n # Level 1 screening criteria - API 684 - SP6.8.5.10\n idx = min(range(len(RHO)), key=lambda i: abs(RHO[i] - RHO_mean))\n\n if self.machine_type == \"compressor\":\n if Q0 / Qa < 2.0:\n condition = True\n\n if log_dec_a < 0.1:\n condition = True\n\n if 2.0 < Q0 / Qa < 10.0 and CSR > CSR_boundary[idx]:\n condition = True\n\n else:\n condition = False\n\n if self.machine_type == \"turbine\" or self.machine_type == \"axial flow\":\n if log_dec_a < 0.1:\n condition = True\n\n else:\n condition = False\n\n # updating attributes\n self.Q0 = Q0\n self.Qa = Qa\n self.log_dec_a = log_dec_a\n self.CSR = CSR\n self.Qratio = Q0 / Qa\n self.crit_speed = crit_speed\n self.MCS = self.maxspeed\n self.RHO_gas = RHO_mean\n self.condition = condition\n\n return fig1, fig2\n\n def stability_level_2(self):\n \"\"\"Stability analysis level 2.\n\n For the level 2 stability analysis additional sources that contribute\n to the rotor stability shall be considered such as:\n a) labyrinth seals;\n b) damper seals;\n c) impeller/blade flow aerodynamic effects;\n d) internal friction.\n\n Returns\n -------\n df_logdec: pd.DataFrame\n A dataframe relating the logarithmic decrement for each case analyzed.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> dataframe = report.stability_level_2()\n \"\"\"\n # Build a list of seals\n seal_list = [\n copy(b) for b in self.rotor.bearing_elements if isinstance(b, SealElement)\n ]\n\n bearing_list = [\n copy(b)\n for b in self.rotor.bearing_elements\n if not isinstance(b, SealElement)\n ]\n\n log_dec_seal = []\n log_dec_disk = []\n log_dec_full = []\n data_seal = {}\n data_disk = {}\n data_rotor = {}\n\n # Evaluate log dec for each component - Disks\n if len(self.rotor.disk_elements):\n for disk in self.rotor.disk_elements:\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[disk],\n bearing_elements=bearing_list,\n rated_w=self.maxspeed,\n )\n modal = aux_rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_disk.append(modal.log_dec[non_backward][0])\n\n # Evaluate log dec for group bearings + disks\n disk_tags = [\n \"Shaft + Bearings + \" + disk.tag for disk in self.rotor.disk_elements\n ]\n\n # Evaluate log dec for group bearings + all disks\n if len(self.rotor.disk_elements) > 1:\n all_disks_tag = \" + \".join(\n [disk.tag for disk in self.rotor.disk_elements]\n )\n disk_tags.append(\"Shaft + Bearings + \" + all_disks_tag)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=self.rotor.disk_elements,\n bearing_elements=bearing_list,\n rated_w=self.maxspeed,\n )\n modal = aux_rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_disk.append(modal.log_dec[non_backward][0])\n\n data_disk = {\"tags\": disk_tags, \"log_dec\": log_dec_disk}\n\n # Evaluate log dec for each component - Seals\n if len(seal_list):\n for seal in seal_list:\n bearings_seal = deepcopy(bearing_list)\n bearings_seal.append(seal)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[],\n bearing_elements=bearings_seal,\n rated_w=self.maxspeed,\n )\n modal = aux_rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_seal.append(modal.log_dec[non_backward][0])\n\n seal_tags = [\"Shaft + Bearings + \" + seal.tag for seal in seal_list]\n\n if len(seal_list) > 
1:\n # Evaluate log dec for group bearings + seals\n all_seals_tag = \" + \".join([seal.tag for seal in seal_list])\n seal_tags.append(\"Shaft + Bearings + \" + all_seals_tag)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[],\n bearing_elements=self.rotor.bearing_elements,\n rated_w=self.maxspeed,\n )\n modal = aux_rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_seal.append(modal.log_dec[non_backward][0])\n\n data_seal = {\"tags\": seal_tags, \"log_dec\": log_dec_seal}\n\n # Evaluate log dec for all components\n modal = self.rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_full.append(modal.log_dec[non_backward][0])\n rotor_tags = [self.tag]\n\n data_rotor = {\"tags\": rotor_tags, \"log_dec\": log_dec_full}\n\n df_logdec_disk = pd.DataFrame(data_disk)\n df_logdec_seal = pd.DataFrame(data_seal)\n df_logdec_full = pd.DataFrame(data_rotor)\n df_logdec = pd.concat([df_logdec_disk, df_logdec_seal, df_logdec_full])\n df_logdec = df_logdec.reset_index(drop=True)\n\n self.df_logdec_disk = df_logdec_disk\n self.df_logdec_seal = df_logdec_seal\n self.df_logdec_full = df_logdec_full\n self.df_logdec = df_logdec\n\n return df_logdec\n\n def summary(self):\n \"\"\"Return datarfreames for Report summary.\n\n This method will create dataframes with relevant info about the report.\n\n Returns\n -------\n df_stab_lvl1 : pd.DataFrame\n Dataframe with stability level 1 results\n df_stab_lvl2 : pd.DataFrame\n Dataframe with stability level 2 results\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> stability1 = report.stability_level_1(D=[0.35, 0.35],\n ... H=[0.08, 0.08],\n ... HP=[10000, 10000],\n ... RHO_ratio=[1.11, 1.14],\n ... RHOd=30.45,\n ... RHOs=37.65,\n ... oper_speed=1000.0)\n >>> stability2 = report.stability_level_2()\n >>> df_lvl1, df_lvl2 = report.summary()\n \"\"\"\n stab_lvl1_data = dict(\n tags=[self.tag],\n machine_type=[self.machine_type],\n Q0=[self.Q0],\n Qa=[self.Qa],\n log_dec_a=[self.log_dec_a],\n Qratio=[self.Qratio],\n crti_speed=[self.crit_speed],\n MCS=[self.MCS],\n CSR=[self.CSR],\n RHO_gas=[self.RHO_gas],\n )\n stab_lvl2_data = dict(\n tags=self.df_logdec[\"tags\"], logdec=self.df_logdec[\"log_dec\"]\n )\n\n df_stab_lvl1 = pd.DataFrame(stab_lvl1_data)\n df_stab_lvl2 = pd.DataFrame(stab_lvl2_data)\n\n return df_stab_lvl1, df_stab_lvl2\n\n def plot_summary(self):\n \"\"\"Plot the report .\n\n This method will create tables to be presented in the report.\n\n Returns\n -------\n fig : Plotly graph_objects.make_subplots()\n The figure object with the tables.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> stability1 = report.stability_level_1(D=[0.35, 0.35],\n ... H=[0.08, 0.08],\n ... HP=[10000, 10000],\n ... RHO_ratio=[1.11, 1.14],\n ... RHOd=30.45,\n ... RHOs=37.65,\n ... 
oper_speed=1000.0)\n >>> stability2 = report.stability_level_2()\n >>> table = report.plot_summary()\n \"\"\"\n stab_lvl1_data, stab_lvl2_data = self.summary()\n for var in stab_lvl1_data.columns[2:]:\n stab_lvl1_data[str(var)] = np.round(stab_lvl1_data[str(var)], 3)\n\n stab_lvl2_data[\"logdec\"] = np.round(stab_lvl2_data[\"logdec\"], 4)\n\n stab_lvl1_titles = [\n \"<b>Rotor Tag</b>\",\n \"<b>Machine Type</b>\",\n \"<b>Q_0</b>\",\n \"<b>Q_A</b>\",\n \"<b>log dec (δ)</b>\",\n \"<b>Q_0 / Q_A</b>\",\n \"<b>1st Critical Spped</b>\",\n \"<b>MCS</b>\",\n \"<b>CSR</b>\",\n \"<b>Gas Density</b>\",\n ]\n stab_lvl2_titles = [\"<b>Components</b>\", \"<b>Log. Dec.</b>\"]\n\n fig = make_subplots(\n rows=2,\n cols=1,\n specs=[[{\"type\": \"table\"}], [{\"type\": \"table\"}]],\n subplot_titles=[\"<b>Stability Level 1</b>\", \"<b>Stability Level 2</b>\"],\n )\n\n colors = [\"#ffffff\", \"#c4d9ed\"]\n cell_colors = [colors[i % 2] for i in range(len(stab_lvl1_data[\"tags\"]))]\n fig.add_trace(\n go.Table(\n header=dict(\n values=stab_lvl1_titles,\n font=dict(family=\"Verdana\", size=14, color=\"white\"),\n line=dict(color=\"#1e4162\", width=1.5),\n fill=dict(color=\"#1e4162\"),\n align=\"center\",\n ),\n cells=dict(\n values=[stab_lvl1_data[str(var)] for var in stab_lvl1_data.columns],\n font=dict(family=\"Verdana\", size=14, color=\"#12263b\"),\n line=dict(color=\"#c4d9ed\", width=1.5),\n fill=dict(color=[cell_colors * len(stab_lvl1_data[\"tags\"])]),\n align=\"center\",\n height=25,\n ),\n ),\n row=1,\n col=1,\n )\n\n cell_colors = [colors[i % 2] for i in range(len(stab_lvl2_data[\"tags\"]))]\n fig.add_trace(\n go.Table(\n header=dict(\n values=stab_lvl2_titles,\n font=dict(family=\"Verdana\", size=14, color=\"white\"),\n line=dict(color=\"#1e4162\", width=1.5),\n fill=dict(color=\"#1e4162\"),\n align=\"center\",\n ),\n cells=dict(\n values=[stab_lvl2_data[str(var)] for var in stab_lvl2_data.columns],\n font=dict(family=\"Verdana\", size=14, color=\"#12263b\"),\n line=dict(color=\"#c4d9ed\", width=1.5),\n fill=dict(color=[cell_colors * len(stab_lvl2_data[\"tags\"])]),\n align=\"center\",\n height=25,\n ),\n ),\n row=2,\n col=1,\n )\n\n return fig\n\n\ndef report_example():\n \"\"\"Build a report example.\n\n This function returns an instance of a simple report from a rotor\n example. 
The purpose of this is to make available a simple model\n so that doctest can be written using this.\n\n Returns\n -------\n An instance of a report object.\n\n Examples\n --------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> report.rotor_type\n 'between_bearings'\n \"\"\"\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(\n n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk1 = DiskElement.from_geometry(\n n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n\n stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]\n stfy = [0.8e7, 0.9e7, 1.0e7, 1.1e7]\n freq = [400, 800, 1200, 1600]\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=2e3, frequency=freq)\n bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=2e3, frequency=freq)\n\n rotor = Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n # coefficients for minimum clearance\n stfx = [0.7e7, 0.8e7, 0.9e7, 1.0e7]\n dampx = [2.0e3, 1.9e3, 1.8e3, 1.7e3]\n freq = [400, 800, 1200, 1600]\n bearing0 = BearingElement(0, kxx=stfx, cxx=dampx, frequency=freq)\n bearing1 = BearingElement(6, kxx=stfx, cxx=dampx, frequency=freq)\n min_clearance_brg = [bearing0, bearing1]\n\n # coefficients for maximum clearance\n stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]\n dampx = [2.8e3, 2.7e3, 2.6e3, 2.5e3]\n freq = [400, 800, 1200, 1600]\n bearing0 = BearingElement(0, kxx=stfx, cxx=dampx, frequency=freq)\n bearing1 = BearingElement(6, kxx=stfx, cxx=dampx, frequency=freq)\n max_clearance_brg = [bearing0, bearing1]\n\n bearings = [min_clearance_brg, max_clearance_brg]\n return Report(\n rotor=rotor,\n speed_range=(400, 1000),\n tripspeed=1200,\n bearing_stiffness_range=(5, 8),\n bearing_clearance_lists=bearings,\n speed_units=\"rad/s\",\n )\n",
"from pathlib import Path\nfrom tempfile import tempdir\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose, assert_almost_equal, assert_equal\n\nfrom ross.bearing_seal_element import *\nfrom ross.disk_element import *\nfrom ross.materials import steel\nfrom ross.point_mass import *\nfrom ross.rotor_assembly import *\nfrom ross.shaft_element import *\n\n\[email protected]\ndef rotor1():\n # Rotor without damping with 2 shaft elements - no disks and no bearings\n le_ = 0.25\n i_d_ = 0\n o_d_ = 0.05\n\n tim0 = ShaftElement(\n le_,\n i_d_,\n o_d_,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n tim1 = ShaftElement(\n le_,\n i_d_,\n o_d_,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n\n shaft_elm = [tim0, tim1]\n return Rotor(shaft_elm, [], [])\n\n\ndef test_index_eigenvalues_rotor1(rotor1):\n evalues = np.array(\n [\n -3.8 + 68.6j,\n -3.8 - 68.6j,\n -1.8 + 30.0j,\n -1.8 - 30.0j,\n -0.7 + 14.4j,\n -0.7 - 14.4j,\n ]\n )\n evalues2 = np.array(\n [0.0 + 68.7j, 0.0 - 68.7j, 0.0 + 30.1j, 0.0 - 30.1j, -0.0 + 14.4j, -0.0 - 14.4j]\n )\n assert_almost_equal([4, 2, 0, 1, 3, 5], rotor1._index(evalues))\n assert_almost_equal([4, 2, 0, 1, 3, 5], rotor1._index(evalues2))\n\n\ndef test_mass_matrix_rotor1(rotor1):\n # fmt: off\n Mr1 = np.array([[ 1.421, 0. , 0. , 0.049, 0.496, 0. , 0. , -0.031, 0. , 0. , 0. , 0. ],\n [ 0. , 1.421, -0.049, 0. , 0. , 0.496, 0.031, 0. , 0. , 0. , 0. , 0. ],\n [ 0. , -0.049, 0.002, 0. , 0. , -0.031, -0.002, 0. , 0. , 0. , 0. , 0. ],\n [ 0.049, 0. , 0. , 0.002, 0.031, 0. , 0. , -0.002, 0. , 0. , 0. , 0. ],\n [ 0.496, 0. , 0. , 0.031, 2.841, 0. , 0. , 0. , 0.496, 0. , 0. , -0.031],\n [ 0. , 0.496, -0.031, 0. , 0. , 2.841, 0. , 0. , 0. , 0.496, 0.031, 0. ],\n [ 0. , 0.031, -0.002, 0. , 0. , 0. , 0.005, 0. , 0. , -0.031, -0.002, 0. ],\n [-0.031, 0. , 0. , -0.002, 0. , 0. , 0. , 0.005, 0.031, 0. , 0. , -0.002],\n [ 0. , 0. , 0. , 0. , 0.496, 0. , 0. , 0.031, 1.421, 0. , 0. , -0.049],\n [ 0. , 0. , 0. , 0. , 0. , 0.496, -0.031, 0. , 0. , 1.421, 0.049, 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0.031, -0.002, 0. , 0. , 0.049, 0.002, 0. ],\n [ 0. , 0. , 0. , 0. , -0.031, 0. , 0. , -0.002, -0.049, 0. , 0. 
, 0.002]])\n # fmt: on\n assert_almost_equal(rotor1.M(), Mr1, decimal=3)\n\n\ndef test_raise_if_element_outside_shaft():\n le_ = 0.25\n i_d_ = 0\n o_d_ = 0.05\n\n tim0 = ShaftElement(\n le_,\n i_d_,\n o_d_,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n tim1 = ShaftElement(\n le_,\n i_d_,\n o_d_,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n\n shaft_elm = [tim0, tim1]\n disk0 = DiskElement.from_geometry(3, steel, 0.07, 0.05, 0.28)\n stf = 1e6\n bearing0 = BearingElement(0, kxx=stf, cxx=0)\n bearing1 = BearingElement(3, kxx=stf, cxx=0)\n bearings = [bearing0, bearing1]\n\n with pytest.raises(ValueError) as excinfo:\n Rotor(shaft_elm, [disk0])\n assert \"Trying to set disk or bearing outside shaft\" == str(excinfo.value)\n\n with pytest.raises(ValueError) as excinfo:\n Rotor(shaft_elm, bearing_elements=bearings)\n assert \"Trying to set disk or bearing outside shaft\" == str(excinfo.value)\n\n\[email protected]\ndef rotor2():\n # Rotor without damping with 2 shaft elements 1 disk and 2 bearings\n le_ = 0.25\n i_d_ = 0\n o_d_ = 0.05\n\n tim0 = ShaftElement(\n le_,\n i_d_,\n o_d_,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n tim1 = ShaftElement(\n le_,\n i_d_,\n o_d_,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n\n shaft_elm = [tim0, tim1]\n disk0 = DiskElement.from_geometry(1, steel, 0.07, 0.05, 0.28)\n stf = 1e6\n bearing0 = BearingElement(0, kxx=stf, cxx=0)\n bearing1 = BearingElement(2, kxx=stf, cxx=0)\n\n return Rotor(shaft_elm, [disk0], [bearing0, bearing1])\n\n\ndef test_mass_matrix_rotor2(rotor2):\n # fmt: off\n Mr2 = np.array([[ 1.421, 0. , 0. , 0.049, 0.496, 0. , 0. , -0.031, 0. , 0. , 0. , 0. ],\n [ 0. , 1.421, -0.049, 0. , 0. , 0.496, 0.031, 0. , 0. , 0. , 0. , 0. ],\n [ 0. , -0.049, 0.002, 0. , 0. , -0.031, -0.002, 0. , 0. , 0. , 0. , 0. ],\n [ 0.049, 0. , 0. , 0.002, 0.031, 0. , 0. , -0.002, 0. , 0. , 0. , 0. ],\n [ 0.496, 0. , 0. , 0.031, 35.431, 0. , 0. , 0. , 0.496, 0. , 0. , -0.031],\n [ 0. , 0.496, -0.031, 0. , 0. , 35.431, 0. , 0. , 0. , 0.496, 0.031, 0. ],\n [ 0. , 0.031, -0.002, 0. , 0. , 0. , 0.183, 0. , 0. , -0.031, -0.002, 0. ],\n [ -0.031, 0. , 0. , -0.002, 0. , 0. , 0. , 0.183, 0.031, 0. , 0. , -0.002],\n [ 0. , 0. , 0. , 0. , 0.496, 0. , 0. , 0.031, 1.421, 0. , 0. , -0.049],\n [ 0. , 0. , 0. , 0. , 0. , 0.496, -0.031, 0. , 0. , 1.421, 0.049, 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0.031, -0.002, 0. , 0. , 0.049, 0.002, 0. ],\n [ 0. , 0. , 0. , 0. , -0.031, 0. , 0. , -0.002, -0.049, 0. , 0. 
, 0.002]])\n # fmt: on\n assert_almost_equal(rotor2.M(), Mr2, decimal=3)\n\n\ndef test_a0_0_matrix_rotor2(rotor2):\n # fmt: off\n A0_0 = np.array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n # fmt: on\n assert_almost_equal(rotor2.A()[:12, :12], A0_0, decimal=3)\n\n\ndef test_a0_1_matrix_rotor2(rotor2):\n # fmt: off\n A0_1 = np.array([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])\n # fmt: on\n assert_almost_equal(rotor2.A()[:12, 12:24], A0_1, decimal=3)\n\n\ndef test_a1_0_matrix_rotor2(rotor2):\n # fmt: off\n A1_0 = np.array([[ 20.63 , -0. , 0. , 4.114, -20.958, 0. , 0. , 1.11 , 0.056, -0. , -0. , -0.014],\n [ 0. , 20.63 , -4.114, 0. , -0. , -20.958, -1.11 , 0. , -0. , 0.056, 0.014, 0. ],\n [ 0. , 697.351, -131.328, 0. , -0. , -705.253, -44.535, 0. , -0. , 2.079, 0.596, 0. ],\n [-697.351, 0. , -0. , -131.328, 705.253, -0. , -0. , -44.535, -2.079, 0. , 0. , 0.596],\n [ 0.442, 0. , -0. , 0.072, -0.887, -0. , -0. , -0. , 0.442, 0. , 0. , -0.072],\n [ 0. , 0.442, -0.072, 0. , -0. , -0.887, 0. , 0. , 0. , 0.442, 0.072, -0. ],\n [ 0. , 6.457, -0.837, 0. , -0. , 0. , -1.561, 0. , -0. , -6.457, -0.837, -0. ],\n [ -6.457, -0. , 0. , -0.837, 0. , 0. , 0. , -1.561, 6.457, 0. , 0. , -0.837],\n [ 0.056, -0. , 0. , 0.014, -20.958, 0. , 0. , -1.11 , 20.63 , 0. , 0. , -4.114],\n [ 0. , 0.056, -0.014, 0. , -0. , -20.958, 1.11 , 0. , 0. , 20.63 , 4.114, -0. ],\n [ -0. , -2.079, 0.596, -0. , 0. , 705.253, -44.535, -0. , -0. , -697.351, -131.328, 0. ],\n [ 2.079, 0. , -0. , 0.596, -705.253, -0. , 0. , -44.535, 697.351, 0. , 0. 
, -131.328]])\n # fmt: on\n assert_almost_equal(rotor2.A()[12:24, :12] / 1e7, A1_0, decimal=3)\n\n\ndef test_a1_1_matrix_rotor2(rotor2):\n # fmt: off\n A1_1 = np.array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n # fmt: on\n assert_almost_equal(rotor2.A()[12:24, 12:24] / 1e7, A1_1, decimal=3)\n\n\ndef test_evals_sorted_rotor2(rotor2):\n evals_sorted = np.array(\n [\n 1.4667459679e-12 + 215.3707255735j,\n 3.9623200168e-12 + 215.3707255733j,\n 7.4569772223e-11 + 598.0247411492j,\n 1.1024641658e-11 + 598.0247411456j,\n 4.3188161105e-09 + 3956.2249777612j,\n 2.5852376472e-11 + 3956.2249797838j,\n 4.3188161105e-09 - 3956.2249777612j,\n 2.5852376472e-11 - 3956.2249797838j,\n 7.4569772223e-11 - 598.0247411492j,\n 1.1024641658e-11 - 598.0247411456j,\n 1.4667459679e-12 - 215.3707255735j,\n 3.9623200168e-12 - 215.3707255733j,\n ]\n )\n\n evals_sorted_w_10000 = np.array(\n [\n -4.838034e-14 + 34.822138j,\n -5.045245e-01 + 215.369011j,\n 5.045245e-01 + 215.369011j,\n 8.482603e-08 + 3470.897616j,\n 4.878990e-07 + 3850.212629j,\n 4.176291e01 + 3990.22903j,\n 4.176291e01 - 3990.22903j,\n 4.878990e-07 - 3850.212629j,\n 8.482603e-08 - 3470.897616j,\n 5.045245e-01 - 215.369011j,\n -5.045245e-01 - 215.369011j,\n -4.838034e-14 - 34.822138j,\n ]\n )\n modal2_0 = rotor2.run_modal(speed=0)\n rotor2_evals, rotor2_evects = rotor2._eigen(speed=0)\n assert_allclose(rotor2_evals, evals_sorted, rtol=1e-3)\n assert_allclose(modal2_0.evalues, evals_sorted, rtol=1e-3)\n modal2_10000 = rotor2.run_modal(speed=10000)\n assert_allclose(modal2_10000.evalues, evals_sorted_w_10000, rtol=1e-1)\n\n\[email protected]\ndef rotor3():\n # Rotor without damping with 6 shaft elements 2 disks and 2 bearings\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(2, steel, 0.07, 0.05, 0.28)\n disk1 = DiskElement.from_geometry(4, steel, 0.07, 0.05, 0.35)\n\n stfx = 1e6\n stfy = 0.8e6\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=0)\n bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=0)\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\[email protected]\ndef rotor3_odd():\n # Rotor without damping with odd number of shaft elements (7)\n # 2 disks and 2 bearings\n i_d = 0\n o_d = 0.05\n n = 7\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(2, steel, 0.07, 0.05, 0.28)\n disk1 = DiskElement.from_geometry(4, steel, 0.07, 0.05, 0.35)\n\n stfx = 1e6\n stfy = 0.8e6\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=0)\n bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=0)\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\ndef 
test_rotor_attributes(rotor1, rotor3, rotor3_odd):\n assert len(rotor1.nodes) == 3\n assert len(rotor1.nodes_i_d) == 3\n assert len(rotor1.nodes_o_d) == 3\n assert rotor1.L == 0.5\n assert rotor1.m_disks == 0\n assert rotor1.m_shaft == 7.6674495701675891\n assert rotor1.m == 7.6674495701675891\n assert rotor1.nodes_pos[0] == 0\n assert rotor1.nodes_pos[1] == 0.25\n assert rotor1.nodes_pos[-1] == 0.5\n assert len(rotor3.shaft_elements_length) == 6\n assert len(rotor3_odd.shaft_elements_length) == 7\n\n\ndef test_kappa_rotor3(rotor3):\n # TODO: Move this to test_results.py\n modal3_0 = rotor3.run_modal(speed=0)\n assert_allclose(modal3_0.kappa(0, 0)[\"Frequency\"], 82.653037, rtol=1e-3)\n assert_allclose(modal3_0.kappa(0, 0)[\"Major axes\"], 0.001454062985920231, rtol=1e-3)\n assert_allclose(\n modal3_0.kappa(0, 0)[\"Minor axes\"], 2.0579515874459978e-11, rtol=1e-3, atol=1e-6\n )\n assert_allclose(\n modal3_0.kappa(0, 0)[\"kappa\"], -1.415311171090584e-08, rtol=1e-3, atol=1e-6\n )\n\n modal3_2000 = rotor3.run_modal(speed=2000)\n assert_allclose(modal3_2000.kappa(0, 0)[\"Frequency\"], 77.37957042, rtol=1e-3)\n assert_allclose(\n modal3_2000.kappa(0, 0)[\"Major axes\"], 0.0011885396330204021, rtol=1e-3\n )\n assert_allclose(\n modal3_2000.kappa(0, 0)[\"Minor axes\"], 0.0007308144427338161, rtol=1e-3\n )\n assert_allclose(modal3_2000.kappa(0, 0)[\"kappa\"], -0.6148843693807821, rtol=1e-3)\n\n assert_allclose(modal3_2000.kappa(0, 1)[\"Frequency\"], 88.98733511566752, rtol=1e-3)\n assert_allclose(\n modal3_2000.kappa(0, 1)[\"Major axes\"], 0.0009947502339267566, rtol=1e-3\n )\n assert_allclose(\n modal3_2000.kappa(0, 1)[\"Minor axes\"], 0.0008412470069506472, rtol=1e-3\n )\n assert_allclose(modal3_2000.kappa(0, 1)[\"kappa\"], 0.8456866641084784, rtol=1e-3)\n\n assert_allclose(modal3_2000.kappa(1, 1)[\"Frequency\"], 88.98733511566752, rtol=1e-3)\n assert_allclose(\n modal3_2000.kappa(1, 1)[\"Major axes\"], 0.0018877975750108973, rtol=1e-3\n )\n assert_allclose(\n modal3_2000.kappa(1, 1)[\"Minor axes\"], 0.0014343257484060105, rtol=1e-3\n )\n assert_allclose(modal3_2000.kappa(1, 1)[\"kappa\"], 0.7597878964314968, rtol=1e-3)\n\n\ndef test_kappa_mode_rotor3(rotor3):\n modal3_2000 = rotor3.run_modal(2000)\n assert_allclose(\n modal3_2000.kappa_mode(0),\n [-0.614884, -0.696056, -0.723983, -0.729245, -0.708471, -0.656976, -0.513044],\n rtol=1e-3,\n )\n\n assert_allclose(\n modal3_2000.kappa_mode(1),\n [0.845687, 0.759788, 0.734308, 0.737393, 0.778295, 0.860137, 0.948157],\n rtol=1e-3,\n )\n\n\[email protected]\ndef rotor4():\n # Rotor without damping with 6 shaft elements 2 disks and 2 bearings\n # Same as rotor3, but constructed with sections.\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n n0 = len(L) // 2\n n1 = len(L) // 2\n L0 = sum(L[:n0])\n L1 = sum(L[n1:])\n sec0 = ShaftElement.section(L0, n0, i_d, o_d, material=steel)\n sec1 = ShaftElement.section(L1, n1, i_d, o_d, material=steel)\n\n shaft_elem = [sec0, sec1]\n\n disk0 = DiskElement.from_geometry(2, steel, 0.07, 0.05, 0.28)\n disk1 = DiskElement.from_geometry(4, steel, 0.07, 0.05, 0.35)\n\n stfx = 1e6\n stfy = 0.8e6\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=0)\n bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=0)\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\ndef test_evals_rotor3_rotor4(rotor3, rotor4):\n rotor3_evals, rotor3_evects = rotor3._eigen(speed=0)\n rotor4_evals, rotor4_evects = rotor4._eigen(speed=0)\n\n assert_allclose(rotor3_evals, rotor4_evals, rtol=1e-3)\n\n\ndef 
test_campbell(rotor4):\n speed = np.linspace(0, 300, 3)\n camp = rotor4.run_campbell(speed)\n\n camp_calculated = camp.wd\n # fmt: off\n camp_desired = np.array([[82.65303734, 86.65811435, 254.52047828, 274.31285391, 679.48903239, 716.78631221],\n [82.60929602, 86.68625235, 251.70037114, 276.87787937, 652.85679897, 742.60864608],\n [82.48132723, 86.76734307, 245.49092844, 282.33294699, 614.05536277, 779.07778334]])\n # fmt: on\n assert_allclose(camp_calculated, camp_desired)\n\n\[email protected](reason=\"Needs investigation. It fails depending on system.\")\ndef test_freq_response(rotor4):\n magdb_exp = np.array(\n [\n [\n [-120.0, -120.86944548, -115.66348242, -125.09053613],\n [-363.3527912, -151.34622928, -119.93523136, -131.80470016],\n [-354.61148814, -160.12580074, -126.60092157, -129.32321566],\n [-123.52182518, -115.10369362, -117.98804019, -114.71703185],\n ],\n [\n [-372.78089953, -151.34622928, -119.93523136, -131.80470016],\n [-118.06179974, -118.64323754, -120.32380413, -123.40124075],\n [-121.58362492, -113.542778, -118.87061678, -113.43742325],\n [-359.49336181, -167.93504252, -134.49524237, -130.94575121],\n ],\n [\n [-373.93814241, -160.12580074, -126.60092157, -129.32321566],\n [-121.58362492, -113.542778, -118.87061678, -113.43742325],\n [-101.07120376, -105.55913457, -104.14094712, -102.44191459],\n [-370.75325104, -173.53567801, -139.05415269, -127.7170584],\n ],\n [\n [-123.52182518, -115.10369362, -117.98804019, -114.71703185],\n [-362.65206982, -167.93504252, -134.49524237, -130.94575121],\n [-350.39254778, -173.53567801, -139.05415269, -127.7170584],\n [-101.29234967, -106.9521567, -104.66576262, -103.46014727],\n ],\n ]\n )\n\n magdb_exp_modes_4 = np.array(\n [\n [\n [-186.09498071, -141.31217447, -156.3727046, -164.2331948],\n [-343.18648319, -177.48024148, -185.20860324, -186.64998732],\n [-334.7831122, -177.53606335, -187.59501345, -184.89095401],\n [-153.70571976, -128.91233707, -141.534854, -146.49160424],\n ],\n [\n [-359.4389246, -177.48024148, -185.20860324, -186.64998732],\n [-122.88901214, -139.43588496, -154.12804564, -161.85419832],\n [-124.04894039, -128.97278476, -141.32571597, -146.31133247],\n [-347.60421616, -175.0690129, -185.41011193, -182.54955925],\n ],\n [\n [-350.13764012, -177.53606335, -187.59501345, -184.89095401],\n [-124.04894039, -128.97278476, -141.32571597, -146.31133247],\n [-111.60564526, -122.9491126, -123.76248808, -122.27201722],\n [-337.19738844, -162.11699607, -159.52366304, -159.38118889],\n ],\n [\n [-153.70571976, -128.91233707, -141.534854, -146.49160424],\n [-333.15975187, -175.0690129, -185.41011193, -182.54955925],\n [-323.43173195, -162.11699607, -159.52366304, -159.38118889],\n [-121.31645881, -120.44617713, -124.36604496, -122.47735964],\n ],\n ]\n )\n\n omega = np.linspace(0.0, 450.0, 4)\n freq_resp = rotor4.run_freq_response(speed_range=omega)\n magdb = 20.0 * np.log10(freq_resp.magnitude)\n assert_allclose(magdb[:4, :4, :4], magdb_exp)\n\n freq_resp = rotor4.run_freq_response(speed_range=omega, modes=list(range(4)))\n magdb = 20.0 * np.log10(freq_resp.magnitude)\n assert_allclose(magdb[:4, :4, :4], magdb_exp_modes_4)\n\n\ndef test_freq_response_w_force(rotor4):\n # modal4 = rotor4.run_modal(0)\n F0 = np.array(\n [\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 
0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 22.5 + 0.0j, 90.0 + 0.0j, 202.5 + 0.0j],\n [0.0 + 0.0j, 0.0 - 22.5j, 0.0 - 90.0j, 0.0 - 202.5j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n ]\n )\n mag_exp = np.array(\n [\n [0.00000000e00, 1.14259057e-06, 1.88932819e-04, 4.50376020e-05],\n [0.00000000e00, 3.02252319e-06, 1.50551126e-04, 4.98323245e-05],\n [0.00000000e00, 1.97842812e-05, 5.19405022e-05, 2.80824236e-05],\n [0.00000000e00, 2.02593969e-05, 1.64498124e-05, 1.06100461e-05],\n ]\n )\n mag_exp_2_unb = np.array(\n [\n [0.00000000e00, 4.80337594e-06, 2.31170438e-04, 6.90062268e-05],\n [0.00000000e00, 3.15307288e-06, 1.87793923e-04, 8.08531462e-05],\n [0.00000000e00, 3.79692673e-05, 5.97050225e-05, 5.48105215e-05],\n [0.00000000e00, 4.16812885e-05, 1.38592416e-05, 2.20209089e-05],\n ]\n )\n\n omega = np.linspace(0.0, 450.0, 4)\n freq_resp = rotor4.forced_response(force=F0, speed_range=omega)\n mag = freq_resp.magnitude\n assert_allclose(mag[:4, :4], mag_exp)\n\n freq_resp = rotor4.run_unbalance_response(2, 0.001, 0, frequency=omega)\n mag = freq_resp.magnitude\n assert_allclose(mag[:4, :4], mag_exp)\n\n freq_resp = rotor4.run_unbalance_response(2, 0.001, 0, frequency=omega)\n mag = freq_resp.magnitude\n assert_allclose(mag[:4, :4], mag_exp)\n\n freq_resp = rotor4.run_unbalance_response(\n [2, 3], [0.001, 0.001], [0.0, 0], frequency=omega\n )\n mag = freq_resp.magnitude\n assert_allclose(mag[:4, :4], mag_exp_2_unb)\n\n\ndef test_mesh_convergence(rotor3):\n rotor3.convergence(n_eigval=0, err_max=1e-08)\n modal3 = rotor3.run_modal(speed=0)\n\n assert_allclose(len(rotor3.shaft_elements), 96, atol=0)\n assert_allclose(modal3.wn[0], 82.653037335, atol=1e-02)\n assert_allclose(rotor3.shaft_elements[0].L, 0.015625, atol=1e-06)\n assert_allclose(rotor3.disk_elements[0].n, 32, atol=0)\n assert_allclose(rotor3.disk_elements[1].n, 64, atol=0)\n assert_allclose(rotor3.bearing_elements[0].n, 0, atol=0)\n assert_allclose(rotor3.bearing_elements[1].n, 96, atol=0)\n assert rotor3.error_arr[-1] <= 1e-08 * 100\n\n\ndef test_static_analysis_rotor3(rotor3):\n static = rotor3.run_static()\n\n assert_almost_equal(\n static.deformation[0],\n np.array(\n [\n -4.94274533e-12,\n -4.51249085e-04,\n -7.88420867e-04,\n -9.18114192e-04,\n -8.08560219e-04,\n -4.68788888e-04,\n -5.56171636e-12,\n ]\n ),\n decimal=6,\n )\n assert_almost_equal(\n static.Vx[0],\n np.array(\n [\n -494.2745,\n -456.6791,\n -456.6791,\n -419.0837,\n -99.4925,\n -61.8971,\n -61.8971,\n -24.3017,\n 480.9808,\n 518.5762,\n 518.5762,\n 556.1716,\n ]\n ),\n decimal=3,\n )\n 
assert_almost_equal(\n static.Bm[0],\n np.array(\n [\n 0.0,\n -118.8692,\n -118.8692,\n -228.3396,\n -228.3396,\n -248.5133,\n -248.5133,\n -259.2881,\n -259.2881,\n -134.3435,\n -134.3435,\n 0.0,\n ]\n ),\n decimal=3,\n )\n\n\[email protected]\ndef rotor5():\n # Rotor without damping with 10 shaft elements 2 disks and 2 bearings\n i_d = 0\n o_d = 0.05\n n = 10\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(4, steel, 0.07, 0.05, 0.28)\n disk1 = DiskElement.from_geometry(6, steel, 0.07, 0.05, 0.35)\n\n stfx = 1e6\n stfy = 1e6\n bearing0 = BearingElement(2, kxx=stfx, kyy=stfy, cxx=0)\n bearing1 = BearingElement(8, kxx=stfx, kyy=stfy, cxx=0)\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\ndef test_static_analysis_rotor5(rotor5):\n static = rotor5.run_static()\n\n assert_almost_equal(\n static.deformation[0],\n np.array(\n [\n 8.12651626e-04,\n 4.08939282e-04,\n -5.69465378e-12,\n -4.05876595e-04,\n -7.15824882e-04,\n -8.36443708e-04,\n -7.35964234e-04,\n -4.23416398e-04,\n -6.31362481e-12,\n 4.28859620e-04,\n 8.52492302e-04,\n ]\n ),\n decimal=6,\n )\n assert_almost_equal(\n static.Vx[0],\n np.array(\n [\n 0.0,\n 37.5954,\n 37.5954,\n 75.1908,\n -494.2745,\n -456.6791,\n -456.6791,\n -419.0837,\n -99.4925,\n -61.8971,\n -61.8971,\n -24.3017,\n 480.9808,\n 518.5762,\n 518.5762,\n 556.1716,\n -75.1908,\n -37.5954,\n -37.5954,\n -0.0,\n ]\n ),\n decimal=3,\n )\n assert_almost_equal(\n static.Bm[0],\n np.array(\n [\n 0.0,\n 4.6994,\n 4.6994,\n 18.7977,\n 18.7977,\n -100.0715,\n -100.0715,\n -209.5418,\n -209.5418,\n -229.7155,\n -229.7155,\n -240.4904,\n -240.4904,\n -115.5458,\n -115.5458,\n 18.7977,\n 18.7977,\n 4.6994,\n 4.6994,\n 0.0,\n ]\n ),\n decimal=3,\n )\n\n\[email protected]\ndef rotor6():\n # Overhung rotor without damping with 10 shaft elements\n # 2 disks and 2 bearings\n i_d = 0\n o_d = 0.05\n n = 10\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(5, steel, 0.07, 0.05, 0.28)\n disk1 = DiskElement.from_geometry(10, steel, 0.07, 0.05, 0.35)\n\n stfx = 1e6\n stfy = 1e6\n bearing0 = BearingElement(2, kxx=stfx, kyy=stfy, cxx=0)\n bearing1 = BearingElement(8, kxx=stfx, kyy=stfy, cxx=0)\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\ndef test_static_analysis_rotor6(rotor6):\n static = rotor6.run_static()\n\n assert_almost_equal(\n static.deformation[0],\n np.array(\n [\n -1.03951876e-04,\n -4.93624668e-05,\n -1.79345202e-12,\n 3.74213098e-05,\n 7.66066703e-05,\n 1.29084322e-04,\n 1.85016673e-04,\n 1.72933811e-04,\n -1.02148266e-11,\n -3.96409257e-04,\n -9.20006704e-04,\n ]\n ),\n decimal=6,\n )\n assert_almost_equal(\n static.Vx[0],\n np.array(\n [\n -0.0,\n 37.5954,\n 37.5954,\n 75.1908,\n -104.1544,\n -66.5589,\n -66.5589,\n -28.9635,\n -28.9635,\n 8.6319,\n 328.2231,\n 365.8185,\n 365.8185,\n 403.4139,\n 403.4139,\n 441.0093,\n -580.4733,\n -542.8779,\n -542.8779,\n -505.2825,\n ]\n ),\n decimal=3,\n )\n assert_almost_equal(\n static.Bm[0],\n np.array(\n [\n 0.0,\n 4.6994,\n 4.6994,\n 18.7977,\n 18.7977,\n -2.5415,\n -2.5415,\n -14.4818,\n -14.4818,\n -17.0232,\n -17.0232,\n 69.732,\n 69.732,\n 165.886,\n 165.886,\n 271.439,\n 271.439,\n 131.0201,\n 131.02,\n 0.0,\n ]\n ),\n decimal=3,\n 
)\n\n\ndef test_run_critical_speed(rotor5, rotor6):\n results5 = rotor5.run_critical_speed(num_modes=12, rtol=0.005)\n results6 = rotor6.run_critical_speed(num_modes=12, rtol=0.005)\n\n wn5 = np.array(\n [\n 86.10505193,\n 86.60492546,\n 198.93259257,\n 207.97165539,\n 244.95609413,\n 250.53522782,\n ]\n )\n wd5 = np.array(\n [\n 86.1050519,\n 86.60492544,\n 198.93259256,\n 207.97165539,\n 244.95609413,\n 250.53522782,\n ]\n )\n log_dec5 = np.zeros_like(wd5)\n damping_ratio5 = np.zeros_like(wd5)\n\n wd6 = np.array(\n [\n 61.52110644,\n 63.72862198,\n 117.49491374,\n 118.55829416,\n 233.83724523,\n 236.40346235,\n ]\n )\n wn6 = np.array(\n [\n 61.52110644,\n 63.72862198,\n 117.49491375,\n 118.55829421,\n 233.83724523,\n 236.40346458,\n ]\n )\n log_dec6 = np.zeros_like(wd6)\n damping_ratio6 = np.zeros_like(wd6)\n\n assert_almost_equal(results5.wd, wd5, decimal=4)\n assert_almost_equal(results5.wn, wn5, decimal=4)\n assert_almost_equal(results5.log_dec, log_dec5, decimal=4)\n assert_almost_equal(results5.damping_ratio, damping_ratio5, decimal=4)\n\n assert_almost_equal(results6.wd, wd6, decimal=4)\n assert_almost_equal(results6.wn, wn6, decimal=4)\n assert_almost_equal(results6.log_dec, log_dec6, decimal=4)\n assert_almost_equal(results6.damping_ratio, damping_ratio6, decimal=4)\n\n\[email protected]\ndef coaxrotor():\n # Co-axial rotor system with 2 shafts, 4 disks and\n # 4 bearings (3 to ground and 1 to body)\n i_d = 0\n o_d = 0.05\n n = 10\n L = [0.25 for _ in range(n)]\n\n axial_shaft = [ShaftElement(l, i_d, o_d, material=steel) for l in L]\n\n i_d = 0.25\n o_d = 0.30\n n = 6\n L = [0.25 for _ in range(n)]\n\n coaxial_shaft = [ShaftElement(l, i_d, o_d, material=steel) for l in L]\n\n disk0 = DiskElement.from_geometry(\n n=1, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk1 = DiskElement.from_geometry(\n n=9, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk2 = DiskElement.from_geometry(\n n=13, material=steel, width=0.07, i_d=0.20, o_d=0.48\n )\n disk3 = DiskElement.from_geometry(\n n=15, material=steel, width=0.07, i_d=0.20, o_d=0.48\n )\n\n shaft = [axial_shaft, coaxial_shaft]\n disks = [disk0, disk1, disk2, disk3]\n\n stfx = 1e6\n stfy = 1e6\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=0)\n bearing1 = BearingElement(10, kxx=stfx, kyy=stfy, cxx=0)\n bearing2 = BearingElement(11, kxx=stfx, kyy=stfy, cxx=0)\n bearing3 = BearingElement(8, n_link=17, kxx=stfx, kyy=stfy, cxx=0)\n bearings = [bearing0, bearing1, bearing2, bearing3]\n\n return CoAxialRotor(shaft, disks, bearings)\n\n\ndef test_coaxial_rotor_assembly(coaxrotor):\n # fmt: off\n assert list(coaxrotor.df[\"shaft_number\"]) == [\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0\n ]\n assert coaxrotor.nodes_pos == [\n 0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5,\n 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0\n ]\n assert list(coaxrotor.df_shaft[\"nodes_pos_l\"]) == [\n 0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25,\n 0.5, 0.75, 1.0, 1.25, 1.5, 1.75\n ]\n assert list(coaxrotor.df_shaft[\"nodes_pos_r\"]) == [\n 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5,\n 0.75, 1.0, 1.25, 1.5, 1.75, 2.0\n ]\n assert list(coaxrotor.df[\"y_pos\"].dropna()) == [\n 0.025, 0.05, 0.025, 0.05, 0.025, 0.15, 0.3, 0.3\n ]\n assert list(np.round(coaxrotor.df[\"y_pos_sup\"].dropna(), 3)) == [\n 0.319, 0.125, 0.319, 0.444\n ]\n # fmt: on\n\n\ndef test_from_section():\n # Rotor built from classmethod from_section\n # 2 disks and 2 
bearings\n leng_data = [0.5, 1.0, 2.0, 1.0, 0.5]\n leng_data_error = [0.5, 1.0, 2.0, 1.0]\n\n odl_data = [0.1, 0.2, 0.3, 0.2, 0.1]\n odr_data_error = [0.1, 0.2, 0.3, 0.2]\n\n idl_data = [0, 0, 0, 0, 0]\n material = steel\n material_error = [steel, steel]\n disk_data = [\n DiskElement.from_geometry(n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28),\n DiskElement.from_geometry(n=3, material=steel, width=0.07, i_d=0.05, o_d=0.35),\n ]\n brg_seal_data = [\n BearingElement(n=0, kxx=1e6, cxx=0, kyy=1e6, cyy=0),\n BearingElement(n=5, kxx=1e6, cxx=0, kyy=1e6, cyy=0),\n ]\n\n rotor1 = Rotor.from_section(\n leng_data=leng_data,\n idl_data=idl_data,\n odl_data=odl_data,\n material_data=material,\n disk_data=disk_data,\n brg_seal_data=brg_seal_data,\n nel_r=4,\n )\n\n assert_allclose(len(rotor1.shaft_elements), 20, atol=0)\n assert_allclose(rotor1.shaft_elements[0].L, 0.125, atol=0)\n assert_allclose(rotor1.shaft_elements[4].L, 0.25, atol=0)\n assert_allclose(rotor1.shaft_elements[8].L, 0.5, atol=0)\n assert_allclose(rotor1.shaft_elements[12].L, 0.25, atol=0)\n assert_allclose(rotor1.shaft_elements[16].L, 0.125, atol=0)\n assert_allclose(rotor1.disk_elements[0].n, 8, atol=0)\n assert_allclose(rotor1.disk_elements[1].n, 12, atol=0)\n assert_allclose(rotor1.bearing_elements[0].n, 0, atol=0)\n assert_allclose(rotor1.bearing_elements[1].n, 20, atol=0)\n\n with pytest.raises(ValueError) as excinfo:\n Rotor.from_section(\n leng_data=leng_data_error,\n idl_data=idl_data,\n odl_data=odl_data,\n material_data=material,\n disk_data=disk_data,\n brg_seal_data=brg_seal_data,\n nel_r=4,\n )\n assert \"The lists size do not match (leng_data, odl_data and idl_data).\" == str(\n excinfo.value\n )\n\n with pytest.raises(ValueError) as excinfo:\n Rotor.from_section(\n leng_data=leng_data,\n idl_data=idl_data,\n odl_data=odl_data,\n odr_data=odr_data_error,\n material_data=material,\n disk_data=disk_data,\n brg_seal_data=brg_seal_data,\n nel_r=4,\n )\n assert \"The lists size do not match (leng_data, odr_data and idr_data).\" == str(\n excinfo.value\n )\n\n with pytest.raises(AttributeError) as excinfo:\n Rotor.from_section(\n leng_data=leng_data,\n idl_data=idl_data,\n odl_data=odl_data,\n material_data=None,\n disk_data=disk_data,\n brg_seal_data=brg_seal_data,\n nel_r=4,\n )\n assert \"Please define a material or a list of materials\" == str(excinfo.value)\n\n with pytest.raises(IndexError) as excinfo:\n Rotor.from_section(\n leng_data=leng_data,\n idl_data=idl_data,\n odl_data=odl_data,\n material_data=material_error,\n disk_data=disk_data,\n brg_seal_data=brg_seal_data,\n nel_r=4,\n )\n assert \"material_data size does not match size of other lists\" == str(excinfo.value)\n\n\[email protected]\ndef rotor7():\n # Rotor with damping\n # Rotor with 6 shaft elements, 2 disks and 2 bearings\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(2, steel, 0.07, 0.05, 0.28)\n disk1 = DiskElement.from_geometry(4, steel, 0.07, 0.05, 0.35)\n\n stfx = 1e6\n stfy = 1e6\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=1e3, cyy=1e3)\n bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=1e3, cyy=1e3)\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\ndef test_whirl_values(rotor7):\n speed_range = np.linspace(50, 500, 10)\n for speed in speed_range:\n modal7 = rotor7.run_modal(speed)\n 
assert_allclose(modal7.whirl_values(), [1.0, 0.0, 1.0, 0.0, 1.0, 0.0], atol=0)\n assert_equal(\n modal7.whirl_direction(),\n np.array(\n [\"Backward\", \"Forward\", \"Backward\", \"Forward\", \"Backward\", \"Forward\"],\n dtype=\"<U8\",\n ),\n )\n\n\ndef test_kappa_mode(rotor7):\n modal7 = rotor7.run_modal(100.0)\n assert_allclose(\n modal7.kappa_mode(0),\n [\n -0.999999999989335,\n -0.9999999999893868,\n -0.9999999999894262,\n -0.9999999999894176,\n -0.9999999999893875,\n -0.9999999999893159,\n -0.9999999999891641,\n ],\n rtol=1e-7,\n )\n assert_allclose(\n modal7.kappa_mode(1),\n [\n 0.9999999999926857,\n 0.9999999999925702,\n 0.9999999999925301,\n 0.9999999999924822,\n 0.9999999999924919,\n 0.9999999999925321,\n 0.9999999999926336,\n ],\n rtol=1e-7,\n )\n assert_allclose(\n modal7.kappa_mode(2),\n [\n -0.9999999999586078,\n -0.9999999999590045,\n -0.9999999999595016,\n -0.9999999999682825,\n -0.9999999999577597,\n -0.999999999961294,\n -0.9999999999628185,\n ],\n rtol=1e-7,\n )\n\n modal7 = rotor7.run_modal(speed=250.0)\n assert_allclose(\n modal7.kappa_mode(0),\n [\n -0.9999999999996795,\n -0.9999999999997023,\n -0.9999999999997117,\n -0.9999999999997297,\n -0.9999999999997392,\n -0.9999999999997269,\n -0.9999999999997203,\n ],\n rtol=1e-7,\n )\n assert_allclose(\n modal7.kappa_mode(1),\n [\n 0.9999999999992075,\n 0.999999999999222,\n 0.9999999999992263,\n 0.9999999999992275,\n 0.9999999999992394,\n 0.9999999999992564,\n 0.9999999999992875,\n ],\n rtol=1e-7,\n )\n assert_allclose(\n modal7.kappa_mode(2),\n [\n -0.9999999999955613,\n -0.999999999995006,\n -0.9999999999949597,\n -0.9999999999897796,\n -0.999999999996037,\n -0.9999999999966488,\n -0.9999999999969151,\n ],\n rtol=1e-7,\n )\n\n modal7 = rotor7.run_modal(500.0)\n assert_allclose(\n modal7.kappa_mode(0),\n [\n -0.9999999999986061,\n -0.999999999998796,\n -0.9999999999988834,\n -0.9999999999989619,\n -0.999999999998994,\n -0.9999999999989716,\n -0.9999999999989015,\n ],\n rtol=1e-7,\n )\n assert_allclose(\n modal7.kappa_mode(1),\n [\n 0.9999999999995656,\n 0.9999999999993939,\n 0.9999999999993113,\n 0.9999999999992302,\n 0.999999999999194,\n 0.9999999999992081,\n 0.9999999999992395,\n ],\n rtol=1e-7,\n )\n assert_allclose(\n modal7.kappa_mode(2),\n [\n -0.999999999997584,\n -0.9999999999976369,\n -0.9999999999979048,\n -0.9999999999986678,\n -0.9999999999977003,\n -0.9999999999983235,\n -0.9999999999986461,\n ],\n rtol=1e-7,\n )\n\n\ndef test_kappa_axes_values(rotor7):\n modal7 = rotor7.run_modal(50)\n assert_allclose(modal7.kappa(3, 0)[\"Minor axes\"], 0.0024460977827471028, atol=1e-6)\n assert_allclose(modal7.kappa(3, 1)[\"Minor axes\"], 0.0024415401094917922, atol=1e-6)\n assert_allclose(modal7.kappa(3, 2)[\"Minor axes\"], 7.753006465896838e-05, atol=1e-8)\n assert_allclose(modal7.kappa(3, 0)[\"Major axes\"], 0.0024460977827550083, atol=1e-6)\n assert_allclose(modal7.kappa(3, 1)[\"Major axes\"], 0.0024415401094980776, atol=1e-6)\n assert_allclose(modal7.kappa(3, 2)[\"Major axes\"], 7.753006466024783e-05, atol=1e-8)\n\n modal7 = rotor7.run_modal(200)\n assert_allclose(modal7.kappa(3, 0)[\"Minor axes\"], 0.002453197790184042, atol=1e-6)\n assert_allclose(modal7.kappa(3, 1)[\"Minor axes\"], 0.0024349531472631354, atol=1e-6)\n assert_allclose(modal7.kappa(3, 2)[\"Minor axes\"], 8.081580235887124e-05, atol=1e-8)\n assert_allclose(modal7.kappa(3, 0)[\"Major axes\"], 0.002453197790191339, atol=1e-6)\n assert_allclose(modal7.kappa(3, 1)[\"Major axes\"], 0.0024349531472711047, atol=1e-6)\n assert_allclose(modal7.kappa(3, 2)[\"Major 
axes\"], 8.081580235956821e-05, atol=1e-8)\n\n modal7 = rotor7.run_modal(400)\n assert_allclose(modal7.kappa(3, 0)[\"Minor axes\"], 0.002463187671800876, atol=1e-6)\n assert_allclose(modal7.kappa(3, 1)[\"Minor axes\"], 0.0024266089747119572, atol=1e-6)\n assert_allclose(modal7.kappa(3, 2)[\"Minor axes\"], 8.480305842194371e-05, atol=1e-8)\n assert_allclose(modal7.kappa(3, 0)[\"Major axes\"], 0.002463187671801488, atol=1e-6)\n assert_allclose(modal7.kappa(3, 1)[\"Major axes\"], 0.0024266089747121845, atol=1e-6)\n assert_allclose(modal7.kappa(3, 2)[\"Major axes\"], 8.480305842205874e-05, atol=1e-8)\n\n\[email protected](reason=\"Fails for very small values\")\ndef test_H_kappa(rotor7):\n rotor7.w = 400\n assert_allclose(\n rotor7.H_kappa(3, 0),\n [[6.06729351e-06, -6.33478357e-19], [-6.33478357e-19, 6.06729351e-06]],\n rtol=1e-2,\n )\n assert_allclose(\n rotor7.H_kappa(3, 0),\n [[5.88843112e-06, 2.88604638e-20], [2.88604638e-20, 5.88843112e-06]],\n rtol=1e-2,\n )\n assert_allclose(\n rotor7.H_kappa(3, 0),\n [[7.19155872e-09, 9.75123448e-21], [9.75123448e-21, 7.19155872e-09]],\n rtol=1e-2,\n )\n\n rotor7.w = 200\n assert_allclose(\n rotor7.H_kappa(3, 0),\n [[6.0181794e-06, 1.9785678e-18], [1.9785678e-18, 6.0181794e-06]],\n rtol=1e-2,\n )\n assert_allclose(\n rotor7.H_kappa(3, 0),\n [[5.92899683e-06, -1.24262274e-17], [-1.24262274e-17, 5.92899683e-06]],\n rtol=1e-2,\n )\n assert_allclose(\n rotor7.H_kappa(3, 0),\n [[6.53119391e-09, 4.73407722e-20], [4.73407722e-20, 6.53119391e-09]],\n rtol=1e-2,\n )\n\n\ndef test_save_load():\n a = rotor_example()\n a.save(\"teste00000000000000001\")\n b = Rotor.load(\"teste00000000000000001.rsm\")\n (Path.cwd() / \"teste00000000000000001.rsm\").unlink()\n\n assert a == b\n\n\ndef test_global_index():\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(\n n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk1 = DiskElement.from_geometry(\n n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n\n stfx = 1e6\n stfy = 0.8e6\n bearing0 = BearingElement(0, n_link=7, kxx=stfx, kyy=stfy, cxx=0)\n support0 = BearingElement(7, kxx=stfx, kyy=stfy, cxx=0, tag=\"Support0\")\n bearing1 = BearingElement(6, n_link=8, kxx=stfx, kyy=stfy, cxx=0)\n support1 = BearingElement(8, kxx=stfx, kyy=stfy, cxx=0, tag=\"Support1\")\n\n point_mass0 = PointMass(7, m=1.0)\n point_mass1 = PointMass(8, m=1.0)\n\n rotor = Rotor(\n shaft_elem,\n [disk0, disk1],\n [bearing0, bearing1, support0, support1],\n [point_mass0, point_mass1],\n )\n\n shaft = rotor.shaft_elements\n disks = rotor.disk_elements\n bearings = rotor.bearing_elements\n pointmass = rotor.point_mass_elements\n\n assert shaft[0].dof_global_index.x_0 == 0\n assert shaft[0].dof_global_index.y_0 == 1\n assert shaft[0].dof_global_index.alpha_0 == 2\n assert shaft[0].dof_global_index.beta_0 == 3\n assert shaft[0].dof_global_index.x_1 == 4\n assert shaft[0].dof_global_index.y_1 == 5\n assert shaft[0].dof_global_index.alpha_1 == 6\n assert shaft[0].dof_global_index.beta_1 == 7\n\n assert disks[0].dof_global_index.x_2 == 8\n assert disks[0].dof_global_index.y_2 == 9\n assert disks[0].dof_global_index.alpha_2 == 10\n assert disks[0].dof_global_index.beta_2 == 11\n\n assert bearings[0].dof_global_index.x_0 == 0\n assert bearings[0].dof_global_index.y_0 == 1\n assert bearings[0].dof_global_index.x_7 == 28\n assert 
bearings[0].dof_global_index.y_7 == 29\n assert bearings[1].dof_global_index.x_6 == 24\n assert bearings[1].dof_global_index.y_6 == 25\n assert bearings[1].dof_global_index.x_8 == 30\n assert bearings[1].dof_global_index.y_8 == 31\n assert bearings[2].dof_global_index.x_7 == 28\n assert bearings[2].dof_global_index.y_7 == 29\n assert bearings[3].dof_global_index.x_8 == 30\n assert bearings[3].dof_global_index.y_8 == 31\n\n assert pointmass[0].dof_global_index.x_7 == 28\n assert pointmass[0].dof_global_index.y_7 == 29\n assert pointmass[1].dof_global_index.x_8 == 30\n assert pointmass[1].dof_global_index.y_8 == 31\n\n\ndef test_distincts_dof_elements_error():\n with pytest.raises(Exception):\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement6DoF(\n material=steel,\n L=0.25,\n idl=0,\n odl=0.05,\n idr=0,\n odr=0.05,\n alpha=0,\n beta=0,\n rotary_inertia=False,\n shear_effects=False,\n )\n for l in L\n ]\n\n # purposeful error here!\n disk0 = DiskElement.from_geometry(\n n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk1 = DiskElement6DoF.from_geometry(\n n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n\n kxx = 1e6\n kyy = 0.8e6\n kzz = 1e5\n cxx = 0\n cyy = 0\n czz = 0\n bearing0 = BearingElement6DoF(\n n=0, kxx=kxx, kyy=kyy, cxx=cxx, cyy=cyy, kzz=kzz, czz=czz\n )\n bearing1 = BearingElement6DoF(\n n=6, kxx=kxx, kyy=kyy, cxx=cxx, cyy=cyy, kzz=kzz, czz=czz\n )\n Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1], n_eigen=36)\n\n\[email protected]\ndef rotor_6dof():\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement6DoF(\n material=steel,\n L=0.25,\n idl=0,\n odl=0.05,\n idr=0,\n odr=0.05,\n alpha=0,\n beta=0,\n rotary_inertia=False,\n shear_effects=False,\n )\n for l in L\n ]\n\n disk0 = DiskElement6DoF.from_geometry(\n n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk1 = DiskElement6DoF.from_geometry(\n n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n\n kxx = 1e6\n kyy = 0.8e6\n kzz = 1e5\n cxx = 0\n cyy = 0\n czz = 0\n bearing0 = BearingElement6DoF(\n n=0, kxx=kxx, kyy=kyy, cxx=cxx, cyy=cyy, kzz=kzz, czz=czz\n )\n bearing1 = BearingElement6DoF(\n n=6, kxx=kxx, kyy=kyy, cxx=cxx, cyy=cyy, kzz=kzz, czz=czz\n )\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\[email protected](\n reason=\"Needs investigation. It fails depending on system. 
Most likely due to eig solution precision\"\n)\ndef test_modal_6dof(rotor_6dof):\n modal = rotor_6dof.run_modal(speed=0)\n wn = np.array(\n [\n 1.52311799e-05,\n 4.76215722e01,\n 9.17983572e01,\n 9.62938682e01,\n 2.74512744e02,\n 2.96499037e02,\n ]\n )\n wd = np.array(\n [0.0, 47.62157215, 91.79835717, 96.29386819, 274.51274397, 296.49903736]\n )\n\n assert_almost_equal(modal.wn[:6], wn, decimal=3)\n assert_almost_equal(modal.wd[:6], wd, decimal=3)\n\n\[email protected]\ndef rotor8():\n # Rotor with damping\n # Rotor with 6 shaft elements, 2 disks and 2 bearings with frequency dependent coefficients\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(2, steel, 0.07, 0.05, 0.28)\n disk1 = DiskElement.from_geometry(4, steel, 0.07, 0.05, 0.35)\n\n stfx = [1e7, 1.5e7]\n stfy = [1e7, 1.5e7]\n c = [1e3, 1.5e3]\n frequency = [50, 5000]\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=c, cyy=c, frequency=frequency)\n bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=c, cyy=c, frequency=frequency)\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\ndef test_ucs_calc(rotor8):\n exp_stiffness_range = np.array([1000000.0, 1832980.710832, 3359818.286284])\n exp_rotor_wn = np.array([86.658114, 95.660573, 101.868254])\n exp_intersection_points_x = np.array(\n [10058123.652648, 10058123.652648, 10363082.398797]\n )\n exp_intersection_points_y = np.array([107.542416, 107.542416, 409.451575])\n stiffness_range, rotor_wn, bearing, intersection_points = rotor8._calc_ucs()\n assert_allclose(stiffness_range[:3], exp_stiffness_range)\n assert_allclose(rotor_wn[0, :3], exp_rotor_wn)\n assert_allclose(intersection_points[\"x\"][:3], exp_intersection_points_x, rtol=1e-3)\n assert_allclose(intersection_points[\"y\"][:3], exp_intersection_points_y, rtol=1e-3)\n\n\ndef test_save_load(rotor8):\n file = Path(tempdir) / \"rotor8.toml\"\n rotor8.save(file)\n rotor8_loaded = Rotor.load(file)\n\n rotor8 == rotor8_loaded\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"numpy.arctan",
"pandas.DataFrame",
"numpy.round",
"numpy.argmin",
"numpy.mean",
"scipy.signal.argrelextrema",
"numpy.where",
"numpy.sin",
"scipy.interpolate.interp1d",
"numpy.argmax",
"pandas.concat",
"numpy.logspace",
"numpy.delete",
"numpy.append",
"numpy.log10",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.cos",
"numpy.piecewise",
"numpy.sign"
],
[
"numpy.linspace",
"numpy.testing.assert_almost_equal",
"numpy.log10",
"numpy.zeros_like",
"numpy.testing.assert_allclose",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
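For orientation: the apis column above lists the fully-qualified numpy/pandas/scipy calls detected in each source file of this record (first the report module, then the rotor-assembly tests). The dump does not state how those lists were produced; the sketch below is one plausible reconstruction using only the standard-library ast module, and the function name extract_api_calls is illustrative rather than anything defined in the dataset.

    import ast

    def extract_api_calls(source):
        """Collect dotted library calls such as 'numpy.linspace' from Python source.

        Minimal sketch: it resolves 'import numpy as np' aliases and
        'from numpy.testing import assert_allclose' imports, then records every
        call whose root name maps back to an imported module. Wildcard imports
        and longer attribute chains (np.testing.assert_allclose) are ignored.
        """
        tree = ast.parse(source)
        alias_to_module = {}   # e.g. {'np': 'numpy', 'pd': 'pandas'}
        name_to_qualname = {}  # e.g. {'assert_allclose': 'numpy.testing.assert_allclose'}

        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    alias_to_module[alias.asname or alias.name] = alias.name
            elif isinstance(node, ast.ImportFrom) and node.module:
                for alias in node.names:
                    name_to_qualname[alias.asname or alias.name] = "{}.{}".format(node.module, alias.name)

        calls = set()
        for node in ast.walk(tree):
            if not isinstance(node, ast.Call):
                continue
            func = node.func
            if isinstance(func, ast.Attribute) and isinstance(func.value, ast.Name):
                module = alias_to_module.get(func.value.id)
                if module:
                    calls.add("{}.{}".format(module, func.attr))
            elif isinstance(func, ast.Name) and func.id in name_to_qualname:
                calls.add(name_to_qualname[func.id])
        return calls

    if __name__ == "__main__":
        sample = "import numpy as np\nx = np.linspace(0, 1, 5)\n"
        print(sorted(extract_api_calls(sample)))  # ['numpy.linspace']

Run against the test module in this record, a walker like this would recover entries such as numpy.linspace and numpy.testing.assert_allclose, in line with the second apis sub-list.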
dimitymiller/cac-openset | [
"b07dadbb8caa5d7430c403734f6543ff17e2ae11"
] | [
"datasets/generate_trainval_splits.py"
] | [
"\"\"\"\n\tRandomly select train and validation subsets from training datasets.\n\t80/20 split ratio used for all datasets except TinyImageNet, which will use 90/10.\n\n\tDimity Miller, 2020\n\"\"\"\n\nimport json\nimport random\nimport torchvision\nimport numpy as np\n\nrandom.seed(1000)\n\ndef save_trainval_split(dataset, train_idxs, val_idxs):\n\tprint(\"Saving {} Train/Val split to {}/trainval_idxs.json\".format(dataset, dataset))\n\tfile = open('{}/trainval_idxs.json'.format(dataset), 'w')\n\tfile.write(json.dumps({'Train': train_idxs, 'Val': val_idxs}))\n\tfile.close()\n\nmnist = torchvision.datasets.MNIST('data')\nsvhn = torchvision.datasets.SVHN('data')\ncifar10 = torchvision.datasets.CIFAR10('data')\ntinyImagenet = torchvision.datasets.ImageFolder('data/tiny-imagenet-200/train')\n\ndatasets = {'MNIST': mnist, 'SVHN': svhn, 'CIFAR10': cifar10, 'TinyImageNet': tinyImagenet}\nsplit = {'MNIST': 0.8, 'SVHN': 0.8, 'CIFAR10': 0.8, 'TinyImageNet': 0.9}\n\nfor datasetName in datasets.keys():\n\tdataset = datasets[datasetName]\t\n\n\t#get class label for each image. svhn has different syntax as .labels\n\ttry:\n\t\ttargets = dataset.targets\n\t\tnum_classes = len(dataset.classes)\n\texcept:\n\t\ttargets = dataset.labels\n\t\tnum_classes = len(np.unique(targets))\n\n\t#save image idxs per class\n\tclass_idxs = [[] for i in range(num_classes)]\n\tfor i, lbl in enumerate(targets):\n\t\tclass_idxs[lbl] += [i]\n\n\t#determine size of train subset\n\tclass_size = [len(x) for x in class_idxs]\n\tclass_train_size = [int(split[datasetName]*x) for x in class_size]\n\n\t#subset per class into train and val subsets randomly\n\ttrain_idxs = {}\n\tval_idxs = {}\n\tfor class_num in range(num_classes):\n\t\ttrain_size = class_train_size[class_num]\n\t\tidxs = class_idxs[class_num]\n\t\trandom.shuffle(idxs)\n\t\ttrain_idxs[class_num] = idxs[:train_size]\n\t\tval_idxs[class_num] = idxs[train_size:]\n\n\tsave_trainval_split(datasetName, train_idxs, val_idxs)\n\n\t#cifar10 and cifar+m datasets can use the same training and val splits\n\tif 'CIFAR' in datasetName:\n\t\tsave_trainval_split('CIFAR+10', train_idxs, val_idxs)\n\t\tsave_trainval_split('CIFAR+50', train_idxs, val_idxs)"
] | [
[
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
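The generate_trainval_splits.py file in this record writes {dataset}/trainval_idxs.json with top-level keys 'Train' and 'Val', each mapping a class number to a list of image indices. As a usage note, the snippet below shows one way a downstream loader could consume that file; the MNIST path and the use of torch.utils.data.Subset are illustrative assumptions, not code from the repository.

    import json

    import torchvision
    from torch.utils.data import Subset

    # Split file produced by generate_trainval_splits.py (here: the MNIST one).
    with open("MNIST/trainval_idxs.json") as f:
        split = json.load(f)

    # JSON turns the class-number keys into strings; the values are lists of
    # indices into the full training set, so flatten them per subset.
    train_idxs = [i for idxs in split["Train"].values() for i in idxs]
    val_idxs = [i for idxs in split["Val"].values() for i in idxs]

    full_train = torchvision.datasets.MNIST("data", train=True, download=True)
    train_set = Subset(full_train, train_idxs)
    val_set = Subset(full_train, val_idxs)

    print(len(train_set), len(val_set))  # roughly an 80/20 split, per the script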
Pandinosaurus/depthai | [
"a46ad95744d8175f1c87bf8cd92c7423a84b8607"
] | [
"depthai_profiler.py"
] | [
"#!/usr/bin/env python3\n\n#depthai function profiler\nimport subprocess\nimport sys\nimport numpy as np\n\n#this is a debugging tool, that's why it's not added to requirements.txt\ntry:\n import snakeviz\nexcept ImportError:\n raise ImportError('\\033[1;5;31m snakeviz module not found, run: \\033[0m python3 -m pip install snakeviz ')\n\nif __name__ == \"__main__\":\n output_profile_file = 'depthai.prof'\n cmd = [\"python3\", \"-m\", \"cProfile\", \"-o\", output_profile_file, \"-s\", \"tottime\", \"depthai_demo.py\"]\n cmd = np.concatenate((cmd, sys.argv[1:]))\n print(cmd)\n\n subprocess.run(cmd)\n subprocess.run([\"snakeviz\", output_profile_file])\n"
] | [
[
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
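The profiler script above writes its cProfile output to depthai.prof before handing it to snakeviz. If the browser UI is not wanted, the same file can be summarized with the standard-library pstats module; this snippet is a generic illustration and not part of the depthai repository.

    import pstats
    from pstats import SortKey

    # Inspect the cProfile dump written by depthai_profiler.py
    stats = pstats.Stats("depthai.prof")
    stats.strip_dirs().sort_stats(SortKey.TIME).print_stats(10)   # top 10 functions by tottime
    stats.sort_stats(SortKey.CUMULATIVE).print_callers(5)         # callers of the hot spots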
IJSComplexMatter/cddm | [
"f4d7521ad88271027c61743b2e8a2355a40cb117"
] | [
"examples/paper/plot_error.py"
] | [
"\"\"\"Plots fig 3. and fig 4. from the paper.\n\nYou must first create data calling the following scripts:\n \n$ python auto_correlate_random_error.py\n$ python auto_correlate_standard_error.py\n$ python auto_correlate_fast_error.py\n$ python cross_correlate_error.py\n\n\"\"\"\n\nfrom cddm.sim import random_time_count\nfrom cddm.multitau import ccorr_multi_count, acorr_multi_count,log_merge_count, multilevel, merge_multilevel\nfrom cddm.norm import sigma_prime_weighted, weight_prime_from_g, sigma_weighted, weight_from_g\n#from cddm.avg import denoise,decreasing\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom examples.paper.conf import NFRAMES, PERIOD , NFRAMES_RANDOM, PERIOD_RANDOM\n\nfrom examples.paper.conf import DATA_PATH\nfrom examples.paper.conf import SAVE_FIGS\nimport os\n\nfrom examples.paper.form_factor import g1, bg1, bg2\n\n#: whether toplot cross-correlationor auto-correlation data\nCROSS = False\n\n#: whether to plot binned data\nBINNING_DATA = 1\n\n#: whether to to mark binned data with markers\nBINNING_ERROR = 0\n\n#: whether to plot binning error model\nBINNING_MODEL = 0\n\n#: which K value to plot\nK = 16\n\nif CROSS:\n\n data = np.load(os.path.join(DATA_PATH,\"cross_error_corr.npy\"))\n bgs = np.load(os.path.join(DATA_PATH,\"cross_error_bg.npy\"))\n vars = np.load(os.path.join(DATA_PATH,\"cross_error_var.npy\"))\n\n data_regular = np.load(os.path.join(DATA_PATH,\"auto_standard_error_corr.npy\"))[...,0:NFRAMES//PERIOD*2]\nelse:\n data = np.load(os.path.join(DATA_PATH,\"auto_random_error_corr.npy\"))\n bgs = np.load(os.path.join(DATA_PATH,\"auto_random_error_bg.npy\"))\n vars = np.load(os.path.join(DATA_PATH,\"auto_random_error_var.npy\"))\n\n data_regular = np.load(os.path.join(DATA_PATH,\"auto_fast_error_corr.npy\"))[...,0:NFRAMES]\n\n\n\nLABELS = {1: \"B'\", 2 : \"S'\", 3 : \"W'\", 5 : \"B\", 6 : \"S\", 7 : \"W\", 9 : \"B''\", 10 : \"S''\", 11 : \"W''\" }\n\n\nMARKERS = {1: \"1\", 2 : \"2\", 3 : \"3\", 5 : \"4\",6 : \"+\", 7 : \"x\", 9 : \"4\", 10 : \"+\", 11 : \"x\"}\n\n\nplt.figure()\n\n\n\nif not BINNING_MODEL:\n \n #estimated count for the random triggering experiment\n if CROSS:\n n = NFRAMES/PERIOD*2\n else:\n n = random_time_count(NFRAMES_RANDOM, PERIOD_RANDOM)[0:NFRAMES_RANDOM]\nelse: \n if CROSS:\n clin,cmulti = ccorr_multi_count(NFRAMES, period = PERIOD, level_size = 16, binning = 1)\n else:\n clin,cmulti = acorr_multi_count(NFRAMES_RANDOM, period = PERIOD_RANDOM, level_size = 16, binning = 1)\n \n #get eefective count in aveariging... 
\n x,n = log_merge_count(clin, cmulti, binning = 1)\n\n\ni,j = (K,0)\n\nx = np.arange(NFRAMES)\n\n#delta parameter for weight model\ndelta = 0.\n\nbg1 = bg1(51,0)[...,i,j]\nbg2 = bg2(51,0)[...,i,j] if CROSS else bg1\ng = g1(x,51,0, cross = CROSS)[...,i,j]\n\nwp = weight_prime_from_g(g,delta,bg1,bg2)\nw = weight_from_g(g, delta)\n\n#error estimators using a simple model of independent data (delta = 0).\n\nerr1 = sigma_prime_weighted(0., g+0j, delta,bg1,bg2)#/n**0.5\nerr2 = sigma_prime_weighted(1., g, delta,bg1,bg2)#/n**0.5\nerr3 = sigma_prime_weighted(wp, g, delta,bg1,bg2)#/n**0.5\n\nerr5 = sigma_weighted(0., g, delta)#/n**0.5\nerr6 = sigma_weighted(1., g, delta)#/n**0.5\nerr7 = sigma_weighted(w, g, delta)#/n**0.5\n\n\nax1 = plt.subplot(121)\nax1.set_xscale(\"log\")\nax1.set_xlabel(r\"$\\tau$\")\nax1.set_title(r\"$g(\\tau), w(\\tau)$ @ $q = {}$\".format(K))\n\nax2 = plt.subplot(122)\nax2.set_title(r\"$\\sigma (\\tau)$ @ $q = {}$\".format(K))\n\nfor binning in (0,1):\n x,y = merge_multilevel(multilevel(data_regular[:,2,i,j,:],binning = binning))\n if CROSS:\n x = x*PERIOD//2\n g = g1(x,51,0)[...,i,j]\n\n std = (((y - g)**2).mean(axis = 0))**0.5\n\n if binning == BINNING_DATA:\n ax1.semilogx(x[1:],y[0,1:],marker = \"o\", linestyle = '',fillstyle = \"none\",label = \"$R$\", color = \"k\") \n if binning == BINNING_ERROR:\n ax2.semilogx(x[1:],std[1:],marker = \"o\", linestyle = '', fillstyle = \"none\",label = \"$R$\", color = \"k\")\n else:\n ax2.semilogx(x[1:],std[1:],linestyle = ':', fillstyle = \"none\", color = \"k\")\n\n\n\n\nfor binning in (0,1):\n ax1.set_prop_cycle(None)\n ax2.set_prop_cycle(None)\n for norm in (1,2,3,5,6,7):\n \n x,y = merge_multilevel(multilevel(data[:,norm,i,j,:],binning = binning))\n g = g1(x,51,0)[...,i,j]\n std = (((y - g)**2).mean(axis = 0))**0.5\n \n if binning == BINNING_DATA:\n ax1.semilogx(x[1:],y[0,1:],marker = MARKERS.get(norm,\"o\"), linestyle = '',fillstyle = \"none\",label = \"${}$\".format(LABELS.get(norm)))\n if binning == BINNING_ERROR:\n ax2.semilogx(x[1:],std[1:],marker = MARKERS.get(norm,\"o\"), linestyle = '', fillstyle = \"none\",label = \"${}$\".format(LABELS.get(norm)))\n else: \n ax2.semilogx(x[1:],std[1:],linestyle = ':', fillstyle = \"none\")\n\n\nax1.plot(x[1:],g1(x[1:],51,0)[...,i,j], \"k\",label = \"$g$\")\n\n# #: take first run, norm = 3 data for g estimation\n# x,g = log_average(data[0,3,i,j,:])\n# g = denoise(g)\n# g = decreasing(g)\n# g = g.clip(0,1)\n# ax1.plot(x[1:],g[1:], \"k:\",label = \"denoised\")\n\nx = np.arange(NFRAMES)\nax1.plot(x[1:],w[1:], \"k--\",label = \"$w$\")\nax1.plot(x[1:],wp[1:], \"k:\",label = \"$w'$\")\n\n#ax2.set_ylim(ax1.get_ylim())\n\n\nx,err1 = merge_multilevel(multilevel(err1,binning = 0))\nx,err2 = merge_multilevel(multilevel(err2,binning = 0))\nx,err3 = merge_multilevel(multilevel(err3,binning = 0))\nx,err5 = merge_multilevel(multilevel(err5,binning = 0))\nx,err6 = merge_multilevel(multilevel(err6,binning = 0))\nx,err7 = merge_multilevel(multilevel(err7,binning = 0))\n\nax2.set_prop_cycle(None)\n\n\nnmax = len(x)\nif BINNING_MODEL or not CROSS:\n n = n[1:nmax]\n\nax2.loglog(x[1:],err1[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err2[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err3[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err5[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err6[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err7[1:]/np.sqrt(n),\"-\")\n\nax2.set_xlabel(r\"$\\tau$\")\nax2.set_ylabel(r\"$\\sigma$\")\nax2.set_ylim(0.001,2)\nax1.set_ylabel(r\"$g,w$\")\nax1.set_ylim(-1,1.5)\n\nax1.legend(loc = 
3)\n\nplt.tight_layout()\n\nif SAVE_FIGS:\n if CROSS:\n plt.savefig(\"plots/plot_cross_error_{}.pdf\".format(K))\n else:\n plt.savefig(\"plots/plot_auto_error_{}.pdf\".format(K))\n \nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"numpy.sqrt",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
panpanyunshi/rlkit | [
"e1f6c9e59ab2baab93d35385cdc43ab3632b2b65"
] | [
"rlkit/torch/core.py"
] | [
"import numpy as np\nimport torch\n\nfrom rlkit.torch import pytorch_util as ptu\n\n\ndef eval_np(module, *args, **kwargs):\n \"\"\"\n Eval this module with a numpy interface, 返回numpy类型变量\n\n Same as a call to __call__ except all Variable input/outputs are\n replaced with numpy equivalents.\n\n Assumes the output is either a single object or a tuple of objects.\n \"\"\"\n torch_args = tuple(torch_ify(x) for x in args)\n torch_kwargs = {k: torch_ify(v) for k, v in kwargs.items()}\n outputs = module(*torch_args, **torch_kwargs)\n if isinstance(outputs, tuple):\n return tuple(np_ify(x) for x in outputs)\n else:\n return np_ify(outputs)\n\n\ndef torch_ify(np_array_or_other):\n '''\n 将numpy数据转化为torch数据\n :param np_array_or_other:\n :return:\n '''\n if isinstance(np_array_or_other, np.ndarray):\n return ptu.from_numpy(np_array_or_other)\n else:\n return np_array_or_other\n\n\ndef np_ify(tensor_or_other):\n '''\n 将tensor变量转化为numpy\n :param tensor_or_other:\n :return:\n '''\n if isinstance(tensor_or_other, torch.autograd.Variable):\n return ptu.get_numpy(tensor_or_other)\n else:\n return tensor_or_other\n\n\ndef _elem_or_tuple_to_variable(elem_or_tuple):\n if isinstance(elem_or_tuple, tuple):\n return tuple(\n _elem_or_tuple_to_variable(e) for e in elem_or_tuple\n )\n return ptu.from_numpy(elem_or_tuple).float()\n\n\ndef _filter_batch(np_batch):\n for k, v in np_batch.items():\n if v.dtype == np.bool:\n yield k, v.astype(int)\n else:\n yield k, v\n\n\ndef np_to_pytorch_batch(np_batch):\n return {\n k: _elem_or_tuple_to_variable(x)\n for k, x in _filter_batch(np_batch)\n if x.dtype != np.dtype('O') # ignore object (e.g. dictionaries)\n }\n\n"
] | [
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
asaran/decision-transformer | [
"f6f8bf283256d616d213ac5bd07cb7f3efb101b3"
] | [
"gym/decision_transformer/models/decision_transformer.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\n\nimport transformers\n\nfrom decision_transformer.models.model import TrajectoryModel\nfrom decision_transformer.models.trajectory_gpt2 import GPT2Model\n\n\nclass DecisionTransformer(TrajectoryModel):\n\n \"\"\"\n This model uses GPT to model (Return_1, state_1, action_1, Return_2, state_2, ...)\n \"\"\"\n\n def __init__(\n self,\n state_dim,\n act_dim,\n hidden_size,\n max_length=None,\n max_ep_len=4096,\n action_tanh=True,\n **kwargs\n ):\n super().__init__(state_dim, act_dim, max_length=max_length)\n\n self.hidden_size = hidden_size\n config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_embd=hidden_size,\n **kwargs\n )\n\n # note: the only difference between this GPT2Model and the default Huggingface version\n # is that the positional embeddings are removed (since we'll add those ourselves)\n self.transformer = GPT2Model(config)\n\n self.embed_timestep = nn.Embedding(max_ep_len, hidden_size)\n self.embed_return = torch.nn.Linear(1, hidden_size)\n self.embed_state = torch.nn.Linear(self.state_dim, hidden_size)\n self.embed_action = torch.nn.Linear(self.act_dim, hidden_size)\n\n self.embed_ln = nn.LayerNorm(hidden_size)\n\n # note: we don't predict states or returns for the paper\n self.predict_state = torch.nn.Linear(hidden_size, self.state_dim)\n self.predict_action = nn.Sequential(\n *([nn.Linear(hidden_size, self.act_dim)] + ([nn.Tanh()] if action_tanh else []))\n )\n self.predict_return = torch.nn.Linear(hidden_size, 1)\n\n def forward(self, states, actions, rewards, returns_to_go, timesteps, attention_mask=None):\n\n batch_size, seq_length = states.shape[0], states.shape[1]\n\n if attention_mask is None:\n # attention mask for GPT: 1 if can be attended to, 0 if not\n attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)\n\n # embed each modality with a different head\n state_embeddings = self.embed_state(states)\n action_embeddings = self.embed_action(actions)\n returns_embeddings = self.embed_return(returns_to_go)\n time_embeddings = self.embed_timestep(timesteps)\n\n # time embeddings are treated similar to positional embeddings\n state_embeddings = state_embeddings + time_embeddings\n action_embeddings = action_embeddings + time_embeddings\n returns_embeddings = returns_embeddings + time_embeddings\n\n # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)\n # which works nice in an autoregressive sense since states predict actions\n stacked_inputs = torch.stack(\n (returns_embeddings, state_embeddings, action_embeddings), dim=1\n ).permute(0, 2, 1, 3).reshape(batch_size, 3*seq_length, self.hidden_size)\n stacked_inputs = self.embed_ln(stacked_inputs)\n\n # to make the attention mask fit the stacked inputs, have to stack it as well\n stacked_attention_mask = torch.stack(\n (attention_mask, attention_mask, attention_mask), dim=1\n ).permute(0, 2, 1).reshape(batch_size, 3*seq_length)\n\n # we feed in the input embeddings (not word indices as in NLP) to the model\n transformer_outputs = self.transformer(\n inputs_embeds=stacked_inputs,\n attention_mask=stacked_attention_mask,\n )\n x = transformer_outputs['last_hidden_state']\n\n # reshape x so that the second dimension corresponds to the original\n # returns (0), states (1), or actions (2); i.e. 
x[:,1,t] is the token for s_t\n x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)\n\n # get predictions\n return_preds = self.predict_return(x[:,2]) # predict next return given state and action\n state_preds = self.predict_state(x[:,2]) # predict next state given state and action\n action_preds = self.predict_action(x[:,1]) # predict next action given state\n\n return state_preds, action_preds, return_preds\n\n def get_action(self, states, actions, rewards, returns_to_go, timesteps, **kwargs):\n # we don't care about the past rewards in this model\n\n states = states.reshape(1, -1, self.state_dim)\n actions = actions.reshape(1, -1, self.act_dim)\n returns_to_go = returns_to_go.reshape(1, -1, 1)\n timesteps = timesteps.reshape(1, -1)\n\n if self.max_length is not None:\n states = states[:,-self.max_length:]\n actions = actions[:,-self.max_length:]\n returns_to_go = returns_to_go[:,-self.max_length:]\n timesteps = timesteps[:,-self.max_length:]\n\n # pad all tokens to sequence length\n attention_mask = torch.cat([torch.zeros(self.max_length-states.shape[1]), torch.ones(states.shape[1])])\n attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(1, -1)\n states = torch.cat(\n [torch.zeros((states.shape[0], self.max_length-states.shape[1], self.state_dim), device=states.device), states],\n dim=1).to(dtype=torch.float32)\n actions = torch.cat(\n [torch.zeros((actions.shape[0], self.max_length - actions.shape[1], self.act_dim),\n device=actions.device), actions],\n dim=1).to(dtype=torch.float32)\n returns_to_go = torch.cat(\n [torch.zeros((returns_to_go.shape[0], self.max_length-returns_to_go.shape[1], 1), device=returns_to_go.device), returns_to_go],\n dim=1).to(dtype=torch.float32)\n timesteps = torch.cat(\n [torch.zeros((timesteps.shape[0], self.max_length-timesteps.shape[1]), device=timesteps.device), timesteps],\n dim=1\n ).to(dtype=torch.long)\n else:\n attention_mask = None\n\n _, action_preds, return_preds = self.forward(\n states, actions, None, returns_to_go, timesteps, attention_mask=attention_mask, **kwargs)\n\n return action_preds[0,-1]\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elwintay/clearml_test | [
"c87985303e69490e83ec779d098570bc505f80ae"
] | [
"model_gtt/run_pl_gtt.py"
] | [
"import argparse\nimport glob\nimport pandas as pd\nimport logging\nimport os\nimport json\nfrom collections import OrderedDict\nfrom eval import eval_tf\n\nimport numpy as np\nimport torch\nfrom seqeval.metrics import f1_score, precision_score, recall_score, accuracy_score\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom transformer_base import BaseTransformer, add_generic_args, generic_train\nfrom utils_gtt import convert_examples_to_features, get_labels, read_examples_from_file, read_golds_from_test_file, not_sub_string, incident_token_to_type\n\nrole_list = [\"incident_type\", \"PerpInd\", \"PerpOrg\", \"Target\", \"Victim\", \"Weapon\"]\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n# def upload_file(s3,bucket_name,s3_folder,filename, preds_log):\n \n# path = s3_folder + filename\n# s3object = s3.Object(bucket_name,path)\n# s3object.put(Body=(bytes(json.dumps(preds_log, indent=4))))\n# return\n\nclass NERTransformer(BaseTransformer):\n \"\"\"\n A training module for single-transformer-ee. See BaseTransformer for the core options.\n \"\"\"\n\n mode = \"base\"\n\n def __init__(self, hparams):\n self.pad_token_label_id = CrossEntropyLoss().ignore_index\n # super(NERTransformer, self).__init__(hparams, num_labels, self.mode)\n \n super(NERTransformer, self).__init__(hparams, None, self.mode)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() and self.hparams.n_gpu else \"cpu\")\n # n_gpu = torch.cuda.device_count()\n # self.MASK = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]\n self.SEP = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[0]\n self.CLS = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[0] \n self.SEP_template = self.tokenizer.convert_tokens_to_ids([\"[unused0]\"])[0]\n\n def forward(self, **inputs):\n labels = inputs.pop(\"labels\", None) # doc_length\n args = self.hparams\n\n outputs = self.model(**inputs) # sequence_output, pooled_output, (hidden_states), (attentions)\n sequence_output = outputs[0]\n src_sequence_output = sequence_output[:, :args.max_seq_length_src, :]\n src_sequence_output = torch.transpose(src_sequence_output, 1, 2) # hidden * doc_length\n tgt_sequence_output = sequence_output[:, args.max_seq_length_src:, :] # tgt_length * hidden\n logits = torch.bmm(tgt_sequence_output, src_sequence_output) # tgt_length * doc_length\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n tgt_attention_mask_1d = inputs[\"attention_mask\"][:, -1, args.max_seq_length_src:]\n if tgt_attention_mask_1d is not None:\n active_logits = logits.view(-1, args.max_seq_length_src)\n active_labels = labels.view(-1)\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, args.max_seq_length_src), labels.view(-1))\n outputs = (loss,) + outputs\n\n # import ipdb; ipdb.set_trace()\n return outputs\n\n def training_step(self, batch, batch_num):\n \"Compute loss and log.\"\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"token_type_ids\": batch[2], \"position_ids\": batch[3], \"labels\": batch[4]}\n outputs = self(**inputs)\n loss = outputs[0]\n tensorboard_logs = {\"training_loss\": loss, \"rate\": self.lr_scheduler.get_last_lr()[-1]}\n return {\"loss\": loss, \"log\": tensorboard_logs}\n\n def prepare_data(self):\n \"Called to initialize data. 
Use the call to construct features\"\n args = self.hparams\n for mode in [\"train\", \"dev\", \"test\"]:\n cached_features_file = self._feature_file(mode)\n if not os.path.exists(cached_features_file):\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n examples = read_examples_from_file(args.data_dir, mode, self.tokenizer, debug=args.debug)\n features = convert_examples_to_features(\n examples,\n # self.labels,\n args.max_seq_length_src,\n args.max_seq_length_tgt,\n self.tokenizer,\n cls_token_at_end=bool(args.model_type in [\"xlnet\"]),\n cls_token=self.tokenizer.cls_token,\n cls_token_segment_id=2 if args.model_type in [\"xlnet\"] else 0,\n sep_token=self.tokenizer.sep_token,\n sep_token_extra=bool(args.model_type in [\"roberta\"]),\n pad_on_left=bool(args.model_type in [\"xlnet\"]),\n pad_token=self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,\n pad_token_label_id=self.pad_token_label_id,\n )\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n # import ipdb; ipdb.set_trace()\n\n def load_dataset(self, mode, batch_size):\n \"Load datasets. Called after prepare data.\"\n args = self.hparams\n cached_features_file = self._feature_file(mode)\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n if args.debug:\n features = features[:2]\n # features = features[:len(features)//10]\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) #check this next\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_position_ids = torch.tensor([f.position_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n all_docid = torch.tensor([f.docid for f in features], dtype=torch.long)\n return DataLoader(\n TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_position_ids, all_label_ids, all_docid), batch_size=batch_size\n )\n\n def validation_step(self, batch, batch_nb):\n \"Compute validation\"\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"token_type_ids\": batch[2], \"position_ids\": batch[3], \"labels\": batch[4]}\n # inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"position_ids\": batch[3], \"labels\": batch[4]}\n outputs = self(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n docid = batch[5].detach().cpu().numpy()\n return {\"val_loss\": tmp_eval_loss.detach().cpu(), \"pred\": preds, \"target\": out_label_ids, \"docid\": docid}\n\n\n def validation_epoch_end(self, outputs):\n val_loss_mean = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n preds = np.concatenate([x[\"pred\"] for x in outputs], axis=0)\n preds = np.argmax(preds, axis=2)\n out_label_ids = np.concatenate([x[\"target\"] for x in outputs], axis=0)\n\n out_label_list = [[] for _ in range(out_label_ids.shape[0])]\n preds_list = [[] for _ in range(out_label_ids.shape[0])]\n\n for i in range(out_label_ids.shape[0]):\n for j in range(out_label_ids.shape[1]):\n if out_label_ids[i, j] != self.pad_token_label_id:\n out_label_list[i].append(out_label_ids[i][j])\n preds_list[i].append(preds[i][j])\n # import ipdb; 
ipdb.set_trace()\n\n logs = {\n \"val_loss\": val_loss_mean,\n \"val_accuracy\": accuracy_score(out_label_list, preds_list)\n }\n return {\"val_loss\": logs[\"val_loss\"], \"log\": logs, \"progress_bar\": logs}\n\n\n def test_step(self, batch, batch_nb):\n \"Compute test\"\n # test_loss\n # inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"token_type_ids\": batch[2], \"position_ids\": batch[3], \"labels\": batch[4]}\n # outputs = self(**inputs)\n # tmp_eval_loss, tmp_eval_logits = outputs[:2]\n # tmp_eval_logits = tmp_eval_logits.detach().cpu().numpy()\n # out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n\n # preds (real decoding)\n global_args = self.hparams\n max_seq_length_src = self.hparams.max_seq_length_src\n max_seq_length_tgt = self.hparams.max_seq_length_tgt\n bs = batch[0].size(0)\n\n i = max_seq_length_src\n src_input_ids = batch[0][:, :max_seq_length_src]\n src_position_ids = batch[3][:, :max_seq_length_src]\n tgt_input_ids, init_tgt_input_ids = torch.tensor([[self.CLS]]).to(self.device), torch.tensor([[self.CLS]]).to(self.device)\n tgt_position_ids, init_tgt_position_ids = torch.tensor([[0]]).to(self.device), torch.tensor([[0]]).to(self.device)\n\n # get out_input_id_list (pred_seq)\n while i <= max_seq_length_src + max_seq_length_tgt - 1:\n input_ids = torch.cat((src_input_ids, tgt_input_ids), dim=1)\n attention_mask = batch[1][:, :i+1, :i+1]\n for j in range(max_seq_length_src, i+1):\n attention_mask[:, j, max_seq_length_src:j+1] = 1\n # if i == max_seq_length_src + 3: # debug\n # import ipdb; ipdb.set_trace()\n token_type_ids = batch[2][:, :i+1]\n position_ids = torch.cat((src_position_ids, tgt_position_ids), dim=1)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"token_type_ids\": token_type_ids, \"position_ids\": position_ids}\n # print(tgt_position_ids) # debug\n outputs = self(**inputs)\n logits = outputs[0][0]\n\n ## option 1: setting the decoding constraints (!!!)\n # # (constraint 1) on decoding offset (length and larger offset)\n # for j in range(tgt_position_ids.size(1)):\n # if j == 0: continue\n # cur_token_id = tgt_input_ids[0][j].detach().cpu().tolist()\n # cur_token_position = tgt_position_ids[0][j].detach().cpu().tolist() \n # if cur_token_id == self.CLS or cur_token_id == self.SEP or cur_token_id == self.SEP_template: \n # continue\n # else:\n # # remove (early stop/output [SEP]) the case like ``the post post post post ...''\n # token_id_cnt = 0\n # k = j\n # while k > 0 and tgt_input_ids[0][k].detach().cpu().tolist() == cur_token_id:\n # token_id_cnt += 1\n # k -= 1\n # if token_id_cnt >= 4:\n # for q in range(max_seq_length_src):\n # if src_input_ids[0][q].detach().cpu().tolist() == self.SEP:\n # logits[j][q] += 10000.0\n\n # before_mask = cur_token_position\n # for k in range(before_mask):\n # if src_input_ids[0][k].detach().cpu().tolist() == self.SEP: \n # continue\n # logits[j][k] -= 10000.0\n # # for k in range(cur_token_position + 30, max_seq_length_src):\n # # if src_input_ids[0][k].detach().cpu().tolist() == self.SEP: continue\n # # logits[j][k] -= 10000.0\n\n # (constraint 2) thresh for predicting [SEP]\n probs = torch.nn.Softmax(dim=-1)(logits)\n top_2_probs, top_2_indices = torch.topk(probs, 2, dim=-1)\n for j in range(top_2_indices.size(0)):\n prob_gap = (top_2_probs[j][0]/top_2_probs[j][1]).detach().cpu().tolist()\n if src_input_ids[0][top_2_indices[j][0].detach().cpu().tolist()].detach().cpu().tolist() == self.SEP and prob_gap < global_args.thresh:\n top_2_indices[j][0] = 
top_2_indices[j][1]\n\n out_position_id = top_2_indices[:, 0]\n\n # # option 2: direct greedy decoding\n # out_position_id = torch.argmax(logits, -1)\n \n # print(out_position_id) # debug\n out_input_id = torch.index_select(src_input_ids, 1, out_position_id)\n out_position_id = out_position_id.unsqueeze(dim=0) # add batch dim\n tgt_input_ids = torch.cat((init_tgt_input_ids, out_input_id), dim=1)\n tgt_position_ids = torch.cat((init_tgt_position_ids, out_position_id), dim=1)\n i += 1\n\n # import ipdb; ipdb.set_trace()\n\n # #########save prob logits\n # temp_save = pd.DataFrame(probs.cpu().numpy())\n # temp_torch = torch.range(0, src_input_ids.size(1)-1,dtype=int,device='cuda')\n # temp_out_input_id = torch.index_select(src_input_ids, 1, temp_torch)\n # temp_out_input_id = temp_out_input_id.detach().cpu().tolist()\n # column = self.tokenizer.convert_ids_to_tokens(temp_out_input_id[0])\n # temp_save.columns = column\n # docids = batch[5].detach().cpu().tolist()\n # docids_name = str(docids) + '_probs.csv'\n # temp_save.to_csv(docids_name, index=False)\n\n\n # from out_input_id_list (pred_seq) to pred_extracts\n docids = batch[5].detach().cpu().tolist()\n pred_seq = []\n pred_extract = []\n for b in range(bs): # bs == 1\n src_input_id_list = src_input_ids[b].detach().cpu().tolist()\n out_input_id_list = out_input_id[b].detach().cpu().tolist()\n out_position_id_list = out_position_id[b].detach().cpu().tolist()\n if out_input_id_list[-1] != self.CLS:\n out_input_id_list.append(self.CLS)\n\n # get raw pred_seq\n # sep_cnt = 0\n for idx, token_id in enumerate(out_input_id_list):\n if token_id == self.CLS:\n break\n pred_seq.append(self.tokenizer.convert_ids_to_tokens(out_input_id_list[:idx+1]))\n\n # get pred_extract\n temps_extract = []\n buf_template = []\n buf_template_pos = []\n for idx, token_id in enumerate(out_input_id_list[:idx]):\n if token_id == self.SEP_template:\n # decode one template's content\n # incident_token_to_type[]\n p_extract = []\n sep_cnt = 0\n position_buf = []\n for temp_idx, temp_token_id in enumerate(buf_template):\n if temp_token_id == self.SEP:\n sep_cnt += 1\n entitys = []\n s_e_pair = []\n for position in position_buf:\n s_e_pair.append(position)\n if len(s_e_pair) == 2:\n s, e = s_e_pair[0], s_e_pair[1]\n extract_ids = []\n for j in range(s, e+1): \n extract_ids.append(src_input_id_list[j])\n extract_tokens = self.tokenizer.convert_ids_to_tokens(extract_ids)\n if extract_tokens:\n if len(extract_tokens) <= 20: \n candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\").replace(\" - \", \"-\")\n if sep_cnt != 5 or \"bomb\" not in candidate_str:\n if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n entitys.append([candidate_str])\n s_e_pair = []\n # extra s in s_e_pair\n if s_e_pair:\n extract_tokens = self.tokenizer.convert_ids_to_tokens([src_input_id_list[s_e_pair[0]]])\n if len(extract_tokens) <= 20: \n candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\").replace(\" - \", \"-\")\n if sep_cnt != 5 or \"bomb\" not in candidate_str:\n if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n entitys.append([candidate_str])\n # add all entitys of this role\n p_extract.append(entitys)\n # clean buffer\n position_buf = []\n else:\n position_buf.append(buf_template_pos[temp_idx])\n if sep_cnt >= 6: break\n\n # extra token1 token2 [unused0] (no final [SEP])\n if position_buf:\n entitys = []\n s_e_pair = []\n for position in 
position_buf:\n s_e_pair.append(position)\n if len(s_e_pair) == 2:\n s, e = s_e_pair[0], s_e_pair[1]\n extract_ids = []\n for j in range(s, e+1): \n extract_ids.append(src_input_id_list[j])\n extract_tokens = self.tokenizer.convert_ids_to_tokens(extract_ids)\n if extract_tokens:\n if len(extract_tokens) <= 20: \n candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\").replace(\" - \", \"-\")\n if sep_cnt != 5 or \"bomb\" not in candidate_str:\n if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n entitys.append([candidate_str])\n p_extract.append(entitys)\n\n if p_extract:\n temps_extract.append(p_extract)\n buf_template = []\n buf_template_pos = []\n else:\n buf_template.append(out_input_id_list[idx])\n buf_template_pos.append(out_position_id_list[idx])\n\n\n pred_extract.append(temps_extract)\n ### old ###\n # sep_cnt = 0\n # position_buf = []\n # for idx, token_id in enumerate(out_input_id_list):\n # if token_id == self.SEP:\n # sep_cnt += 1\n # entitys = []\n # s_e_pair = []\n # for position in position_buf:\n # s_e_pair.append(position)\n # if len(s_e_pair) == 2:\n # s, e = s_e_pair[0], s_e_pair[1]\n # extract_ids = []\n # for j in range(s, e+1): \n # extract_ids.append(src_input_id_list[j])\n # extract_tokens = self.tokenizer.convert_ids_to_tokens(extract_ids)\n # if extract_tokens:\n # if len(extract_tokens) <= 20: \n # candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\")\n # if sep_cnt != 4 or \"bomb\" not in candidate_str:\n # if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n # entitys.append([candidate_str])\n # s_e_pair = []\n # # extra s in s_e_pair\n # if s_e_pair:\n # extract_tokens = self.tokenizer.convert_ids_to_tokens([src_input_id_list[s_e_pair[0]]])\n # if len(extract_tokens) <= 20: \n # candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\")\n # if sep_cnt != 4 or \"bomb\" not in candidate_str:\n # if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n # entitys.append([candidate_str])\n # # add all entitys of this role\n # p_extract.append(entitys)\n # # clean buffer\n # position_buf = []\n # else:\n # position_buf.append(out_position_id_list[idx])\n # if sep_cnt >= 5: break\n ### old ###\n\n\n # return {\"test_loss\": tmp_eval_loss.detach().cpu(), \"pred_seq\": pred_seq, \"pred_extract\": pred_extract, \"logits\": tmp_eval_logits, \"target\": out_label_ids, \"docid\": docids}\n return {\"pred_seq\": pred_seq, \"pred_extract\": pred_extract, \"docid\": docids}\n\n\n def test_epoch_end(self, outputs):\n # # updating to test_epoch_end instead of deprecated test_end\n args = self.hparams\n logs = {}\n\n ## real decoding\n # read golds\n doctexts_tokens, golds = read_golds_from_test_file(args.data_dir, self.tokenizer, debug=args.debug)\n # get preds and preds_log\n preds = OrderedDict()\n preds_log = OrderedDict()\n for x in outputs:\n docids = x[\"docid\"]\n pred_seq = x[\"pred_seq\"]\n pred_extract = x[\"pred_extract\"]\n # preds (pred_extract)]\n for docid, temps_extract in zip(docids, pred_extract):\n if docid not in preds:\n preds[docid] = []\n for temp_raw in temps_extract:\n temp = OrderedDict()\n template_name = temp_raw[0][0][0]\n with open('/data/wikievents/muc_format/role_dicts.json') as f:\n role_dict = json.load(f)\n role_list = ['incident_type'] + role_dict[template_name]\n for idx, role in enumerate(role_list):\n temp[role] = []\n if idx+1 > 
len(temp_raw):\n continue\n elif temp_raw[idx]:\n if role == \"incident_type\":\n if temp_raw[idx][0][0] in incident_token_to_type:\n temp[role] = incident_token_to_type[temp_raw[idx][0][0]]\n else:\n temp[role] = temp_raw[idx][0][0]\n else:\n temp[role] = temp_raw[idx]\n\n preds[docid].append(temp)\n\n \n # preds_log\n for docid, p_seq in zip(docids, pred_seq):\n if docid not in preds_log:\n preds_log[docid] = OrderedDict()\n preds_log[docid][\"doctext\"] = \" \".join(doctexts_tokens[docid])\n preds_log[docid][\"pred_seq\"] = \" \".join(p_seq)\n preds_log[docid][\"pred_templates\"] = preds[docid]\n preds_log[docid][\"gold_templates\"] = golds[docid]\n\n # # evaluate (rewrite this for it to work)\n # results = eval_tf(preds, golds)\n # for key in results:\n # if key == \"micro_avg\":\n # print(\"***************** {} *****************\".format(key))\n # else:\n # print(\"================= {} =================\".format(key))\n # print(\"P: {:.2f}%, R: {:.2f}%, F1: {:.2f}%\".format(results[key][\"p\"] * 100, results[key][\"r\"] * 100, results[key][\"f1\"] * 100)) # phi_strict\n\n logger.info(\"writing preds to .out file:\")\n # session = boto3.Session(aws_access_key_id=\"AKIA8C43BC01F5E3176C\",aws_secret_access_key=\"VKYHHxqQl5GW/g3RG6c/qR65EbNrpTBBdNRtYX08\")\n # s3 = session.resource('s3',endpoint_url=\"https://ecs.dsta.ai\")\n # upload_file(s3,\"blackwidow-s3\",\"models/trained_outputs/gtt/\",\"preds_gtt.out\", preds_log)\n if args.debug:\n output_path = os.path.join(args.output_dir,\"preds_gtt_debug.out\") \n with open(\"preds_gtt_debug.out\", \"w+\") as f:\n f.write(json.dumps(preds_log, indent=4)) \n else:\n output_path = os.path.join(args.output_dir,\"preds_gtt.out\") \n with open(output_path, \"w+\") as f:\n f.write(json.dumps(preds_log, indent=4))\n\n # import ipdb; ipdb.set_trace()\n\n return {\"log\": logs, \"progress_bar\": logs}\n # return {\"test_loss\": logs[\"test_loss\"], \"log\": logs, \"progress_bar\": logs}\n\n @staticmethod\n def add_model_specific_args(parser, root_dir):\n # Add NER specific options\n BaseTransformer.add_model_specific_args(parser, root_dir)\n parser.add_argument(\n \"--max_seq_length_src\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization for src. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n\n parser.add_argument(\n \"--max_seq_length_tgt\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization for tgt. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n\n parser.add_argument(\n \"--labels\",\n default=\"\",\n type=str,\n help=\"Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.\",\n )\n\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.\",\n )\n\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"if in debug mode\")\n\n parser.add_argument(\"--thresh\", default=1, type=float, help=\"thresh for predicting [SEP]\",)\n return parser\n\n\n# if __name__ == \"__main__\":\n# parser = argparse.ArgumentParser()\n# add_generic_args(parser, os.getcwd())\n# parser = NERTransformer.add_model_specific_args(parser, os.getcwd())\n# args = parser.parse_args()\n# global_args = args\n# logger.info(args)\n# model = NERTransformer(args)\n# trainer = generic_train(model, args)\n\n# if args.do_predict:\n# # See https://github.com/huggingface/transformers/issues/3159\n# # pl use this format to create a checkpoint:\n# # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\\\n# # /pytorch_lightning/callbacks/model_checkpoint.py#L169\n# checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, \"checkpointepoch=*.ckpt\"), recursive=True)))\n# model = NERTransformer.load_from_checkpoint(checkpoints[-1])\n# model.hparams = args\n# if args.debug:\n# model.hparams.debug = True\n# trainer.test(model)\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"torch.transpose",
"torch.load",
"torch.cat",
"torch.utils.data.TensorDataset",
"torch.stack",
"torch.tensor",
"numpy.concatenate",
"numpy.argmax",
"torch.bmm",
"torch.cuda.is_available",
"torch.topk",
"torch.index_select",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
simonsimon006/tensorflow-wavelets | [
"21a095bf0048ae2488ca5ae4961d2cbfe94263a9",
"21a095bf0048ae2488ca5ae4961d2cbfe94263a9"
] | [
"Development/models/DWT2.py",
"src/tensorflow_wavelets/Layers/DMWT.py"
] | [
"import os\nimport cv2\nimport math\nimport pywt\nimport numpy as np\nfrom utils import mse\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.datasets import mnist, cifar10\n\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # for tensor flow warning\n# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n\nclass DWT(layers.Layer):\n def __init__(self, name='haar', **kwargs):\n super(DWT, self).__init__(**kwargs)\n self._name = self.name + \"_\" + name\n # get filter coeffs from 3rd party lib\n wavelet = pywt.Wavelet(name)\n self.dec_len = wavelet.dec_len\n\n # decomposition filter low pass and hight pass coeffs\n db2_lpf = wavelet.dec_lo\n db2_hpf = wavelet.dec_hi\n\n # covert filters into tensors and reshape for convolution math\n db2_lpf = tf.constant(db2_lpf[::-1])\n self.db2_lpf = tf.reshape(db2_lpf, (1, wavelet.dec_len, 1, 1))\n\n db2_hpf = tf.constant(db2_hpf[::-1])\n self.db2_hpf = tf.reshape(db2_hpf, (1, wavelet.dec_len, 1, 1))\n\n self.conv_type = \"VALID\"\n self.border_padd = \"SYMMETRIC\"\n\n def build(self, input_shape):\n # filter dims should be bigger if input is not gray scale\n if input_shape[-1] != 1:\n self.db2_lpf = tf.repeat(self.db2_lpf, input_shape[-1], axis=-1)\n self.db2_hpf = tf.repeat(self.db2_hpf, input_shape[-1], axis=-1)\n\n def call(self, inputs, training=None, mask=None):\n\n # border padding symatric add coulums\n inputs_pad = tf.pad(inputs, [[0, 0], [0, 0], [self.dec_len-1, self.dec_len-1], [0, 0]], self.border_padd)\n\n # approximation conv only rows\n a = tf.nn.conv2d(\n inputs_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # details conv only rows\n d = tf.nn.conv2d(\n inputs_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # ds - down sample\n a_ds = a[:, :, 1:a.shape[2]:2, :]\n d_ds = d[:, :, 1:d.shape[2]:2, :]\n\n # border padding symatric add rows\n a_ds_pad = tf.pad(a_ds, [[0, 0], [self.dec_len-1, self.dec_len-1], [0, 0], [0, 0]], self.border_padd)\n d_ds_pad = tf.pad(d_ds, [[0, 0], [self.dec_len-1, self.dec_len-1], [0, 0], [0, 0]], self.border_padd)\n\n # convolution is done on the rows so we need to\n # transpose the matrix in order to convolve the colums\n a_ds_pad = tf.transpose(a_ds_pad, perm=[0, 2, 1, 3])\n d_ds_pad = tf.transpose(d_ds_pad, perm=[0, 2, 1, 3])\n\n # aa approximation approximation\n aa = tf.nn.conv2d(\n a_ds_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # ad approximation details\n ad = tf.nn.conv2d(\n a_ds_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # ad details aproximation\n da = tf.nn.conv2d(\n d_ds_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # dd details details\n dd = tf.nn.conv2d(\n d_ds_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n\n # transpose back the matrix\n aa = tf.transpose(aa, perm=[0, 2, 1, 3])\n ad = tf.transpose(ad, perm=[0, 2, 1, 3])\n da = tf.transpose(da, perm=[0, 2, 1, 3])\n dd = tf.transpose(dd, perm=[0, 2, 1, 3])\n\n # down sample\n ll = aa[:, 1:aa.shape[1]:2, :, :]\n lh = ad[:, 1:ad.shape[1]:2, :, :]\n hl = da[:, 1:da.shape[1]:2, :, :]\n hh = dd[:, 1:dd.shape[1]:2, :, :]\n\n # concate all outputs ionto tensor\n x = tf.concat([ll, lh, hl, hh], axis=-1)\n\n return x\n\n\nclass IDWT(layers.Layer):\n def __init__(self, name='haar', **kwargs):\n super(IDWT, self).__init__(**kwargs)\n self._name = self.name + \"_\" + name\n self.pad_type = \"VALID\"\n self.border_pad = \"SYMMETRIC\"\n\n # get filter coeffs 
from 3rd party lib\n wavelet = pywt.Wavelet(name)\n self.rec_len = wavelet.rec_len\n\n # decomposition filter low pass and hight pass coeffs\n db2_lpf = wavelet.rec_lo\n db2_hpf = wavelet.rec_hi\n\n # covert filters into tensors and reshape for convolution math\n db2_lpf = tf.constant(db2_lpf[::-1])\n self.db2_lpf = tf.reshape(db2_lpf, (1, wavelet.rec_len, 1, 1))\n\n db2_hpf = tf.constant(db2_hpf[::-1])\n self.db2_hpf = tf.reshape(db2_hpf, (1, wavelet.rec_len, 1, 1))\n\n def upsampler2d(self, x):\n \"\"\"\n up sampling with zero insertion between rows and columns\n :param x: 4 dim tensor (?, w, h, ch)\n :return: up sampled tensor with shape (?, 2*w, 2*h, ch)\n \"\"\"\n # create zero like tensor\n zero_tensor = tf.zeros_like(x)\n # stack both tensors\n stack_rows = tf.stack([x, zero_tensor], axis=3)\n # reshape for zero insertion between the rows\n stack_rows = tf.reshape(stack_rows, shape=[-1, x.shape[1], x.shape[2]*2, x.shape[3]])\n # transpose in order to insert zeros for the columns\n stack_rows = tf.transpose(stack_rows, perm=[0, 2, 1, 3])\n # create zero like tensor but now like the padded one\n zero_tensor_1 = tf.zeros_like(stack_rows)\n # stack both tensors\n stack_rows_cols = tf.stack([stack_rows, zero_tensor_1], axis=3)\n # reshape for zero insertion between the columns\n us_padded = tf.reshape(stack_rows_cols, shape=[-1, x.shape[1]*2, x.shape[2]*2, x.shape[3]])\n # transpose back to normal\n us_padded = tf.transpose(us_padded, perm=[0, 2, 1, 3])\n return us_padded\n\n def call(self, inputs, training=None, mask=None):\n\n # border padding for convolution with low pass and high pass filters\n x = tf.pad(inputs,\n [[0, 0], [self.rec_len-1, self.rec_len-1], [self.rec_len-1, self.rec_len-1], [0, 0]],\n self.border_pad)\n\n # convert to float32\n # x = tf.cast(x, tf.float32)\n # GPU works with float 32\n # CPU can work with 64 but need to add extra flag\n # convert to float64\n # x = tf.cast(x, tf.float64)\n\n # extract approximation and details from input tensor\n # TODO: whit if tensor shape is bigger then 4?\n # and expand the dims for the up sampling\n ll = tf.expand_dims(x[:, :, :, 0], axis=-1)\n lh = tf.expand_dims(x[:, :, :, 1], axis=-1)\n hl = tf.expand_dims(x[:, :, :, 2], axis=-1)\n hh = tf.expand_dims(x[:, :, :, 3], axis=-1)\n\n ll_us_pad = self.upsampler2d(ll)\n lh_us_pad = self.upsampler2d(lh)\n hl_us_pad = self.upsampler2d(hl)\n hh_us_pad = self.upsampler2d(hh)\n\n # convolution for the rows\n # transpose for the column convolution\n # convolution for the column\n # transpose back to normal\n\n ll_conv_lpf = tf.nn.conv2d(ll_us_pad, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n ll_conv_lpf_tr = tf.transpose(ll_conv_lpf, perm=[0, 2, 1, 3])\n ll_conv_lpf_lpf = tf.nn.conv2d(ll_conv_lpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n ll_conv_lpf_lpf_tr = tf.transpose(ll_conv_lpf_lpf, perm=[0, 2, 1, 3])\n\n lh_conv_lpf = tf.nn.conv2d(lh_us_pad, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n lh_conv_lpf_tr = tf.transpose(lh_conv_lpf, perm=[0, 2, 1, 3])\n lh_conv_lpf_hpf = tf.nn.conv2d(lh_conv_lpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n lh_conv_lpf_hpf_tr = tf.transpose(lh_conv_lpf_hpf, perm=[0, 2, 1, 3])\n\n hl_conv_hpf = tf.nn.conv2d(hl_us_pad, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n hl_conv_hpf_tr = tf.transpose(hl_conv_hpf, perm=[0, 2, 1, 3])\n hl_conv_hpf_lpf = tf.nn.conv2d(hl_conv_hpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n hl_conv_hpf_lpf_tr = 
tf.transpose(hl_conv_hpf_lpf, perm=[0, 2, 1, 3])\n\n hh_conv_hpf = tf.nn.conv2d(hh_us_pad, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n hh_conv_hpf_tr = tf.transpose(hh_conv_hpf, perm=[0, 2, 1, 3])\n hh_conv_hpf_hpf = tf.nn.conv2d(hh_conv_hpf_tr, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n hh_conv_hpf_hpf_tr = tf.transpose(hh_conv_hpf_hpf, perm=[0, 2, 1, 3])\n\n # add all together\n reconstructed = tf.add_n([ll_conv_lpf_lpf_tr,\n lh_conv_lpf_hpf_tr,\n hl_conv_hpf_lpf_tr,\n hh_conv_hpf_hpf_tr])\n # crop the paded part\n crop = (self.rec_len -1)*2\n return reconstructed[:, crop-1:-crop, crop-1:-crop, :]\n\n\nif __name__ == \"__main__\":\n # (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n # x_train = x_train.astype(\"float32\")\n # x_test = x_test.astype(\"float32\")\n # # x_train = cv2.imread(\"../input/LennaGrey.png\", 0)\n # frog = tf.expand_dims(\n # x_train[0, :, :, :], 0, name=None\n # )\n # print(\"frog shape\", frog.shape)\n # model = keras.Sequential()\n # model.add(keras.Input(shape=(256, 256, 4)))\n # model.add(IDWT())\n # model.summary()\n\n name = \"db2\"\n img = cv2.imread(\"../input/LennaGrey.png\",0)\n img_ex1 = np.expand_dims(img, axis=-1)\n img_ex2 = np.expand_dims(img_ex1, axis=0)\n\n model = keras.Sequential()\n model.add(layers.InputLayer(input_shape=img_ex1.shape))\n model.add(DWT(name=name))\n # model.summary()\n coeffs = model.predict(img_ex2)\n LL = coeffs[0, ..., 0]\n LH = coeffs[0, ..., 1]\n HL = coeffs[0, ..., 2]\n HH = coeffs[0, ..., 3]\n\n model = keras.Sequential()\n model.add(layers.InputLayer(input_shape=coeffs[0].shape))\n model.add(IDWT(name=name))\n model.summary()\n\n my_recon = model.predict(coeffs)\n img_my_rec = my_recon[0, :, :, 0]\n coeffs2 = pywt.wavedec2(img, name,level=1)\n\n LL2 = coeffs2[0]\n LH2 = coeffs2[1][0]\n HL2 = coeffs2[1][1]\n HH2 = coeffs2[1][2]\n\n recon_pywt = pywt.waverec2(coeffs2, name)\n img_pywt_rec = recon_pywt\n\n print(\"LL mse \", mse.mse(LL, LL2))\n print(\"LH mse \", mse.mse(LH, LH2))\n print(\"HL mse \", mse.mse(HL, HL2))\n print(\"HH mse \", mse.mse(HH, HH2))\n print(\"img mse \", mse.mse(img_pywt_rec, img_my_rec))\n\n difference = cv2.absdiff(np.int32(img_my_rec), np.int32(img_pywt_rec))\n _, mask = cv2.threshold(difference.astype(\"uint8\"), 0, 255, cv2.THRESH_BINARY)\n\n cv2.imshow(\"diff\", mask)\n cv2.waitKey(0)\n pass\n\n\n\n\n\n # a = model.predict(frog, steps=1)\n # #\n # approx = tf.image.convert_image_dtype(a[0, ..., 0], dtype=tf.float32)\n # with tf.Session() as sess:\n # img = sess.run(approx)\n # # pass\n # #\n # img = np.clip(img, 0, 255)\n # img = np.ceil(img)\n # img = img.astype(\"uint8\")\n # with open(r\"D:\\TEMP\\LL_python_layer.raw\", \"wb\") as outfile:\n # outfile.write(img) # Write it\n\n # model = models.WaveletCifar10CNN.WaveletCNN((32,32,3), 10)\n # model.summary()",
"# Timor Leiderman AUG 2021\n\nfrom tensorflow.keras import layers\nfrom tensorflow_wavelets.utils import filters\nfrom tensorflow_wavelets.utils.helpers import *\nfrom tensorflow_wavelets.utils.cast import *\n\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # for tensor flow warning\n# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n\n# Discrete MultiWavelet transform Layer\nclass DMWT(layers.Layer):\n \"\"\"\n Discrete Multi Wavlelets Transform\n Input: wave_name - name of the Wavele Filters (ghm, dd2)\n TODO: add support for more wavelets\n \"\"\"\n def __init__(self, wavelet_name='ghm', **kwargs):\n super(DMWT, self).__init__(**kwargs)\n self.wave_name = wavelet_name.lower()\n self.w_mat = None\n\n def build(self, input_shape):\n # create filter matrix\n h = int(input_shape[1])\n w = int(input_shape[2])\n if self.wave_name == 'dd2':\n w_mat = filters.dd2(h, w)\n else:\n w_mat = filters.ghm_w_mat(h, w)\n w_mat = tf.constant(w_mat, dtype=tf.float32)\n w_mat = tf.expand_dims(w_mat, axis=0)\n self.w_mat = tf.expand_dims(w_mat, axis=-1)\n # repeat if number of channels is bigger then 1\n if input_shape[-1] != 1:\n self.w_mat = tf.repeat(self.w_mat, input_shape[-1], axis=-1)\n\n def call(self, inputs, training=None, mask=None):\n if self.wave_name == 'dd2':\n res = analysis_filter_bank2d_dd2_mult(inputs, self.w_mat)\n else:\n res = analysis_filter_bank2d_ghm_mult(inputs, self.w_mat)\n return res\n\n\n# Inverse Discrete MultiWavelet transform Layer\n\nclass IDMWT(layers.Layer):\n \"\"\"\n Inverse Multi Wavelet Transform\n wave_name - name of the Wavele Filters (ghm, dd2)\n \"\"\"\n def __init__(self, wave_name='ghm', **kwargs):\n super(IDMWT, self).__init__(**kwargs)\n self.wave_name = wave_name\n self.w_mat = None\n\n def build(self, input_shape):\n # create filter matrix\n h = int(input_shape[1])//2\n w = int(input_shape[2])//2\n if self.wave_name == 'dd2':\n w_mat = filters.dd2(2*h, 2*w)\n else:\n w_mat = filters.ghm_w_mat(h, w)\n w_mat = tf.constant(w_mat, dtype=tf.float32)\n # transpose for the reconstruction\n w_mat = tf.transpose(w_mat, perm=[1, 0])\n w_mat = tf.expand_dims(w_mat, axis=-1)\n self.w_mat = tf.expand_dims(w_mat, axis=0)\n # repeat if channels bigger then 1\n if input_shape[-1] != 1:\n self.w_mat = tf.repeat(self.w_mat, input_shape[-1], axis=-1)\n\n def call(self, inputs, training=None, mask=None):\n if self.wave_name == 'dd2':\n res = synthesis_filter_bank2d_dd2_mult(inputs, self.w_mat)\n else:\n res = synthesis_filter_bank2d_ghm_mult(inputs, self.w_mat)\n\n return res\n\n\nif __name__ == \"__main__\":\n\n import cv2\n from tensorflow.keras import Model\n from tensorflow_wavelets.Layers import DWT\n from tensorflow_wavelets.Layers.Threshold import *\n from tensorflow_wavelets.utils.cast import *\n import numpy as np\n from tensorflow_wavelets.utils.mse import mse\n\n img = cv2.imread(\"../../../Development/input/LennaGrey.png\", 0)\n img_ex1 = np.expand_dims(img, axis=0)\n img_ex1 = np.expand_dims(img_ex1, axis=-1)\n\n # _, h, w, c = img_ex1.shape\n h, w, c = 512, 512, 1\n x_inp = layers.Input(shape=(h, w, c))\n x = DMWT(\"ghm\")(x_inp)\n x = Threshold(algo='1', mode=\"hard\")(x)\n x = IDMWT(\"ghm\")(x)\n model = Model(x_inp, x, name=\"MyModel\")\n model.summary()\n model.run_eagerly = True\n\n out = model.predict(img_ex1)\n print(mse(img, out[0, ..., 0]))\n cv2.imshow(\"orig\", out[0, ..., 0].astype(\"uint8\"))\n cv2.waitKey(0)\n\n #\n # out_l = tf_rgb_to_ndarray(out*2)\n # out1 = cast_like_matlab_uint8_2d_rgb(out_l)\n # cv2.imshow(\"orig\", out1.astype('uint8'))\n # 
cv2.waitKey(0)\n\n # x_inp = layers.Input(shape=(28, 28, 1))\n # x = DMWT()(x_inp)\n # # x = IDMWT()(x)\n # x = layers.Flatten()(x)\n # x = layers.Dense(10, activation=\"softmax\")(x)\n #\n # model = Model(x_inp, x, name=\"mymodel\")\n # model.summary()\n # optimizer = SGD(lr=1e-4, momentum=0.9)\n # model.compile(loss=\"categorical_crossentropy\",\n # optimizer=optimizer, metrics=[\"accuracy\"])\n # (x_train, y_train), (x_test, y_test) = mnist.load_data()\n #\n # y_train = to_categorical(y_train)\n # y_test = to_categorical(y_test)\n # x_train = x_train.astype('float32') / 255.0\n # x_train = np.expand_dims(x_train, axis=-1)\n #\n # x_test = x_test.astype('float32') / 255.0\n # x_test = np.expand_dims(x_test, axis=-1)\n # history = model.fit(x_train, y_train,\n # validation_split=0.2,\n # epochs=40,\n # batch_size=32,\n # verbose=2,\n # )\n"
] | [
[
"numpy.expand_dims",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.keras.Sequential",
"tensorflow.expand_dims",
"tensorflow.keras.layers.InputLayer",
"numpy.int32",
"tensorflow.repeat",
"tensorflow.zeros_like",
"tensorflow.pad",
"tensorflow.add_n",
"tensorflow.nn.conv2d"
],
[
"numpy.expand_dims",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
chetansurwade/great_expectations | [
"f488d861f3c00c73a6181d6bd5788fb8895079d9"
] | [
"tests/conftest.py"
] | [
"import datetime\nimport locale\nimport logging\nimport os\nimport random\nimport shutil\nimport sys\nimport warnings\nfrom typing import Dict, List, Optional\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom freezegun import freeze_time\nfrom ruamel.yaml import YAML\n\nimport great_expectations as ge\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration\nfrom great_expectations.core.expectation_suite import ExpectationSuite\nfrom great_expectations.core.expectation_validation_result import (\n ExpectationValidationResult,\n)\nfrom great_expectations.core.usage_statistics.usage_statistics import (\n UsageStatisticsHandler,\n)\nfrom great_expectations.core.util import get_or_create_spark_application\nfrom great_expectations.data_context.store.profiler_store import ProfilerStore\nfrom great_expectations.data_context.types.base import (\n AnonymizedUsageStatisticsConfig,\n CheckpointConfig,\n DataContextConfig,\n GeCloudConfig,\n)\nfrom great_expectations.data_context.types.resource_identifiers import (\n ConfigurationIdentifier,\n ExpectationSuiteIdentifier,\n GeCloudIdentifier,\n)\nfrom great_expectations.data_context.util import (\n file_relative_path,\n instantiate_class_from_config,\n)\nfrom great_expectations.dataset.pandas_dataset import PandasDataset\nfrom great_expectations.datasource import SqlAlchemyDatasource\nfrom great_expectations.datasource.data_connector.util import (\n get_filesystem_one_level_directory_glob_path_list,\n)\nfrom great_expectations.datasource.new_datasource import BaseDatasource, Datasource\nfrom great_expectations.rule_based_profiler.config import RuleBasedProfilerConfig\nfrom great_expectations.rule_based_profiler.config.base import (\n ruleBasedProfilerConfigSchema,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.simple_date_format_string_parameter_builder import (\n DEFAULT_CANDIDATE_STRINGS,\n)\nfrom great_expectations.self_check.util import (\n build_test_backends_list as build_test_backends_list_v3,\n)\nfrom great_expectations.self_check.util import (\n expectationSuiteValidationResultSchema,\n get_dataset,\n)\nfrom great_expectations.util import is_library_loadable\n\nRULE_BASED_PROFILER_MIN_PYTHON_VERSION: tuple = (3, 7)\n\nyaml = YAML()\n###\n#\n# NOTE: THESE TESTS ARE WRITTEN WITH THE en_US.UTF-8 LOCALE AS DEFAULT FOR STRING FORMATTING\n#\n###\n\nlocale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n\nlogger = logging.getLogger(__name__)\n\n\ndef skip_if_python_below_minimum_version():\n \"\"\"\n All test fixtures for Rule-Based Profiler must execute this method; for example:\n ```\n skip_if_python_below_minimum_version()\n ```\n for as long as the support for Python versions less than 3.7 is provided. 
In particular, Python-3.6 support for\n \"dataclasses.asdict()\" does not handle None values as well as the more recent versions of Python do.\n \"\"\"\n if sys.version_info < RULE_BASED_PROFILER_MIN_PYTHON_VERSION:\n pytest.skip(\n \"skipping fixture because Python version 3.7 (or greater) is required\"\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"smoketest: mark test as smoketest--it does not have useful assertions but may produce side effects \"\n \"that require manual inspection.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"rendered_output: produces rendered output that should be manually reviewed.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"aws_integration: runs aws integration test that may be very slow and requires credentials\",\n )\n\n\ndef pytest_addoption(parser):\n # note: --no-spark will be deprecated in favor of --spark\n parser.addoption(\n \"--no-spark\",\n action=\"store_true\",\n help=\"If set, suppress tests against the spark test suite\",\n )\n parser.addoption(\n \"--spark\",\n action=\"store_true\",\n help=\"If set, execute tests against the spark test suite\",\n )\n parser.addoption(\n \"--no-sqlalchemy\",\n action=\"store_true\",\n help=\"If set, suppress all tests using sqlalchemy\",\n )\n parser.addoption(\n \"--postgresql\",\n action=\"store_true\",\n help=\"If set, execute tests against postgresql\",\n )\n # note: --no-postgresql will be deprecated in favor of --postgresql\n parser.addoption(\n \"--no-postgresql\",\n action=\"store_true\",\n help=\"If set, supress tests against postgresql\",\n )\n parser.addoption(\n \"--mysql\",\n action=\"store_true\",\n help=\"If set, execute tests against mysql\",\n )\n parser.addoption(\n \"--mssql\",\n action=\"store_true\",\n help=\"If set, execute tests against mssql\",\n )\n parser.addoption(\n \"--bigquery\",\n action=\"store_true\",\n help=\"If set, execute tests against bigquery\",\n )\n parser.addoption(\n \"--aws\",\n action=\"store_true\",\n help=\"If set, execute tests against AWS resources like S3, RedShift and Athena\",\n )\n parser.addoption(\n \"--aws-integration\",\n action=\"store_true\",\n help=\"If set, run aws integration tests for usage_statistics\",\n )\n parser.addoption(\n \"--docs-tests\",\n action=\"store_true\",\n help=\"If set, run integration tests for docs\",\n )\n parser.addoption(\n \"--performance-tests\",\n action=\"store_true\",\n help=\"If set, run performance tests (which might also require additional arguments like --bigquery)\",\n )\n\n\ndef build_test_backends_list(metafunc):\n test_backend_names: List[str] = build_test_backends_list_cfe(metafunc)\n backend_name_class_name_map: Dict[str, str] = {\n \"pandas\": \"PandasDataset\",\n \"spark\": \"SparkDFDataset\",\n }\n backend_name: str\n return [\n (backend_name_class_name_map.get(backend_name) or backend_name)\n for backend_name in test_backend_names\n ]\n\n\ndef build_test_backends_list_cfe(metafunc):\n # adding deprecation warnings\n if metafunc.config.getoption(\"--no-postgresql\"):\n warnings.warn(\n \"--no-sqlalchemy is deprecated as of v0.14 in favor of the --postgresql flag. It will be removed in v0.16. Please adjust your tests accordingly\",\n DeprecationWarning,\n )\n if metafunc.config.getoption(\"--no-spark\"):\n warnings.warn(\n \"--no-spark is deprecated as of v0.14 in favor of the --spark flag. It will be removed in v0.16. 
Please adjust your tests accordingly.\",\n DeprecationWarning,\n )\n include_pandas: bool = True\n include_spark: bool = metafunc.config.getoption(\"--spark\")\n include_sqlalchemy: bool = not metafunc.config.getoption(\"--no-sqlalchemy\")\n include_postgresql: bool = metafunc.config.getoption(\"--postgresql\")\n include_mysql: bool = metafunc.config.getoption(\"--mysql\")\n include_mssql: bool = metafunc.config.getoption(\"--mssql\")\n include_bigquery: bool = metafunc.config.getoption(\"--bigquery\")\n include_aws: bool = metafunc.config.getoption(\"--aws\")\n test_backend_names: List[str] = build_test_backends_list_v3(\n include_pandas=include_pandas,\n include_spark=include_spark,\n include_sqlalchemy=include_sqlalchemy,\n include_postgresql=include_postgresql,\n include_mysql=include_mysql,\n include_mssql=include_mssql,\n include_bigquery=include_bigquery,\n include_aws=include_aws,\n )\n return test_backend_names\n\n\ndef pytest_generate_tests(metafunc):\n test_backends = build_test_backends_list(metafunc)\n if \"test_backend\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_backend\", test_backends, scope=\"module\")\n if \"test_backends\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_backends\", [test_backends], scope=\"module\")\n\n\ndef pytest_collection_modifyitems(config, items):\n if config.getoption(\"--aws-integration\"):\n # --aws-integration given in cli: do not skip aws-integration tests\n return\n if config.getoption(\"--docs-tests\"):\n # --docs-tests given in cli: do not skip documentation integration tests\n return\n skip_aws_integration = pytest.mark.skip(\n reason=\"need --aws-integration option to run\"\n )\n skip_docs_integration = pytest.mark.skip(reason=\"need --docs-tests option to run\")\n for item in items:\n if \"aws_integration\" in item.keywords:\n item.add_marker(skip_aws_integration)\n if \"docs\" in item.keywords:\n item.add_marker(skip_docs_integration)\n\n\[email protected](autouse=True)\ndef no_usage_stats(monkeypatch):\n # Do not generate usage stats from test runs\n monkeypatch.setenv(\"GE_USAGE_STATS\", \"False\")\n\n\[email protected](scope=\"module\")\ndef sa(test_backends):\n if not any(\n [dbms in test_backends for dbms in [\"postgresql\", \"sqlite\", \"mysql\", \"mssql\"]]\n ):\n pytest.skip(\"No recognized sqlalchemy backend selected.\")\n else:\n try:\n import sqlalchemy as sa\n\n return sa\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n\n\[email protected](index=2)\[email protected]\ndef spark_session(test_backends):\n if \"SparkDFDataset\" not in test_backends:\n pytest.skip(\"No spark backend selected.\")\n\n try:\n import pyspark\n from pyspark.sql import SparkSession\n\n return get_or_create_spark_application(\n spark_config={\n \"spark.sql.catalogImplementation\": \"hive\",\n \"spark.executor.memory\": \"450m\",\n # \"spark.driver.allowMultipleContexts\": \"true\", # This directive does not appear to have any effect.\n }\n )\n except ImportError:\n raise ValueError(\"spark tests are requested, but pyspark is not installed\")\n\n\[email protected]\ndef basic_spark_df_execution_engine(spark_session):\n from great_expectations.execution_engine import SparkDFExecutionEngine\n\n conf: List[tuple] = spark_session.sparkContext.getConf().getAll()\n spark_config: Dict[str, str] = dict(conf)\n execution_engine: SparkDFExecutionEngine = SparkDFExecutionEngine(\n spark_config=spark_config,\n )\n return execution_engine\n\n\[email protected](index=3)\[email protected]\ndef 
spark_session_v012(test_backends):\n if \"SparkDFDataset\" not in test_backends:\n pytest.skip(\"No spark backend selected.\")\n\n try:\n import pyspark\n from pyspark.sql import SparkSession\n\n return get_or_create_spark_application(\n spark_config={\n \"spark.sql.catalogImplementation\": \"hive\",\n \"spark.executor.memory\": \"450m\",\n # \"spark.driver.allowMultipleContexts\": \"true\", # This directive does not appear to have any effect.\n }\n )\n except ImportError:\n raise ValueError(\"spark tests are requested, but pyspark is not installed\")\n\n\[email protected]\ndef basic_expectation_suite(empty_data_context_stats_enabled):\n context: DataContext = empty_data_context_stats_enabled\n expectation_suite = ExpectationSuite(\n expectation_suite_name=\"default\",\n meta={},\n expectations=[\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\",\n kwargs={\"column\": \"infinities\"},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"nulls\"}\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"naturals\"}\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_unique\",\n kwargs={\"column\": \"naturals\"},\n ),\n ],\n data_context=context,\n )\n return expectation_suite\n\n\[email protected]\ndef numeric_high_card_dict():\n # fmt: off\n data = {\n \"norm_0_1\": [\n 0.7225866251125405, -0.5951819764073379, -0.2679313226299394, -0.22503289285616823, 0.1432092195399402, 1.1874676802669433, 1.2766412196640815, 0.15197071140718296, -0.08787273509474242, -0.14524643717509128, -1.236408169492396, -0.1595432263317598, 1.0856768114741797, 0.5082788229519655, 0.26419244684748955, -0.2532308428977167, -0.6362679196021943, -3.134120304969242, -1.8990888524318292, 0.15701781863102648,\n -0.775788419966582, -0.7400872167978756, -0.10578357492485335, 0.30287010067847436, -1.2127058770179304, -0.6750567678010801, 0.3341434318919877, 1.8336516507046157, 1.105410842250908, -0.7711783703442725, -0.20834347267477862, -0.06315849766945486, 0.003016997583954831, -1.0500016329150343, -0.9168020284223636, 0.306128397266698, 1.0980602112281863, -0.10465519493772572, 0.4557797534454941, -0.2524452955086468,\n -1.6176089110359837, 0.46251282530754667, 0.45751208998354903, 0.4222844954971609, 0.9651098606162691, -0.1364401431697167, -0.4988616288584964, -0.29549238375582904, 0.6950204582392359, 0.2975369992016046, -1.0159498719807218, 1.3704532401348395, 1.1210419577766673, 1.2051869452003332, 0.10749349867353084, -3.1876892257116562, 1.316240976262548, -1.3777452919511493, -1.0666211985935259, 1.605446695828751,\n -0.39682821266996865, -0.2828059717857655, 1.30488698803017, -2.116606225467923, -0.2026680301462151, -0.05504008273574069, -0.028520163428411835, 0.4424105678123449, -0.3427628263418371, 0.23805293411919937, -0.7515414823259695, -0.1272505897548366, 1.803348436304099, -2.0178252709022124, 0.4860300090112474, 1.2304054166426217, 0.7228668982068365, 1.7400607500575112, 0.3480274098246697, -0.3887978895385282,\n -1.6511926233909175, 0.14517929503564567, -1.1599010576123796, -0.016133552438119002, 0.47157644883706273, 0.27657785075518254, 1.4464286976282463, -1.2605489185634533, -1.2548765025615338, 0.0755319579826929, 1.0476733637516833, -0.7038690219524807, -0.9580696842862921, -0.18135657098008018, -0.18163993379314564, 0.4092798531146971, -2.049808182546896, -1.2447062617916826, -1.6681140306283337, 1.0709944517933483,\n -0.7059385234342846, 
-0.8033587669003331, -1.8152275905903312, 0.11729996097670137, 2.2994900038012376, -0.1291192451734159, -0.6731565869164164, -0.06690994571366346, -0.40330072968473235, -0.23927186025094221, 2.7756216937096676, 0.06441299443146056, -0.5095247173507204, -0.5228853558871007, 0.806629654091097, -2.110096084114651, -0.1233374136509439, -1.021178519845751, 0.058906278340351045, -0.26316852406211017,\n -1.2990807244026237, -0.1937986598084067, 0.3909222793445317, 0.578027315076297, -0.11837271520846208, -1.134297652720464, 0.496915417153268, -0.5315184110418045, 0.5284176849952198, -1.6810338988102331, 0.41220454054009154, 1.0554031136792, -1.4222775023918832, -1.1664353586956209, 0.018952180522661358, -0.04620616876577671, -0.8446292647938418, -0.6889432180332509, -0.16012081070647954, 0.5680940644754282,\n -1.9792941921407943, 0.35441842206114726, 0.12433268557499534, 0.25366905921805377, 0.6262297786892028, 1.327981424671081, 1.774834324890265, -0.9725604763128438, 0.42824027889428, 0.19725541390327114, 1.4640606982992412, 1.6484993842838995, 0.009848260786412894, -2.318740403198263, -0.4125245127403577, -0.15500831770388285, 1.010740123094443, 0.7509498708766653, -0.021415407776108144, 0.6466776546788641,\n -1.421096837521404, 0.5632248951325018, -1.230539161899903, -0.26766333435961503, -1.7208241092827994, -1.068122926814994, -1.6339248620455546, 0.07225436117508208, -1.2018233250224348, -0.07213000691963527, -1.0080992229563746, -1.151378048476321, -0.2660104149809121, 1.6307779136408695, 0.8394822016824073, -0.23362802143120032, -0.36799502320054384, 0.35359852278856263, 0.5830948999779656, -0.730683771776052,\n 1.4715728371820667, -1.0668090648998136, -1.025762014881618, 0.21056106958224155, -0.5141254207774576, -0.1592942838690149, 0.7688711617969363, -2.464535892598544, -0.33306989349452987, 0.9457207224940593, 0.36108072442574435, -0.6490066877470516, -0.8714147266896871, 0.6567118414749348, -0.18543305444915045, 0.11156511615955596, 0.7299392157186994, -0.9902398239693843, -1.3231344439063761, -1.1402773433114928,\n 0.3696183719476138, -1.0512718152423168, -0.6093518314203102, 0.0010622538704462257, -0.17676306948277776, -0.6291120128576891, 1.6390197341434742, -0.8105788162716191, -2.0105672384392204, -0.7909143328024505, -0.10510684692203587, -0.013384480496840259, 0.37683659744804815, -0.15123337965442354, 1.8427651248902048, 1.0371006855495906, 0.29198928612503655, -1.7455852392709181, 1.0854545339796853, 1.8156620972829793,\n 1.2399563224061596, 1.1196530775769857, 0.4349954478175989, 0.11093680938321168, 0.9945934589378227, -0.5779739742428905, 1.0398502505219054, -0.09401160691650227, 0.22793239636661505, -1.8664992140331715, -0.16104499274010126, -0.8497511318264537, -0.005035074822415585, -1.7956896952184151, 1.8304783101189757, 0.19094408763231646, 1.3353023874309002, 0.5889134606052353, -0.48487660139277866, 0.4817014755127622,\n 1.5981632863770983, 2.1416849775567943, -0.5524061711669017, 0.3364804821524787, -0.8609687548167294, 0.24548635047971906, -0.1281468603588133, -0.03871410517044196, -0.2678174852638268, 0.41800607312114096, -0.2503930647517959, 0.8432391494945226, -0.5684563173706987, -0.6737077809046504, 2.0559579098493606, -0.29098826888414253, -0.08572747304559661, -0.301857666880195, -0.3446199959065524, 0.7391340848217359,\n -0.3087136212446006, 0.5245553707204758, -3.063281336805349, 0.47471623010413705, 0.3733427291759615, -0.26216851429591426, -0.5433523111756248, 0.3305385199964823, -1.4866150542941634, -0.4699911958560942, 0.7312367186673805, 
-0.22346998944216903, -0.4102860865811592, -0.3003478250288424, -0.3436168605845268, 0.9456524589400904, -0.03710285453384255, 0.10330609878001526, 0.6919858329179392, 0.8673477607085118,\n 0.380742577915601, 0.5785785515837437, -0.011421905830097267, 0.587187810965595, -1.172536467775141, -0.532086162097372, -0.34440413367820183, -1.404900386188497, -0.1916375229779241, 1.6910999461291834, -0.6070351182769795, -0.8371447893868493, 0.8853944070432224, 1.4062946075925473, -0.4575973141608374, 1.1458755768004445, 0.2619874618238163, 1.7105876844856704, -1.3938976454537522, -0.11403217166441704,\n -1.0354305240085717, -0.4285770475062154, 0.10326635421187867, 0.6911853442971228, 0.6293835213179542, -0.819693698713199, -0.7378190403744175, -1.495947672573938, -1.2406693914431872, -1.0486341638186725, -1.3715759883075953, 3.585407817418151, -0.8007079372574223, -1.527336776754733, -0.4716571043072485, -0.6967311271405545, 1.0003347462169225, -0.30569565002022697, 0.3646134876772732, 0.49083033603832493,\n 0.07754580794955847, -0.13467337850920083, 0.02134473458605164, 0.5025183900540823, -0.940929087894874, 1.441600637127558, -0.0857298131221344, -0.575175243519591, 0.42622029657630595, -0.3239674701415489, 0.22648849821602596, -0.6636465305318631, 0.30415000329164754, -0.6170241274574016, 0.07578674772163065, 0.2952841441615124, 0.8120317689468056, -0.46861353019671337, 0.04718559572470416, -0.3105660017232523,\n -0.28898463203535724, 0.9575298065734561, -0.1977556031830993, 0.009658232624257272, 1.1432743259603295, -1.8989396918936858, 0.20787070770386357, 1.4256750543782999, -0.03838329973778874, -0.9051229357470373, -1.2002277085489457, 2.405569956130733, 1.895817948326675, -0.8260858325924574, 0.5759061866255807, 2.7022875569683342, 1.0591327405967745, 0.21449833798124354, 0.19970388388081273, 0.018242139911433558,\n -0.630960146999549, -2.389646042147776, 0.5424304992480339, -1.2159551561948718, -1.6851632640204128, -0.4812221268109694, 0.6217652794219579, -0.380139431677482, -0.2643524783321051, 0.5106648694993016, -0.895602157034141, -0.20559568725141816, 1.5449271875734911, 1.544075783565114, 0.17877619857826843, 1.9729717339967108, 0.8302033109816261, -0.39118561199170965, -0.4428357598297098, -0.02550407946753186,\n -1.0202977138210447, 2.6604654314300835, 1.9163029269361842, 0.34697436596877657, -0.8078124769022497, -1.3876596649099957, 0.44707250163663864, -0.6752837232272447, -0.851291770954755, 0.7599767868730256, 0.8134109401706875, -1.6766750539980289, -0.06051832829232975, -0.4652931327216134, -0.9249124398287735, 1.9022739762222731, 1.7632300613807597, 1.675335012283785, 0.47529854476887495, -0.7892463423254658,\n 0.3910120652706098, 0.5812432547936405, 0.2693084649672777, -0.08138564925779349, 0.9150619269526952, -0.8637356349272142, -0.14137853834901817, -0.20192754829896423, 0.04718228147088756, -0.9743600144318, -0.9936290943927825, 0.3544612180477054, 0.6839546770735121, 1.5089070357620178, 1.301167565172228, -1.5396145667672985, 0.42854366341485456, -1.5876582617301032, -0.0316985879141714, 0.3144220016570915,\n -0.05054766725644431, 0.2934139006870167, 0.11396170275994542, -0.6472140129693643, 1.6556030742445431, 1.0319410208453506, 0.3292217603989991, -0.058758121958605435, -0.19917171648476298, -0.5192866115874029, 0.1997510689920335, -1.3675686656161756, -1.7761517497832053, -0.11260276070167097, 0.9717892642758689, 0.0840815981843948, -0.40211265381258554, 0.27384496844034517, -1.0403875081272367, 1.2884781173493884,\n -1.8066239592554476, 1.1136979156298865, 
-0.06223155785690416, 1.3930381289015936, 0.4586305673655182, 1.3159249757827194, -0.5369892835955705, 0.17827408233621184, 0.22693934439969682, 0.8216240002114816, -1.0422409752281838, 0.3329686606709231, -1.5128804353968217, 1.0323052869815534, 1.1640486934424354, 1.6450118078345612, -0.6717687395070293, -0.08135119186406627, 1.2746921873544188, -0.8255794145095643,\n 0.7123504776564864, 0.6953336934741682, 2.191382322698439, 1.4155790749261592, 2.4681081786912866, -2.2904357033803815, -0.8375155191566624, 1.1040106662196736, 0.7084133268872015, -3.401968681942055, 0.23237090512844757, 1.1199436238058174, 0.6333916486592628, -0.6012340913121055, -0.3693951838866523, -1.7742670566875682, -0.36431378282545124, -0.4042586409194551, -0.04648644034604476, 1.5138191613743486,\n -0.2053670782251071, 1.8679122383251414, 0.8355881018692999, -0.5369705129279005, -0.7909355080370954, 2.1080036780007987, 0.019537331188020687, -1.4672982688640615, -1.486842866467901, -1.1036839537574874, 1.0800858540685894, -0.2313974176207594, 0.47763272078271807, -1.9196070490691473, -0.8193535127855751, -0.6853651905832031, -0.18272370464882973, -0.33413577684633056, 2.2261342671906106, 1.6853726343573683,\n 0.8563421109235769, 1.0468799885096596, 0.12189082561416206, -1.3596466927672854, -0.7607432068282968, 0.7061728288620306, -0.4384478018639071, 0.8620104661898899, 1.04258758121448, -1.1464159128515612, 0.9617945424413628, 0.04987102831355013, -0.8472878887606543, 0.32986774370339184, 1.278319839581162, -0.4040926804592034, -0.6691567800662129, 0.9415431940597389, 0.3974846022291844, -0.8425204662387112,\n -1.506166868030291, -0.04248497940038203, 0.26434168799067986, -1.5698380163561454, -0.6651727917714935, 1.2400220571204048, -0.1251830593977037, 0.6156254221302833, 0.43585628657139575, -1.6014619037611209, 1.9152323656075512, -0.8847911114213622, 1.359854519784993, -0.5554989575409871, 0.25064804193232354, 0.7976616257678464, 0.37834567410982123, -0.6300374359617635, -1.0613465068052854, -0.866474302027355,\n 1.2458556977164312, 0.577814049080149, 2.069400463823993, 0.9068690176961165, -0.5031387968484738, -0.3640749863516844, -1.041502465417534, 0.6732994659644133, -0.006355018868252906, -0.3650517541386253, 1.0975063446734974, -2.203726812834859, 1.060685913143899, -0.4618706570892267, 0.06475263817517128, -0.19326357638969882, -0.01812119454736379, 0.1337618009668529, 1.1838276997792907, 0.4273677345455913,\n -0.4912341608307858, 0.2349993979417651, 0.9566260826411601, -0.7948243131958422, -0.6168334352331588, 0.3369425926447926, 0.8547756445246633, 0.2666330662219728, 2.431868771129661, 1.0089732701876513, -0.1162341515974066, -1.1746306816795218, -0.08227639025627424, 0.794676385688044, 0.15005011094018297, -0.8763821573601055, -1.0811684990769739, 0.6311588092267179, 0.026124278982220386, 0.8306502001533514,\n 1.0856487813261877, -0.018702855899823106, -0.07338137135247896, -0.8435746484744243, -0.18091216366556986, 0.2295807891528797, -1.0689295774443397, -1.5621175533013612, 1.3314045672598216, 0.6211561903553582, 1.0479302317100871, -1.1509436982013124, 0.447985084931758, 0.19917261474342404, 0.3582887259341301, 0.9953552868908098, 0.8948165434511316, 0.4949033431999123, -0.23004847985703908, 0.6411581535557106,\n -1.1589671573242186, -0.13691519182560624, -0.8849560872785238, 0.6629182075027006, 2.2608150731789696, 2.2823614453180294, -1.2291376923498247, -0.9267975556981378, 0.2597417839242135, -0.7667310491821938, 0.10503294084132372, 2.960320355577672, -1.0645098483081497, -1.2888339889815872, 
-0.6564570556444346, 0.4742489396354781, 0.8879606773334898, -0.6477585196839569, -0.7309497810668936, 1.7025953934976548,\n 0.1789174966941155, -0.4839093362740933, -0.8917713440107442, 1.4521776747175792, -0.1676974219641624, -0.500672037099228, -0.2947747621553442, 0.929636971325952, -0.7614935150071248, 1.6886298813725842, -0.8136217834373227, 1.2030997228178093, 1.382267485738376, 2.594387458306705, -0.7703668776292266, -0.7642584795112598, 1.3356598324609947, -0.5745269784148925, -2.212092904499444, -1.727975556661197,\n -0.18543087256023608, -0.10167435635752538, 1.3480966068787303, 0.0142803272337873, -0.480077631815393, -0.32270216749876185, -1.7884435311074431, -0.5695640948971382, -0.22859087912027687, -0.08783386938029487, -0.18151955278624396, 0.2031493507095467, 0.06444304447669409, -0.4339138073294572, 0.236563959074551, -0.2937958719187449, 0.1611232843821199, -0.6574871644742827, 1.3141902865107886, 0.6093649138398077,\n 0.056674985715912514, -1.828714441504608, -0.46768482587669535, 0.6489735384886999, 0.5035677725398181, -0.887590772676158, -0.3222316759913631, -0.35172770495027483, -0.4329205472963193, -0.8449916868048998, 0.38282765028957993, 1.3171924061732359, 0.2956667124648384, 0.5390909497681301, -0.7591989862253667, -1.1520792974885883, -0.39344757869384944, 0.6192677330177175, -0.05578834574542242, 0.593015990282657,\n 0.9374465229256678, 0.647772562443425, 1.1071167572595217, -1.3015016617832518, 1.267300472456379, -0.5807673178649629, 0.9343468385348384, -0.28554893036513673, 0.4487573993840033, 0.6749018890520516, -1.20482985206765, 0.17291806504654686, -0.4124576407610529, -0.9203236505429044, -0.7461342369802754, -0.19694162321688435, 0.46556512963300906, 0.5198366004764268, -1.7222561645076129, -0.7078891617994071,\n -1.1653209054214695, 1.5560964971092122, 0.3335520152642012, 0.008390825910327906, 0.11336719644324977, 0.3158913817073965, 0.4704483453862008, -0.5700583482495889, -1.276634964816531, -1.7880560933777756, -0.26514994709973827, 0.6194447367446946, -0.654762456435761, 1.0621929196158544, 0.4454719444987052, -0.9323145612076791, 1.3197357985874438, -0.8792938558447049, -0.2470423905508279, 0.5128954444799875,\n -0.09202044992462606, -1.3082892596744382, -0.34428948138804927, 0.012422196356164879, 1.4626152292162142, 0.34678216997159833, 0.409462409138861, 0.32838364873801185, 1.8776849459782967, 1.6816627852133539, -0.24894138693568296, 0.7150105850753732, 0.22929306929129853, -0.21434910504054566, 1.3339497173912471, -1.2497042452057836, -0.04487255356399775, -0.6486304639082145, -0.8048044333264733, -1.8090170501469942,\n 1.481689285694336, -1.4772553200884717, -0.36792462539303805, -1.103508260812736, -0.2135236993720317, 0.40889179796540165, 1.993585196733386, 0.43879096427562897, -0.44512875171982147, -1.1780830020629518, -1.666001035275436, -0.2977294957665528, 1.7299614542270356, 0.9882265798853356, 2.2412430815464597, 0.5801434875813244, -0.739190619909163, -1.2663490594895201, 0.5735521649879137, 1.2105709455012765,\n 1.9112159951415644, -2.259218931706201, -0.563310876529377, -2.4119185903750493, 0.9662624485722368, -0.22788851242764951, 0.9198283887420099, 0.7855927065251492, -0.7459868094792474, 0.10543289218409971, 0.6401750224618271, -0.0077375118689326705, -0.11647036625911977, -0.4722391874001602, -0.2718425102733572, -0.8796746964457087, 0.6112903638894259, 0.5347851929096421, -0.4749419210717794, 1.0633720764557604,\n -0.2590556665572949, 2.590182301241823, 1.4524061372706638, -0.8503733047335056, 0.5609357391481067, 
-1.5661825434426477, 0.8019667474525984, 1.2716795425969496, 0.20011166646917924, -0.7105405282282679, -0.5593129072748189, -1.2401371010520867, -0.7002520937780202, -2.236596391787529, -1.8130090502823886, -0.23990633860801777, 1.7428780878151378, 1.4661206538178901, -0.8678567353744017, 0.2957423562639015,\n 0.13935419069962593, 1.399598845123674, 0.059729544605779575, -0.9607778026198247, 0.18474907798482051, 1.0117193651915666, -0.9173540069396245, 0.8934765521365161, -0.665655291396948, -0.32955768273493324, 0.3062873812209283, 0.177342106982554, 0.3595522704599547, -1.5964209653110262, 0.6705899137346863, -1.1034642863469553, -1.0029562484065524, 0.10622956543479244, 0.4261871936541378, 0.7777501694354336,\n -0.806235923997437, -0.8272801398172428, -1.2783440745845536, 0.5982979227669168, -0.28214494859284556, 1.101560367699546, -0.14008021262664466, -0.38717961692054237, 0.9962925044431369, -0.7391490127960976, -0.06294945881724459, 0.7283671247384875, -0.8458895297768138, 0.22808829204347086, 0.43685668023014523, 0.9204095286935638, -0.028241645704951284, 0.15951784765135396, 0.8068984900818966, -0.34387965576978663,\n 0.573828962760762, -0.13374515460012618, -0.5552788325377814, 0.5644705833909952, -0.7500532220469983, 0.33436674493862256, -0.8595435026628129, -0.38943898244735853, 0.6401502590131951, -1.2968645995363652, 0.5861622311675501, 0.2311759458689689, 0.10962292708600496, -0.26025023584932205, -0.5398478003611565, -1.0514168636922954, 1.2689172189127857, 1.7029909647408918, -0.02325431623491577, -0.3064675950620902,\n -1.5816446841009473, 0.6874254059433739, 0.7755967316475798, 1.4119333324396597, 0.14198739135512406, 0.2927714469848192, -0.7239793888399496, 0.3506448783535265, -0.7568480706640158, -1.2158508387501554, 0.22197589131086445, -0.5621415304506887, -1.2381112050191665, -1.917208333033256, -0.3321665793941188, -0.5916951886991071, -1.244826507645294, -0.29767661008214463, 0.8590635852032509, -1.8579290298421591,\n -1.0470546224962876, -2.540080936704841, 0.5458326769958273, 0.042222128206941614, 0.6080450228346708, 0.6542717901662132, -1.7292955132690793, -0.4793123354077725, 0.7341767020417185, -1.3322222208234826, -0.5076389542432337, 0.684399163420284, 0.3948487980667425, -1.7919279627150193, 1.582925890933478, 0.8341846456063038, 0.11776890377042544, 1.7471239793853526, 1.2269451783893597, 0.4235463733287474,\n 1.5908284320029056, -1.635191535538596, 0.04419903330064594, -1.264385360373252, 0.5370192519783876, 1.2368603501240771, -0.9241079150337286, -0.3428051342915208, 0.0882286441353256, -2.210824604513402, -1.9000343283757128, 0.4633735273417207, -0.32534396967175094, 0.026187836765356437, 0.18253601230609245, 0.8519745761039671, -0.028225375482784816, -0.5114197447067229, -1.2428743809444227, 0.2879711400745508,\n 1.2857130031108321, 0.5296743558975853, -0.8440551904275335, -1.3776032491368861, 1.8164028526343798, -1.1422045767986222, -1.8675179752970443, 0.6969635320800454, 0.9444010906414336, -1.28197913481747, -0.06259132322304235, -0.4518754825442558, 0.9183188639099813, -0.2916931407869574, -1.1464007469977915, -0.4475136941593681, 0.44385573868752803, 2.1606711638680762, -1.4813603018181851, -0.5647618024870872,\n -1.474746204557383, -2.9067748098220485, 0.06132111635940877, -0.09663310829361334, -1.087053744976143, -1.774855117659402, 0.8130120568830074, -0.5179279676199186, -0.32549430825787784, -1.1995838271705979, 0.8587480835176114, -0.02095126282663596, 0.6677898019388228, -1.1891003375304232, -2.1125937754631305, -0.047765192715672734, 
0.09812525010300294, -1.034992359189106, 1.0213451864081846, 1.0788796513160641,\n -1.444469239557739, 0.28341828947950637, -2.4556013891966737, 1.7126080715698266, -0.5943068899412715, 1.0897594994215383, -0.16345461884651272, 0.7027032523865234, 2.2851158088542562, 0.5038100496225458, -0.16724173993999966, -0.6747457076421414, 0.42254684460738184, 1.277203836895222, -0.34438446183574595, 0.38956738377878264, -0.26884968654334923, -0.02148772950361766, 0.02044885235644607, -1.3873669828232345,\n 0.19995968746809226, -1.5826859815811556, -0.20385119370067947, 0.5724329589281247, -1.330307658319185, 0.7756101314358208, -0.4989071461473931, 0.5388161769427321, -0.9811085284266614, 2.335331094403556, -0.5588657325211347, -1.2850853695283377, 0.40092993245913744, -1.9675685522110529, 0.9378938542456674, -0.18645815013912917, -0.6828273180353106, -1.840122530632185, -1.2581798109361761, 0.2867275394896832,\n ],\n }\n # fmt: on\n return data\n\n\[email protected]\ndef numeric_high_card_dataset(test_backend, numeric_high_card_dict):\n schemas = {\n \"pandas\": {\n \"norm_0_1\": \"float64\",\n },\n \"postgresql\": {\n # \"norm_0_1\": \"DOUBLE_PRECISION\",\n \"norm_0_1\": \"NUMERIC\",\n },\n \"sqlite\": {\n \"norm_0_1\": \"FLOAT\",\n },\n \"mysql\": {\n \"norm_0_1\": \"DOUBLE\",\n },\n \"mssql\": {\n \"norm_0_1\": \"FLOAT\",\n },\n \"spark\": {\n \"norm_0_1\": \"FloatType\",\n },\n }\n return get_dataset(test_backend, numeric_high_card_dict, schemas=schemas)\n\n\[email protected]\ndef non_numeric_high_card_dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n\n # fmt: off\n data = {\n \"highcardnonnum\": [\n \"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\", \"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\", \"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\", \"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\", \"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\", \"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\", \"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\", \"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\", \"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\", \"gLCtw7435gaR532PNFVCtvk14lNJpZXv\",\n \"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\", \"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\", \"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\", \"cIfDv6ieTAobe84P84InzDKrJrccmqbq\", \"m1979gfI6lVF9ijJA245bchYFd1EaMap\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\", \"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\", \"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\", \"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\",\n \"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\", \"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\", \"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\", \"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\", \"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\", \"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\", \"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\", \"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\", \"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\", \"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\", \"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\", \"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\", \"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\", \"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\", \"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\", \"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\",\n \"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\", \"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\", \"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\", \"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\", \"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\", 
\"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\", \"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\", \"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\",\n \"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\", \"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\", \"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\", \"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\", \"3svDRnrELyAsC69Phpnl2Os89856tFBJ\", \"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\", \"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\", \"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\",\n \"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\", \"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\", \"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\", \"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\", \"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\", \"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\", \"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\", \"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\", \"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\", \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\", \"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\", \"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\", \"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\", \"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\", \"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\", \"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\",\n \"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\", \"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\", \"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\", \"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\", \"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\", \"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\", \"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\", \"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\", \"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\", \"suns0vGgaMzasYpwDEEof2Ktovy0o4os\",\n \"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\", \"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\", \"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\", \"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\", \"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\", \"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\", \"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\", \"hmr0LNyYObqe5sURs408IhRb50Lnek5K\",\n \"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\", \"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\", \"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\", \"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\", \"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\", \"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\", \"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\", \"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\", \"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\", \"gLCtw7435gaR532PNFVCtvk14lNJpZXv\",\n \"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\", \"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\", \"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\", \"cIfDv6ieTAobe84P84InzDKrJrccmqbq\", \"m1979gfI6lVF9ijJA245bchYFd1EaMap\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\", \"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\", \"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\", \"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\",\n \"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\", \"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\", \"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\", \"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\", \"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\", \"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\", \"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\", \"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\", \"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\", \"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\", \"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\", \"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\", \"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\", \"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\", \"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\", \"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\",\n 
\"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\", \"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\", \"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\", \"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\", \"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\", \"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\", \"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\", \"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\",\n \"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\", \"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\", \"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\", \"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\", \"3svDRnrELyAsC69Phpnl2Os89856tFBJ\", \"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\", \"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\", \"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\",\n \"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\", \"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\", \"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\", \"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\", \"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\", \"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\", \"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\", \"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\", \"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\", \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\", \"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\", \"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\", \"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\", \"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\", \"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\", \"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\",\n \"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\", \"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\", \"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\", \"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\", \"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\", \"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\", \"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\", \"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\", \"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\", \"suns0vGgaMzasYpwDEEof2Ktovy0o4os\",\n \"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\", \"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\", \"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\", \"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\", \"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\", \"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\", \"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\", \"hmr0LNyYObqe5sURs408IhRb50Lnek5K\",\n ],\n # Built from highcardnonnum using the following:\n # vals = pd.Series(data[\"highcardnonnum\"])\n # sample_vals = vals.sample(n=10, random_state=42)\n # weights = np.random.RandomState(42).rand(10)\n # weights = weights / np.sum(weights)\n # new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)\n \"medcardnonnum\": [\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", 
\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", 
\"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\",\n \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n ],\n }\n # fmt: on\n schemas = {\n \"pandas\": {\n \"highcardnonnum\": \"str\",\n \"medcardnonnum\": \"str\",\n },\n \"postgresql\": {\n \"highcardnonnum\": \"TEXT\",\n \"medcardnonnum\": \"TEXT\",\n },\n \"sqlite\": {\n \"highcardnonnum\": \"VARCHAR\",\n \"medcardnonnum\": \"VARCHAR\",\n },\n \"mysql\": {\n \"highcardnonnum\": \"TEXT\",\n 
\"medcardnonnum\": \"TEXT\",\n },\n \"mssql\": {\n \"highcardnonnum\": \"VARCHAR\",\n \"medcardnonnum\": \"VARCHAR\",\n },\n \"spark\": {\n \"highcardnonnum\": \"StringType\",\n \"medcardnonnum\": \"StringType\",\n },\n }\n return get_dataset(test_backend, data, schemas=schemas)\n\n\ndef dataset_sample_data(test_backend):\n # No infinities for mysql\n if test_backend == \"mysql\":\n data = {\n # \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10/2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n else:\n data = {\n \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n schemas = {\n \"pandas\": {\"infinities\": \"float64\", \"nulls\": \"float64\", \"naturals\": \"float64\"},\n \"postgresql\": {\n \"infinities\": \"DOUBLE_PRECISION\",\n \"nulls\": \"DOUBLE_PRECISION\",\n \"naturals\": \"NUMERIC\",\n },\n \"sqlite\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n \"mysql\": {\"nulls\": \"DOUBLE\", \"naturals\": \"DOUBLE\"},\n \"mssql\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n \"spark\": {\n \"infinities\": \"FloatType\",\n \"nulls\": \"FloatType\",\n \"naturals\": \"FloatType\",\n },\n }\n return data, schemas\n\n\[email protected]\ndef dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n data, schemas = dataset_sample_data(test_backend)\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef pandas_dataset():\n test_backend = \"PandasDataset\"\n data, schemas = dataset_sample_data(test_backend)\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef sqlalchemy_dataset(test_backends):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n if \"postgresql\" in test_backends:\n backend = \"postgresql\"\n elif \"sqlite\" in test_backends:\n backend = \"sqlite\"\n else:\n return\n\n data = {\n \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n schemas = {\n \"postgresql\": {\n \"infinities\": \"DOUBLE_PRECISION\",\n \"nulls\": \"DOUBLE_PRECISION\",\n \"naturals\": \"DOUBLE_PRECISION\",\n },\n \"sqlite\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n }\n return get_dataset(backend, data, schemas=schemas, profiler=None)\n\n\[email protected]\ndef sqlitedb_engine(test_backend):\n if test_backend == \"sqlite\":\n try:\n import sqlalchemy as sa\n\n return sa.create_engine(\"sqlite://\")\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n else:\n pytest.skip(\"Skipping test designed for sqlite on non-sqlite backend.\")\n\n\[email protected]\ndef postgresql_engine(test_backend):\n if test_backend == \"postgresql\":\n try:\n import sqlalchemy as sa\n\n db_hostname = os.getenv(\"GE_TEST_LOCAL_DB_HOSTNAME\", \"localhost\")\n engine = sa.create_engine(\n f\"postgresql://postgres@{db_hostname}/test_ci\"\n ).connect()\n yield engine\n engine.close()\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n else:\n pytest.skip(\"Skipping test designed for postgresql on non-postgresql 
backend.\")\n\n\[email protected](scope=\"function\")\ndef empty_data_context(\n tmp_path,\n) -> DataContext:\n project_path = tmp_path / \"empty_data_context\"\n project_path.mkdir()\n project_path = str(project_path)\n context = ge.data_context.DataContext.create(project_path)\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n os.makedirs(asset_config_path, exist_ok=True)\n assert context.list_datasources() == []\n return context\n\n\[email protected]\ndef titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled(\n tmp_path_factory,\n monkeypatch,\n):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n\n project_path: str = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path: str = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path: str = os.path.join(context_path, \"..\", \"data\", \"titanic\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"great_expectations_v013_no_datasource_stats_enabled.yml\",\n ),\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n file_relative_path(__file__, os.path.join(\"test_sets\", \"Titanic.csv\")),\n str(\n os.path.join(\n context_path, \"..\", \"data\", \"titanic\", \"Titanic_19120414_1313.csv\"\n )\n ),\n )\n shutil.copy(\n file_relative_path(__file__, os.path.join(\"test_sets\", \"Titanic.csv\")),\n str(\n os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_19120414_1313\")\n ),\n )\n shutil.copy(\n file_relative_path(__file__, os.path.join(\"test_sets\", \"Titanic.csv\")),\n str(os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_1911.csv\")),\n )\n shutil.copy(\n file_relative_path(__file__, os.path.join(\"test_sets\", \"Titanic.csv\")),\n str(os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_1912.csv\")),\n )\n\n context: DataContext = DataContext(context_root_dir=context_path)\n assert context.root_directory == context_path\n\n datasource_config: str = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_basic_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {data_path}\n default_regex:\n pattern: (.*)\\\\.csv\n group_names:\n - data_asset_name\n\n my_special_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {data_path}\n glob_directive: \"*.csv\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - name\n assets:\n users:\n base_directory: {data_path}\n pattern: (.+)_(\\\\d+)_(\\\\d+)\\\\.csv\n group_names:\n - name\n - timestamp\n - size\n\n my_other_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {data_path}\n glob_directive: \"*.csv\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - name\n assets:\n users: {{}}\n\n my_runtime_data_connector:\n module_name: great_expectations.datasource.data_connector\n class_name: RuntimeDataConnector\n batch_identifiers:\n - pipeline_stage_name\n - airflow_run_id\n \"\"\"\n\n # noinspection PyUnusedLocal\n datasource: Datasource = context.test_yaml_config(\n name=\"my_datasource\", yaml_config=datasource_config, pretty_print=False\n )\n # noinspection PyProtectedMember\n 
context._save_project_config()\n\n return context\n\n\[email protected]\ndef titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n tmp_path_factory,\n monkeypatch,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n project_dir: str = context.root_directory\n data_path: str = os.path.join(project_dir, \"..\", \"data\", \"titanic\")\n\n datasource_config: str = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_additional_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {data_path}\n default_regex:\n pattern: (.*)\\\\.csv\n group_names:\n - data_asset_name\n \"\"\"\n\n # noinspection PyUnusedLocal\n datasource: BaseDatasource = context.add_datasource(\n \"my_additional_datasource\", **yaml.load(datasource_config)\n )\n\n return context\n\n\[email protected]\ndef titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled(\n sa,\n spark_session,\n titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,\n tmp_path_factory,\n test_backends,\n monkeypatch,\n):\n context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled\n\n project_dir: str = context.root_directory\n data_path: str = os.path.join(project_dir, \"..\", \"data\", \"titanic\")\n\n if (\n any(\n [\n dbms in test_backends\n for dbms in [\"postgresql\", \"sqlite\", \"mysql\", \"mssql\"]\n ]\n )\n and (sa is not None)\n and is_library_loadable(library_name=\"sqlalchemy\")\n ):\n db_fixture_file_path: str = file_relative_path(\n __file__,\n os.path.join(\"test_sets\", \"titanic_sql_test_cases.db\"),\n )\n db_file_path: str = os.path.join(\n data_path,\n \"titanic_sql_test_cases.db\",\n )\n shutil.copy(\n db_fixture_file_path,\n db_file_path,\n )\n\n datasource_config: str = f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n connection_string: sqlite:///{db_file_path}\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n default_inferred_data_connector_name:\n class_name: InferredAssetSqlDataConnector\n name: whole_table\n \"\"\"\n\n # noinspection PyUnusedLocal\n datasource: BaseDatasource = context.add_datasource(\n \"my_sqlite_db_datasource\", **yaml.load(datasource_config)\n )\n\n return context\n\n\[email protected]\ndef deterministic_asset_dataconnector_context(\n tmp_path_factory,\n monkeypatch,\n):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\", \"titanic\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n \"./test_fixtures/great_expectations_v013_no_datasource_stats_enabled.yml\",\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(\n os.path.join(\n 
context_path, \"..\", \"data\", \"titanic\", \"Titanic_19120414_1313.csv\"\n )\n ),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_1911.csv\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_1912.csv\")),\n )\n context = ge.data_context.DataContext(context_path)\n assert context.root_directory == context_path\n\n datasource_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_other_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {data_path}\n glob_directive: \"*.csv\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - name\n assets:\n users: {{}}\n \"\"\"\n\n context.test_yaml_config(\n name=\"my_datasource\", yaml_config=datasource_config, pretty_print=False\n )\n # noinspection PyProtectedMember\n context._save_project_config()\n return context\n\n\[email protected]\ndef titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n # add simple template config\n simple_checkpoint_template_config: CheckpointConfig = CheckpointConfig(\n name=\"my_simple_template_checkpoint\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template-$VAR\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n },\n runtime_configuration={\n \"result_format\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n }\n },\n )\n simple_checkpoint_template_config_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=simple_checkpoint_template_config.name\n )\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_template_config_key,\n value=simple_checkpoint_template_config,\n )\n\n # add nested template configs\n nested_checkpoint_template_config_1: CheckpointConfig = CheckpointConfig(\n name=\"my_nested_checkpoint_template_1\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template-$VAR\",\n expectation_suite_name=\"suite_from_template_1\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"FOO\",\n \"tolerance\": \"FOOBOO\",\n \"aux_param_0\": \"FOOBARBOO\",\n \"aux_param_1\": \"FOOBARBOO\",\n \"template_1_key\": 456,\n },\n runtime_configuration={\n \"result_format\": \"FOOBARBOO\",\n 
\"partial_unexpected_count\": \"FOOBARBOO\",\n \"template_1_key\": 123,\n },\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource_template_1\",\n \"data_connector_name\": \"my_special_data_connector_template_1\",\n \"data_asset_name\": \"users_from_template_1\",\n \"data_connector_query\": {\"partition_index\": -999},\n }\n }\n ],\n )\n nested_checkpoint_template_config_1_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=nested_checkpoint_template_config_1.name\n )\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_1_key,\n value=nested_checkpoint_template_config_1,\n )\n\n nested_checkpoint_template_config_2: CheckpointConfig = CheckpointConfig(\n name=\"my_nested_checkpoint_template_2\",\n config_version=1,\n template_name=\"my_nested_checkpoint_template_1\",\n run_name_template=\"%Y-%M-foo-bar-template-$VAR-template-2\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersActionTemplate2\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n {\n \"name\": \"new_action_from_template_2\",\n \"action\": {\"class_name\": \"Template2SpecialAction\"},\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n },\n runtime_configuration={\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n },\n )\n nested_checkpoint_template_config_2_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=nested_checkpoint_template_config_2.name\n )\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_2_key,\n value=nested_checkpoint_template_config_2,\n )\n\n nested_checkpoint_template_config_3: CheckpointConfig = CheckpointConfig(\n name=\"my_nested_checkpoint_template_3\",\n config_version=1,\n template_name=\"my_nested_checkpoint_template_2\",\n run_name_template=\"%Y-%M-foo-bar-template-$VAR-template-3\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersActionTemplate3\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n {\n \"name\": \"new_action_from_template_3\",\n \"action\": {\"class_name\": \"Template3SpecialAction\"},\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n \"template_3_key\": 123,\n },\n runtime_configuration={\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n \"template_3_key\": \"bloopy!\",\n },\n )\n nested_checkpoint_template_config_3_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=nested_checkpoint_template_config_3.name\n )\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_3_key,\n value=nested_checkpoint_template_config_3,\n )\n\n # add minimal SimpleCheckpoint\n simple_checkpoint_config: CheckpointConfig = CheckpointConfig(\n name=\"my_minimal_simple_checkpoint\",\n 
class_name=\"SimpleCheckpoint\",\n config_version=1,\n )\n simple_checkpoint_config_key: ConfigurationIdentifier = ConfigurationIdentifier(\n configuration_key=simple_checkpoint_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_config_key,\n value=simple_checkpoint_config,\n )\n\n # add SimpleCheckpoint with slack webhook\n simple_checkpoint_with_slack_webhook_config: CheckpointConfig = CheckpointConfig(\n name=\"my_simple_checkpoint_with_slack\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n slack_webhook=\"https://hooks.slack.com/foo/bar\",\n )\n simple_checkpoint_with_slack_webhook_config_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_slack_webhook_config.name\n )\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_slack_webhook_config_key,\n value=simple_checkpoint_with_slack_webhook_config,\n )\n\n # add SimpleCheckpoint with slack webhook and notify_with\n simple_checkpoint_with_slack_webhook_and_notify_with_all_config: CheckpointConfig = CheckpointConfig(\n name=\"my_simple_checkpoint_with_slack_and_notify_with_all\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n slack_webhook=\"https://hooks.slack.com/foo/bar\",\n notify_with=\"all\",\n )\n simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key: ConfigurationIdentifier = ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key,\n value=simple_checkpoint_with_slack_webhook_and_notify_with_all_config,\n )\n\n # add SimpleCheckpoint with site_names\n simple_checkpoint_with_site_names_config: CheckpointConfig = CheckpointConfig(\n name=\"my_simple_checkpoint_with_site_names\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n site_names=[\"local_site\"],\n )\n simple_checkpoint_with_site_names_config_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_site_names_config.name\n )\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_site_names_config_key,\n value=simple_checkpoint_with_site_names_config,\n )\n\n # noinspection PyProtectedMember\n context._save_project_config()\n return context\n\n\[email protected]\ndef empty_context_with_checkpoint(empty_data_context):\n context = empty_data_context\n root_dir = empty_data_context.root_directory\n fixture_name = \"my_checkpoint.yml\"\n fixture_path = file_relative_path(\n __file__, f\"./data_context/fixtures/contexts/{fixture_name}\"\n )\n checkpoints_file = os.path.join(root_dir, \"checkpoints\", fixture_name)\n shutil.copy(fixture_path, checkpoints_file)\n assert os.path.isfile(checkpoints_file)\n return context\n\n\[email protected]\ndef empty_data_context_stats_enabled(tmp_path_factory, monkeypatch):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\", raising=False)\n project_path = str(tmp_path_factory.mktemp(\"empty_data_context\"))\n context = ge.data_context.DataContext.create(project_path)\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n os.makedirs(asset_config_path, exist_ok=True)\n return context\n\n\[email protected]\ndef titanic_data_context(\n tmp_path_factory,\n) -> DataContext:\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, 
\"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_no_data_docs_no_checkpoint_store(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_titanic_pre_v013_no_data_docs.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_no_data_docs(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_titanic_no_data_docs.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_stats_enabled(tmp_path_factory, monkeypatch):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, 
str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_stats_enabled_config_version_2(tmp_path_factory, monkeypatch):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_stats_enabled_config_version_3(tmp_path_factory, monkeypatch):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_upgraded_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_sqlite_db(sa):\n try:\n import sqlalchemy as sa\n from sqlalchemy import create_engine\n\n titanic_db_path = file_relative_path(__file__, \"./test_sets/titanic.db\")\n engine = create_engine(f\"sqlite:///{titanic_db_path}\")\n assert engine.execute(\"select count(*) from titanic\").fetchall()[0] == (1313,)\n return engine\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\ndef titanic_sqlite_db_connection_string(sa):\n try:\n import sqlalchemy as sa\n from sqlalchemy import create_engine\n\n titanic_db_path = file_relative_path(__file__, \"./test_sets/titanic.db\")\n engine = create_engine(f\"sqlite:////{titanic_db_path}\")\n assert engine.execute(\"select count(*) from titanic\").fetchall()[0] == (1313,)\n return f\"sqlite:///{titanic_db_path}\"\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\ndef titanic_expectation_suite(empty_data_context_stats_enabled):\n data_context: DataContext = empty_data_context_stats_enabled\n return ExpectationSuite(\n expectation_suite_name=\"Titanic.warning\",\n meta={},\n data_asset_type=\"Dataset\",\n expectations=[\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"PClass\"}\n ),\n 
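            # Together with the two configurations that follow, this suite
            # asserts that the PClass column exists, that Name has no nulls,
            # and that the table holds exactly 1313 rows -- the same count
            # asserted against titanic.db above.  Illustrative use only
            # (sketch; the exact validation API call is assumed):
            #
            #     validator = context.get_validator(
            #         batch_request=..., expectation_suite=suite
            #     )
            #     assert validator.validate().success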
ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs={\"column\": \"Name\"},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_equal\",\n kwargs={\"value\": 1313},\n ),\n ],\n data_context=data_context,\n )\n\n\[email protected]\ndef empty_sqlite_db(sa):\n \"\"\"An empty in-memory sqlite db that always gets run.\"\"\"\n try:\n import sqlalchemy as sa\n from sqlalchemy import create_engine\n\n engine = create_engine(\"sqlite://\")\n assert engine.execute(\"select 1\").fetchall()[0] == (1,)\n return engine\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\n@freeze_time(\"09/26/2019 13:42:41\")\ndef site_builder_data_context_with_html_store_titanic_random(\n tmp_path_factory, filesystem_csv_3\n):\n base_dir = str(tmp_path_factory.mktemp(\"project_dir\"))\n project_dir = os.path.join(base_dir, \"project_path\")\n os.mkdir(project_dir)\n\n os.makedirs(os.path.join(project_dir, \"data\"))\n os.makedirs(os.path.join(project_dir, \"data/titanic\"))\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(project_dir, \"data\", \"titanic\", \"Titanic.csv\")),\n )\n\n os.makedirs(os.path.join(project_dir, \"data\", \"random\"))\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f1.csv\"),\n str(os.path.join(project_dir, \"data\", \"random\", \"f1.csv\")),\n )\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f2.csv\"),\n str(os.path.join(project_dir, \"data\", \"random\", \"f2.csv\")),\n )\n ge.data_context.DataContext.create(project_dir)\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/great_expectations_site_builder.yml\"\n ),\n str(os.path.join(project_dir, \"great_expectations\", \"great_expectations.yml\")),\n )\n context = ge.data_context.DataContext(\n context_root_dir=os.path.join(project_dir, \"great_expectations\")\n )\n\n context.add_datasource(\n \"titanic\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data\", \"titanic\"),\n }\n },\n )\n context.add_datasource(\n \"random\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data\", \"random\"),\n }\n },\n )\n\n context.profile_datasource(\"titanic\")\n context.profile_datasource(\"random\")\n context.profile_datasource(context.list_datasources()[0][\"name\"])\n\n context._project_config.anonymous_usage_statistics = {\n \"enabled\": True,\n \"data_context_id\": \"f43d4897-385f-4366-82b0-1a8eda2bf79c\",\n }\n\n return context\n\n\[email protected](scope=\"function\")\n@freeze_time(\"09/26/2019 13:42:41\")\ndef site_builder_data_context_v013_with_html_store_titanic_random(\n tmp_path, filesystem_csv_3\n):\n base_dir = tmp_path / \"project_dir\"\n base_dir.mkdir()\n base_dir = str(base_dir)\n project_dir = os.path.join(base_dir, \"project_path\")\n os.mkdir(project_dir)\n\n os.makedirs(os.path.join(project_dir, \"data\"))\n os.makedirs(os.path.join(project_dir, \"data\", \"titanic\"))\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(project_dir, \"data\", \"titanic\", \"Titanic.csv\")),\n )\n\n os.makedirs(os.path.join(project_dir, \"data\", \"random\"))\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f1.csv\"),\n 
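        # f1.csv and f2.csv (copied here from the filesystem_csv_3 fixture)
        # back the "random" PandasDatasource configured below; both it and the
        # "titanic" datasource are then profiled so that the site-builder
        # tests have expectation suites and validation results to render into
        # data docs.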
str(os.path.join(project_dir, \"data\", \"random\", \"f1.csv\")),\n )\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f2.csv\"),\n str(os.path.join(project_dir, \"data\", \"random\", \"f2.csv\")),\n )\n ge.data_context.DataContext.create(project_dir)\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_site_builder.yml\"\n ),\n str(os.path.join(project_dir, \"great_expectations\", \"great_expectations.yml\")),\n )\n context = ge.data_context.DataContext(\n context_root_dir=os.path.join(project_dir, \"great_expectations\")\n )\n\n context.add_datasource(\n \"titanic\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data\", \"titanic\"),\n }\n },\n )\n context.add_datasource(\n \"random\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data\", \"random\"),\n }\n },\n )\n\n context.profile_datasource(\"titanic\")\n context.profile_datasource(\"random\")\n context.profile_datasource(context.list_datasources()[0][\"name\"])\n\n context._project_config.anonymous_usage_statistics = {\n \"enabled\": True,\n \"data_context_id\": \"f43d4897-385f-4366-82b0-1a8eda2bf79c\",\n }\n\n return context\n\n\[email protected]\ndef v20_project_directory(tmp_path_factory):\n \"\"\"\n GE config_version: 2 project for testing upgrade helper\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"v20_project\"))\n context_root_dir = os.path.join(project_path, \"great_expectations\")\n shutil.copytree(\n file_relative_path(\n __file__, \"./test_fixtures/upgrade_helper/great_expectations_v20_project/\"\n ),\n context_root_dir,\n )\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/upgrade_helper/great_expectations_v2.yml\"\n ),\n os.path.join(context_root_dir, \"great_expectations.yml\"),\n )\n return context_root_dir\n\n\[email protected]\ndef data_context_parameterized_expectation_suite_no_checkpoint_store(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node\", \"default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return 
ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_parameterized_expectation_suite(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_v013_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node\", \"default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_simple_expectation_suite(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"rendering_fixtures/expectations_suite_1.json\",\n ),\n os.path.join(asset_config_path, \"default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]()\ndef filesystem_csv_data_context_with_validation_operators(\n titanic_data_context_stats_enabled, filesystem_csv_2\n):\n titanic_data_context_stats_enabled.add_datasource(\n \"rad_datasource\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": str(filesystem_csv_2),\n }\n },\n )\n return titanic_data_context_stats_enabled\n\n\[email 
protected]()\ndef filesystem_csv_data_context(\n empty_data_context,\n filesystem_csv_2,\n) -> DataContext:\n empty_data_context.add_datasource(\n \"rad_datasource\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": str(filesystem_csv_2),\n }\n },\n )\n return empty_data_context\n\n\[email protected]\ndef filesystem_csv(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp(\"filesystem_csv\")\n base_dir = str(base_dir)\n # Put a few files in the directory\n with open(os.path.join(base_dir, \"f1.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n with open(os.path.join(base_dir, \"f2.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n\n os.makedirs(os.path.join(base_dir, \"f3\"), exist_ok=True)\n with open(os.path.join(base_dir, \"f3\", \"f3_20190101.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n with open(os.path.join(base_dir, \"f3\", \"f3_20190102.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n\n return base_dir\n\n\[email protected](scope=\"function\")\ndef filesystem_csv_2(tmp_path):\n base_dir = tmp_path / \"filesystem_csv_2\"\n base_dir.mkdir()\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\"x\": [1, 2, 3]})\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=False)\n assert os.path.isabs(base_dir)\n assert os.path.isfile(os.path.join(base_dir, \"f1.csv\"))\n\n return base_dir\n\n\[email protected](scope=\"function\")\ndef filesystem_csv_3(tmp_path):\n base_dir = tmp_path / \"filesystem_csv_3\"\n base_dir.mkdir()\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\"x\": [1, 2, 3]})\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=False)\n\n toy_dataset_2 = PandasDataset({\"y\": [1, 2, 3]})\n toy_dataset_2.to_csv(os.path.join(base_dir, \"f2.csv\"), index=False)\n\n return base_dir\n\n\[email protected](scope=\"function\")\ndef filesystem_csv_4(tmp_path):\n base_dir = tmp_path / \"filesystem_csv_4\"\n base_dir.mkdir()\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset(\n {\n \"x\": [1, 2, 3],\n \"y\": [1, 2, 3],\n }\n )\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=None)\n\n return base_dir\n\n\[email protected]\ndef titanic_profiled_evrs_1():\n with open(\n file_relative_path(\n __file__, \"./render/fixtures/BasicDatasetProfiler_evrs.json\"\n ),\n ) as infile:\n return expectationSuiteValidationResultSchema.loads(infile.read())\n\n\n# various types of evr\[email protected]\ndef evr_failed():\n return ExpectationValidationResult(\n success=False,\n result={\n \"element_count\": 1313,\n \"missing_count\": 0,\n \"missing_percent\": 0.0,\n \"unexpected_count\": 3,\n \"unexpected_percent\": 0.2284843869002285,\n \"unexpected_percent_nonmissing\": 0.2284843869002285,\n \"partial_unexpected_list\": [\n \"Daly, Mr Peter Denis \",\n \"Barber, Ms \",\n \"Geiger, Miss Emily \",\n ],\n \"partial_unexpected_index_list\": [77, 289, 303],\n \"partial_unexpected_counts\": [\n {\"value\": \"Barber, Ms \", \"count\": 1},\n {\"value\": \"Daly, Mr Peter Denis \", \"count\": 1},\n {\"value\": \"Geiger, Miss Emily \", \"count\": 1},\n ],\n },\n exception_info={\n \"raised_exception\": False,\n \"exception_message\": None,\n \"exception_traceback\": None,\n },\n expectation_config=ExpectationConfiguration(\n 
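            # The `result` block above is what result_format: SUMMARY
            # produces -- element/missing/unexpected counts plus the
            # partial_unexpected_* samples -- for the leading/trailing
            # whitespace regex expectation on the Name column whose
            # configuration follows.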
expectation_type=\"expect_column_values_to_not_match_regex\",\n kwargs={\n \"column\": \"Name\",\n \"regex\": \"^\\\\s+|\\\\s+$\",\n \"result_format\": \"SUMMARY\",\n },\n ),\n )\n\n\[email protected]\ndef evr_success():\n return ExpectationValidationResult(\n success=True,\n result={\"observed_value\": 1313},\n exception_info={\n \"raised_exception\": False,\n \"exception_message\": None,\n \"exception_traceback\": None,\n },\n expectation_config=ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_be_between\",\n kwargs={\"min_value\": 0, \"max_value\": None, \"result_format\": \"SUMMARY\"},\n ),\n )\n\n\[email protected]\ndef sqlite_view_engine(test_backends):\n # Create a small in-memory engine with two views, one of which is temporary\n if \"sqlite\" in test_backends:\n try:\n import sqlalchemy as sa\n\n sqlite_engine = sa.create_engine(\"sqlite://\")\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5]})\n df.to_sql(name=\"test_table\", con=sqlite_engine, index=True)\n sqlite_engine.execute(\n \"CREATE TEMP VIEW test_temp_view AS SELECT * FROM test_table where a < 4;\"\n )\n sqlite_engine.execute(\n \"CREATE VIEW test_view AS SELECT * FROM test_table where a > 4;\"\n )\n return sqlite_engine\n except ImportError:\n sa = None\n else:\n pytest.skip(\"SqlAlchemy tests disabled; not testing views\")\n\n\[email protected]\ndef expectation_suite_identifier():\n return ExpectationSuiteIdentifier(\"my.expectation.suite.name\")\n\n\[email protected]\ndef basic_sqlalchemy_datasource(sqlitedb_engine):\n return SqlAlchemyDatasource(\"basic_sqlalchemy_datasource\", engine=sqlitedb_engine)\n\n\[email protected]\ndef test_folder_connection_path_csv(tmp_path_factory):\n df1 = pd.DataFrame({\"col_1\": [1, 2, 3, 4, 5], \"col_2\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n path = str(tmp_path_factory.mktemp(\"test_folder_connection_path_csv\"))\n df1.to_csv(path_or_buf=os.path.join(path, \"test.csv\"), index=False)\n return str(path)\n\n\[email protected]\ndef test_db_connection_string(tmp_path_factory, test_backends):\n if \"sqlite\" not in test_backends:\n pytest.skip(\"skipping fixture because sqlite not selected\")\n df1 = pd.DataFrame({\"col_1\": [1, 2, 3, 4, 5], \"col_2\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n df2 = pd.DataFrame({\"col_1\": [0, 1, 2, 3, 4], \"col_2\": [\"b\", \"c\", \"d\", \"e\", \"f\"]})\n\n try:\n import sqlalchemy as sa\n\n basepath = str(tmp_path_factory.mktemp(\"db_context\"))\n path = os.path.join(basepath, \"test.db\")\n engine = sa.create_engine(\"sqlite:///\" + str(path))\n df1.to_sql(name=\"table_1\", con=engine, index=True)\n df2.to_sql(name=\"table_2\", con=engine, index=True, schema=\"main\")\n\n # Return a connection string to this newly-created db\n return \"sqlite:///\" + str(path)\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n\n\[email protected]\ndef test_df(tmp_path_factory):\n def generate_ascending_list_of_datetimes(\n k, start_date=datetime.date(2020, 1, 1), end_date=datetime.date(2020, 12, 31)\n ):\n start_time = datetime.datetime(\n start_date.year, start_date.month, start_date.day\n )\n days_between_dates = (end_date - start_date).total_seconds()\n\n datetime_list = [\n start_time\n + datetime.timedelta(seconds=random.randrange(days_between_dates))\n for i in range(k)\n ]\n datetime_list.sort()\n return datetime_list\n\n k = 120\n random.seed(1)\n\n timestamp_list = generate_ascending_list_of_datetimes(\n k, end_date=datetime.date(2020, 1, 31)\n )\n date_list = [datetime.date(ts.year, 
ts.month, ts.day) for ts in timestamp_list]\n\n batch_ids = [random.randint(0, 10) for i in range(k)]\n batch_ids.sort()\n\n session_ids = [random.randint(2, 60) for i in range(k)]\n session_ids.sort()\n session_ids = [i - random.randint(0, 2) for i in session_ids]\n\n events_df = pd.DataFrame(\n {\n \"id\": range(k),\n \"batch_id\": batch_ids,\n \"date\": date_list,\n \"y\": [d.year for d in date_list],\n \"m\": [d.month for d in date_list],\n \"d\": [d.day for d in date_list],\n \"timestamp\": timestamp_list,\n \"session_ids\": session_ids,\n \"event_type\": [\n random.choice([\"start\", \"stop\", \"continue\"]) for i in range(k)\n ],\n \"favorite_color\": [\n \"#\"\n + \"\".join([random.choice(list(\"0123456789ABCDEF\")) for j in range(6)])\n for i in range(k)\n ],\n }\n )\n return events_df\n\n\[email protected]\ndef data_context_with_simple_sql_datasource_for_testing_get_batch(\n sa, empty_data_context\n):\n context: DataContext = empty_data_context\n\n db_file_path: str = file_relative_path(\n __file__,\n os.path.join(\"test_sets\", \"test_cases_for_sql_data_connector.db\"),\n )\n\n datasource_config: str = f\"\"\"\nclass_name: SimpleSqlalchemyDatasource\nconnection_string: sqlite:///{db_file_path}\nintrospection:\n whole_table: {{}}\n\n daily:\n splitter_method: _split_on_converted_datetime\n splitter_kwargs:\n column_name: date\n date_format_string: \"%Y-%m-%d\"\n\n weekly:\n splitter_method: _split_on_converted_datetime\n splitter_kwargs:\n column_name: date\n date_format_string: \"%Y-%W\"\n\n by_id_dozens:\n splitter_method: _split_on_divided_integer\n splitter_kwargs:\n column_name: id\n divisor: 12\n\"\"\"\n\n try:\n context.add_datasource(\"my_sqlite_db\", **yaml.load(datasource_config))\n except AttributeError:\n pytest.skip(\"SQL Database tests require sqlalchemy to be installed.\")\n\n return context\n\n\[email protected]\ndef basic_datasource(tmp_path_factory):\n base_directory: str = str(\n tmp_path_factory.mktemp(\"basic_datasource_runtime_data_connector\")\n )\n\n basic_datasource: Datasource = instantiate_class_from_config(\n config=yaml.load(\n f\"\"\"\nclass_name: Datasource\n\ndata_connectors:\n test_runtime_data_connector:\n module_name: great_expectations.datasource.data_connector\n class_name: RuntimeDataConnector\n batch_identifiers:\n - pipeline_stage_name\n - airflow_run_id\n - custom_key_0\n\nexecution_engine:\n class_name: PandasExecutionEngine\n\n \"\"\",\n ),\n runtime_environment={\n \"name\": \"my_datasource\",\n },\n config_defaults={\n \"module_name\": \"great_expectations.datasource\",\n },\n )\n\n return basic_datasource\n\n\[email protected]\ndef db_file():\n return file_relative_path(\n __file__,\n os.path.join(\"test_sets\", \"test_cases_for_sql_data_connector.db\"),\n )\n\n\[email protected]\ndef data_context_with_datasource_pandas_engine(empty_data_context):\n context = empty_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: PandasExecutionEngine\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected]\ndef data_context_with_datasource_spark_engine(empty_data_context, spark_session):\n context = empty_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SparkDFExecutionEngine\n data_connectors:\n default_runtime_data_connector_name:\n 
class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected]\ndef data_context_with_datasource_sqlalchemy_engine(empty_data_context, db_file):\n context = empty_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n connection_string: sqlite:///{db_file}\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected]\ndef data_context_with_query_store(\n empty_data_context, titanic_sqlite_db_connection_string\n):\n context = empty_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n connection_string: {titanic_sqlite_db_connection_string}\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\"\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n store_config = yaml.load(\n f\"\"\"\n class_name: SqlAlchemyQueryStore\n credentials:\n connection_string: {titanic_sqlite_db_connection_string}\n queries:\n col_count:\n query: \"SELECT COUNT(*) FROM titanic;\"\n return_type: \"scalar\"\n dist_col_count:\n query: \"SELECT COUNT(DISTINCT PClass) FROM titanic;\"\n return_type: \"scalar\"\n \"\"\"\n )\n context.add_store(\"my_query_store\", store_config)\n return context\n\n\[email protected]\ndef ge_cloud_base_url():\n return \"https://app.test.greatexpectations.io\"\n\n\[email protected]\ndef ge_cloud_organization_id():\n return \"bd20fead-2c31-4392-bcd1-f1e87ad5a79c\"\n\n\[email protected]\ndef ge_cloud_access_token():\n return \"6bb5b6f5c7794892a4ca168c65c2603e\"\n\n\[email protected]\ndef ge_cloud_config(ge_cloud_base_url, ge_cloud_organization_id, ge_cloud_access_token):\n return GeCloudConfig(\n base_url=ge_cloud_base_url,\n organization_id=ge_cloud_organization_id,\n access_token=ge_cloud_access_token,\n )\n\n\[email protected](scope=\"function\")\ndef empty_ge_cloud_data_context_config(\n ge_cloud_base_url, ge_cloud_organization_id, ge_cloud_access_token\n):\n config_yaml_str = f\"\"\"\nstores:\n default_evaluation_parameter_store:\n class_name: EvaluationParameterStore\n\n default_expectations_store:\n class_name: ExpectationsStore\n store_backend:\n class_name: GeCloudStoreBackend\n ge_cloud_base_url: {ge_cloud_base_url}\n ge_cloud_resource_type: expectation_suite\n ge_cloud_credentials:\n access_token: {ge_cloud_access_token}\n organization_id: {ge_cloud_organization_id}\n suppress_store_backend_id: True\n\n default_validations_store:\n class_name: ValidationsStore\n store_backend:\n class_name: GeCloudStoreBackend\n ge_cloud_base_url: {ge_cloud_base_url}\n ge_cloud_resource_type: suite_validation_result\n ge_cloud_credentials:\n access_token: {ge_cloud_access_token}\n organization_id: {ge_cloud_organization_id}\n suppress_store_backend_id: True\n\n default_checkpoint_store:\n class_name: CheckpointStore\n store_backend:\n class_name: GeCloudStoreBackend\n ge_cloud_base_url: {ge_cloud_base_url}\n ge_cloud_resource_type: contract\n ge_cloud_credentials:\n access_token: {ge_cloud_access_token}\n organization_id: {ge_cloud_organization_id}\n suppress_store_backend_id: 
True\n\nevaluation_parameter_store_name: default_evaluation_parameter_store\nexpectations_store_name: default_expectations_store\nvalidations_store_name: default_validations_store\ncheckpoint_store_name: default_checkpoint_store\n\"\"\"\n data_context_config_dict = yaml.load(config_yaml_str)\n return DataContextConfig(**data_context_config_dict)\n\n\[email protected](scope=\"function\")\ndef empty_cloud_data_context(\n tmp_path, empty_ge_cloud_data_context_config, ge_cloud_config\n) -> DataContext:\n project_path = tmp_path / \"empty_data_context\"\n project_path.mkdir()\n project_path = str(project_path)\n\n context = ge.data_context.BaseDataContext(\n project_config=empty_ge_cloud_data_context_config,\n context_root_dir=project_path,\n ge_cloud_mode=True,\n ge_cloud_config=ge_cloud_config,\n )\n assert context.list_datasources() == []\n return context\n\n\[email protected]\ndef cloud_data_context_with_datasource_pandas_engine(empty_cloud_data_context):\n context = empty_cloud_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: PandasExecutionEngine\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected]\ndef cloud_data_context_with_datasource_sqlalchemy_engine(\n empty_cloud_data_context, db_file\n):\n context = empty_cloud_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n connection_string: sqlite:///{db_file}\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected](scope=\"function\")\ndef profiler_name() -> str:\n skip_if_python_below_minimum_version()\n\n return \"my_first_profiler\"\n\n\[email protected](scope=\"function\")\ndef profiler_store_name() -> str:\n skip_if_python_below_minimum_version()\n\n return \"profiler_store\"\n\n\[email protected](scope=\"function\")\ndef profiler_config_with_placeholder_args(\n profiler_name: str,\n) -> RuleBasedProfilerConfig:\n \"\"\"\n This fixture does not correspond to a practical profiler with rules, whose constituent components perform meaningful\n computations; rather, it uses \"placeholder\" style attribute values, which is adequate for configuration level tests.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n return RuleBasedProfilerConfig(\n name=profiler_name,\n class_name=\"RuleBasedProfiler\",\n config_version=1.0,\n variables={\n \"false_positive_threshold\": 1.0e-2,\n },\n rules={\n \"rule_1\": {\n \"domain_builder\": {\n \"class_name\": \"TableDomainBuilder\",\n },\n \"parameter_builders\": [\n {\n \"class_name\": \"MetricMultiBatchParameterBuilder\",\n \"name\": \"my_parameter\",\n \"metric_name\": \"my_metric\",\n },\n ],\n \"expectation_configuration_builders\": [\n {\n \"class_name\": \"DefaultExpectationConfigurationBuilder\",\n \"expectation_type\": \"expect_column_pair_values_A_to_be_greater_than_B\",\n \"column_A\": \"$domain.domain_kwargs.column_A\",\n \"column_B\": \"$domain.domain_kwargs.column_B\",\n \"my_arg\": \"$parameter.my_parameter.value[0]\",\n \"my_other_arg\": \"$parameter.my_parameter.value[1]\",\n \"meta\": {\n \"details\": {\n \"my_parameter_estimator\": 
\"$parameter.my_parameter.details\",\n \"note\": \"Important remarks about estimation algorithm.\",\n },\n },\n },\n ],\n },\n },\n )\n\n\[email protected]\ndef empty_profiler_store(profiler_store_name: str) -> ProfilerStore:\n skip_if_python_below_minimum_version()\n\n return ProfilerStore(profiler_store_name)\n\n\[email protected]\ndef profiler_key(profiler_name: str) -> ConfigurationIdentifier:\n skip_if_python_below_minimum_version()\n\n return ConfigurationIdentifier(configuration_key=profiler_name)\n\n\[email protected]\ndef ge_cloud_profiler_id() -> str:\n skip_if_python_below_minimum_version()\n\n return \"my_ge_cloud_profiler_id\"\n\n\[email protected]\ndef ge_cloud_profiler_key(ge_cloud_profiler_id: str) -> GeCloudIdentifier:\n skip_if_python_below_minimum_version()\n\n return GeCloudIdentifier(resource_type=\"contract\", ge_cloud_id=ge_cloud_profiler_id)\n\n\[email protected]\ndef populated_profiler_store(\n empty_profiler_store: ProfilerStore,\n profiler_config_with_placeholder_args: RuleBasedProfilerConfig,\n profiler_key: ConfigurationIdentifier,\n) -> ProfilerStore:\n skip_if_python_below_minimum_version()\n\n # Roundtrip through schema validation to remove any illegal fields add/or restore any missing fields.\n serialized_config: dict = ruleBasedProfilerConfigSchema.dump(\n profiler_config_with_placeholder_args\n )\n deserialized_config: dict = ruleBasedProfilerConfigSchema.load(serialized_config)\n\n profiler_config: RuleBasedProfilerConfig = RuleBasedProfilerConfig(\n **deserialized_config\n )\n\n profiler_store = empty_profiler_store\n profiler_store.set(key=profiler_key, value=profiler_config)\n return profiler_store\n\n\[email protected]\n@freeze_time(\"09/26/2019 13:42:41\")\ndef alice_columnar_table_single_batch(empty_data_context):\n \"\"\"\n About the \"Alice\" User Workflow Fixture\n\n Alice has a single table of columnar data called user_events (DataAsset) that she wants to check periodically as new\n data is added.\n\n - She knows what some of the columns mean, but not all - and there are MANY of them (only a subset currently shown\n in examples and fixtures).\n\n - She has organized other tables similarly so that for example column name suffixes indicate which are for user\n ids (_id) and which timestamps are for versioning (_ts).\n\n She wants to use a configurable profiler to generate a description (ExpectationSuite) about table so that she can:\n\n 1. use it to validate the user_events table periodically and set up alerts for when things change\n\n 2. have a place to add her domain knowledge of the data (that can also be validated against new data)\n\n 3. 
if all goes well, generalize some of the Profiler to use on her other tables\n\n Alice configures her Profiler using the YAML configurations and data file locations captured in this fixture.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n verbose_profiler_config_file_path: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"rule_based_profiler\",\n \"alice_user_workflow_verbose_profiler_config.yml\",\n ),\n )\n\n verbose_profiler_config: str\n with open(verbose_profiler_config_file_path) as f:\n verbose_profiler_config = f.read()\n\n my_rule_for_user_ids_expectation_configurations: List[ExpectationConfiguration] = [\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_of_type\",\n kwargs={\n \"column\": \"user_id\",\n \"type_\": \"INTEGER\",\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_between\",\n kwargs={\n \"min_value\": 1000,\n \"max_value\": 999999999999,\n \"column\": \"user_id\",\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs={\n \"column\": \"user_id\",\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_less_than\",\n meta={},\n kwargs={\"value\": 9488404, \"column\": \"user_id\"},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_greater_than\",\n meta={},\n kwargs={\"value\": 397433, \"column\": \"user_id\"},\n ),\n ]\n\n event_ts_column_data: Dict[str, str] = {\n \"column_name\": \"event_ts\",\n \"observed_max_time_str\": \"2004-10-19 11:05:20\",\n \"observed_strftime_format\": \"%Y-%m-%d %H:%M:%S\",\n }\n\n my_rule_for_timestamps_column_data: List[Dict[str, str]] = [\n event_ts_column_data,\n {\n \"column_name\": \"server_ts\",\n \"observed_max_time_str\": \"2004-10-19 11:05:20\",\n },\n {\n \"column_name\": \"device_ts\",\n \"observed_max_time_str\": \"2004-10-19 11:05:22\",\n },\n ]\n my_rule_for_timestamps_expectation_configurations: List[\n ExpectationConfiguration\n ] = []\n column_data: Dict[str, str]\n for column_data in my_rule_for_timestamps_column_data:\n my_rule_for_timestamps_expectation_configurations.extend(\n [\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_of_type\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n \"type_\": \"TIMESTAMP\",\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_increasing\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_dateutil_parseable\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_min_to_be_between\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n \"min_value\": \"2004-10-19T10:23:54\", # From variables\n \"max_value\": \"2004-10-19T10:23:54\", # From variables\n },\n meta={\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms no events occur before tracking started **2004-10-19 10:23:54**\"\n ],\n }\n },\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_max_to_be_between\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n \"min_value\": \"2004-10-19T10:23:54\", # From variables\n \"max_value\": event_ts_column_data[\n \"observed_max_time_str\"\n ], # Pin to event_ts column\n },\n meta={\n \"notes\": {\n \"format\": 
\"markdown\",\n \"content\": [\n \"### This expectation confirms that the event_ts contains the latest timestamp of all domains\"\n ],\n }\n },\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_match_strftime_format\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n \"strftime_format\": {\n \"value\": event_ts_column_data[\n \"observed_strftime_format\"\n ], # Pin to event_ts column\n \"details\": {\n \"success_ratio\": 1.0,\n \"candidate_strings\": sorted(DEFAULT_CANDIDATE_STRINGS),\n },\n },\n },\n meta={\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in _ts are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder\"\n ],\n }\n },\n ),\n ]\n )\n\n my_rule_for_one_cardinality_expectation_configurations: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_in_set\",\n kwargs={\n \"column\": \"user_agent\",\n \"value_set\": [\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\"\n ],\n },\n meta={},\n ),\n ]\n\n expectation_configurations: List[ExpectationConfiguration] = []\n\n expectation_configurations.extend(my_rule_for_user_ids_expectation_configurations)\n expectation_configurations.extend(my_rule_for_timestamps_expectation_configurations)\n expectation_configurations.extend(\n my_rule_for_one_cardinality_expectation_configurations\n )\n\n expectation_suite_name: str = \"alice_columnar_table_single_batch\"\n expected_expectation_suite: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name, data_context=empty_data_context\n )\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n # NOTE Will 20211208 add_expectation() method, although being called by an ExpectationSuite instance, is being\n # called within a fixture, and we will prevent it from sending a usage_event by calling the private method\n # _add_expectation().\n expected_expectation_suite._add_expectation(\n expectation_configuration=expectation_configuration, send_usage_event=False\n )\n\n # NOTE that this expectation suite should fail when validated on the data in \"sample_data_relative_path\"\n # because the device_ts is ahead of the event_ts for the latest event\n sample_data_relative_path: str = \"alice_columnar_table_single_batch_data.csv\"\n\n profiler_config: dict = yaml.load(verbose_profiler_config)\n\n # Roundtrip through schema validation to remove any illegal fields add/or restore any missing fields.\n deserialized_config: dict = ruleBasedProfilerConfigSchema.load(profiler_config)\n serialized_config: dict = ruleBasedProfilerConfigSchema.dump(deserialized_config)\n\n # `class_name`/`module_name` are generally consumed through `instantiate_class_from_config`\n # so we need to manually remove those values if we wish to use the **kwargs instantiation pattern\n serialized_config.pop(\"class_name\")\n serialized_config.pop(\"module_name\")\n expected_expectation_suite.add_citation(\n comment=\"Suite created by Rule-Based Profiler with the configuration included.\",\n profiler_config=serialized_config,\n )\n\n return {\n \"profiler_config\": verbose_profiler_config,\n \"expected_expectation_suite_name\": expectation_suite_name,\n \"expected_expectation_suite\": expected_expectation_suite,\n \"sample_data_relative_path\": sample_data_relative_path,\n }\n\n\[email 
protected]\ndef alice_columnar_table_single_batch_context(\n monkeypatch,\n empty_data_context_stats_enabled,\n alice_columnar_table_single_batch,\n):\n skip_if_python_below_minimum_version()\n\n context: DataContext = empty_data_context_stats_enabled\n # We need our salt to be consistent between runs to ensure idempotent anonymized values\n context._usage_statistics_handler = UsageStatisticsHandler(\n context, \"00000000-0000-0000-0000-00000000a004\", \"N/A\"\n )\n monkeypatch.chdir(context.root_directory)\n data_relative_path: str = \"../data\"\n data_path: str = os.path.join(context.root_directory, data_relative_path)\n os.makedirs(data_path, exist_ok=True)\n\n # Copy data\n filename: str = alice_columnar_table_single_batch[\"sample_data_relative_path\"]\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n f\"{filename}\",\n ),\n ),\n str(os.path.join(data_path, filename)),\n )\n\n data_connector_base_directory: str = \"./\"\n monkeypatch.setenv(\"base_directory\", data_connector_base_directory)\n monkeypatch.setenv(\"data_fixtures_root\", data_relative_path)\n\n datasource_name: str = \"alice_columnar_table_single_batch_datasource\"\n data_connector_name: str = \"alice_columnar_table_single_batch_data_connector\"\n data_asset_name: str = \"alice_columnar_table_single_batch_data_asset\"\n datasource_config: str = rf\"\"\"\nclass_name: Datasource\nmodule_name: great_expectations.datasource\nexecution_engine:\n module_name: great_expectations.execution_engine\n class_name: PandasExecutionEngine\ndata_connectors:\n {data_connector_name}:\n class_name: ConfiguredAssetFilesystemDataConnector\n assets:\n {data_asset_name}:\n module_name: great_expectations.datasource.data_connector.asset\n group_names:\n - filename\n pattern: (.*)\\.csv\n reader_options:\n delimiter: \",\"\n class_name: Asset\n base_directory: ${{data_fixtures_root}}\n glob_directive: \"*.csv\"\n base_directory: ${{base_directory}}\n module_name: great_expectations.datasource.data_connector\n \"\"\"\n\n context.add_datasource(name=datasource_name, **yaml.load(datasource_config))\n\n assert context.list_datasources() == [\n {\n \"class_name\": \"Datasource\",\n \"data_connectors\": {\n data_connector_name: {\n \"assets\": {\n data_asset_name: {\n \"base_directory\": data_relative_path,\n \"class_name\": \"Asset\",\n \"glob_directive\": \"*.csv\",\n \"group_names\": [\"filename\"],\n \"module_name\": \"great_expectations.datasource.data_connector.asset\",\n \"pattern\": \"(.*)\\\\.csv\",\n }\n },\n \"base_directory\": data_connector_base_directory,\n \"class_name\": \"ConfiguredAssetFilesystemDataConnector\",\n \"module_name\": \"great_expectations.datasource.data_connector\",\n },\n },\n \"execution_engine\": {\n \"class_name\": \"PandasExecutionEngine\",\n \"module_name\": \"great_expectations.execution_engine\",\n },\n \"module_name\": \"great_expectations.datasource\",\n \"name\": datasource_name,\n }\n ]\n return context\n\n\[email protected]\n@freeze_time(\"09/26/2019 13:42:41\")\ndef bobby_columnar_table_multi_batch(empty_data_context):\n \"\"\"\n About the \"Bobby\" User Workflow Fixture\n Bobby has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as new\n data is added.\n - He knows what some of the columns are of the accounting/financial/account type.\n He wants to use a configurable profiler to generate a description (ExpectationSuite) about tables so that he can:\n 1. monitor the average number of rows in the tables\n 2. 
use it to validate min/max boundaries of all columns are of the accounting/financial/account type and set up\n alerts for when things change\n 3. have a place to add his domain knowledge of the data (that can also be validated against new data)\n 4. if all goes well, generalize some of the Profiler to use on his other tables\n Bobby uses a crude, highly inaccurate deterministic parametric estimator -- for illustrative purposes.\n Bobby configures his Profiler using the YAML configurations and data file locations captured in this fixture.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n verbose_profiler_config_file_path: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"rule_based_profiler\",\n \"bobby_user_workflow_verbose_profiler_config.yml\",\n ),\n )\n\n verbose_profiler_config: str\n with open(verbose_profiler_config_file_path) as f:\n verbose_profiler_config = f.read()\n\n my_row_count_range_rule_expectation_configurations_oneshot_estimator: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"kwargs\": {\"min_value\": 7505, \"max_value\": 8495},\n \"expectation_type\": \"expect_table_row_count_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"table.row_count\",\n \"domain_kwargs\": {},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n },\n },\n },\n ),\n ]\n\n my_column_ranges_rule_expectation_configurations_oneshot_estimator: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"VendorID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"VendorID\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"VendorID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"VendorID\",\n \"min_value\": 4,\n \"max_value\": 4,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"passenger_count\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"passenger_count\",\n \"min_value\": 0,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"passenger_count\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"passenger_count\",\n \"min_value\": 6,\n \"max_value\": 6,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": 
\"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"trip_distance\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"trip_distance\",\n \"min_value\": 0.0,\n \"max_value\": 0.0,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"trip_distance\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"trip_distance\",\n \"min_value\": 37.62,\n \"max_value\": 57.85,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"RatecodeID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"RatecodeID\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"RatecodeID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"RatecodeID\",\n \"min_value\": 5,\n \"max_value\": 6,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"PULocationID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"PULocationID\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"PULocationID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"PULocationID\",\n \"min_value\": 265,\n \"max_value\": 265,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"DOLocationID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"DOLocationID\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"DOLocationID\"},\n 
\"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"DOLocationID\",\n \"min_value\": 265,\n \"max_value\": 265,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"payment_type\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"payment_type\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"payment_type\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"payment_type\",\n \"min_value\": 4,\n \"max_value\": 4,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"fare_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"fare_amount\",\n \"min_value\": -51.84,\n \"max_value\": -21.16,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"fare_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"fare_amount\",\n \"min_value\": 228.94,\n \"max_value\": 2990.05,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"extra\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"extra\",\n \"min_value\": -36.53,\n \"max_value\": -1.18,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"extra\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"extra\",\n \"min_value\": 4.51,\n \"max_value\": 6.99,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"mta_tax\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"mta_tax\",\n \"min_value\": -0.5,\n \"max_value\": -0.5,\n \"mostly\": 1.0,\n 
},\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"mta_tax\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"mta_tax\",\n \"min_value\": 0.69,\n \"max_value\": 37.32,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"tip_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"tip_amount\",\n \"min_value\": 0.0,\n \"max_value\": 0.0,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"tip_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"tip_amount\",\n \"min_value\": 46.84,\n \"max_value\": 74.86,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"tolls_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"tolls_amount\",\n \"min_value\": 0.0,\n \"max_value\": 0.0,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"tolls_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"tolls_amount\",\n \"min_value\": 26.4,\n \"max_value\": 497.67,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"improvement_surcharge\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"improvement_surcharge\",\n \"min_value\": -0.3,\n \"max_value\": -0.3,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"improvement_surcharge\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"improvement_surcharge\",\n \"min_value\": 0.3,\n \"max_value\": 0.3,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n 
\"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"total_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"total_amount\",\n \"min_value\": -52.66,\n \"max_value\": -24.44,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"total_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"total_amount\",\n \"min_value\": 550.18,\n \"max_value\": 2992.47,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"congestion_surcharge\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"congestion_surcharge\",\n \"min_value\": -2.49,\n \"max_value\": -0.01,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"congestion_surcharge\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"congestion_surcharge\",\n \"min_value\": 0.01,\n \"max_value\": 2.49,\n \"mostly\": 1.0,\n },\n },\n ),\n ]\n\n my_column_timestamps_rule_expectation_configurations_oneshot_estimator: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_strftime_format\",\n \"kwargs\": {\n \"column\": \"pickup_datetime\",\n \"strftime_format\": \"%Y-%m-%d %H:%M:%S\",\n },\n \"meta\": {\n \"details\": {\n \"success_ratio\": 1.0,\n \"candidate_strings\": [\n \"%Y-%m-%d %H:%M:%S\",\n \"%y-%m-%d\",\n ],\n },\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in _datetime are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder\"\n ],\n },\n },\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_strftime_format\",\n \"kwargs\": {\n \"column\": \"dropoff_datetime\",\n \"strftime_format\": \"%Y-%m-%d %H:%M:%S\",\n },\n \"meta\": {\n \"details\": {\n \"success_ratio\": 1.0,\n \"candidate_strings\": [\n \"%Y-%m-%d %H:%M:%S\",\n \"%y-%m-%d\",\n ],\n },\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in _datetime are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder\"\n ],\n },\n },\n }\n ),\n ]\n my_column_regex_rule_expectation_configurations_oneshot_estimator: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_regex\",\n \"kwargs\": {\n \"column\": \"VendorID\",\n \"regex\": {\n \"value\": [r\"^\\d{1}$\"],\n \"details\": {\n \"evaluated_regexes\": {r\"^\\d{1}$\": 1.0, r\"^\\d{2}$\": 0.0},\n \"threshold\": 0.9,\n },\n },\n },\n 
\"meta\": {\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder\"\n ],\n },\n },\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_regex\",\n \"meta\": {\"notes\": {\"format\": \"markdown\", \"content\": None}},\n \"kwargs\": {\n \"column\": \"RatecodeID\",\n \"regex\": {\n \"value\": [r\"^\\d{1}$\"],\n \"details\": {\n \"evaluated_regexes\": {r\"^\\d{1}$\": 1.0, r\"^\\d{2}$\": 0.0},\n \"threshold\": 0.9,\n },\n },\n },\n \"meta\": {\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder\"\n ],\n }\n },\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_regex\",\n \"meta\": {\"notes\": {\"format\": \"markdown\", \"content\": None}},\n \"kwargs\": {\n \"column\": \"PULocationID\",\n \"regex\": {\n \"value\": [r\"^\\d{1}$\"],\n \"details\": {\n \"evaluated_regexes\": {r\"^\\d{1}$\": 1.0, r\"^\\d{2}$\": 0.0},\n \"threshold\": 0.9,\n },\n },\n },\n \"meta\": {\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder\"\n ],\n }\n },\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_regex\",\n \"meta\": {\"notes\": {\"format\": \"markdown\", \"content\": None}},\n \"kwargs\": {\n \"column\": \"DOLocationID\",\n \"regex\": {\n \"value\": [r\"^\\d{1}$\"],\n \"details\": {\n \"evaluated_regexes\": {r\"^\\d{1}$\": 1.0, r\"^\\d{2}$\": 0.0},\n \"threshold\": 0.9,\n },\n },\n },\n \"meta\": {\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder\"\n ],\n }\n },\n }\n ),\n ]\n\n my_rule_for_very_few_cardinality_expectation_configurations: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_be_in_set\",\n \"kwargs\": {\n \"column\": \"VendorID\",\n \"value_set\": [1, 2, 4],\n },\n \"meta\": {},\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_be_in_set\",\n \"kwargs\": {\n \"column\": \"passenger_count\",\n \"value_set\": [0, 1, 2, 3, 4, 5, 6],\n },\n \"meta\": {},\n }\n ),\n ]\n expectation_configurations: List[ExpectationConfiguration] = []\n\n expectation_configurations.extend(\n my_row_count_range_rule_expectation_configurations_oneshot_estimator\n )\n expectation_configurations.extend(\n my_column_ranges_rule_expectation_configurations_oneshot_estimator\n )\n expectation_configurations.extend(\n my_column_timestamps_rule_expectation_configurations_oneshot_estimator\n )\n\n expectation_configurations.extend(\n my_column_regex_rule_expectation_configurations_oneshot_estimator\n )\n expectation_configurations.extend(\n my_rule_for_very_few_cardinality_expectation_configurations\n )\n expectation_suite_name_oneshot_estimator: str = (\n \"bobby_columnar_table_multi_batch_oneshot_estimator\"\n )\n expected_expectation_suite_oneshot_estimator: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name_oneshot_estimator,\n 
data_context=empty_data_context,\n )\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n # NOTE Will 20211208 add_expectation() method, although being called by an ExpectationSuite instance, is being\n # called within a fixture, and we will prevent it from sending a usage_event by calling the private method.\n expected_expectation_suite_oneshot_estimator._add_expectation(\n expectation_configuration=expectation_configuration, send_usage_event=False\n )\n\n profiler_config: dict = yaml.load(verbose_profiler_config)\n\n # Roundtrip through schema validation to remove any illegal fields add/or restore any missing fields.\n deserialized_config: dict = ruleBasedProfilerConfigSchema.load(profiler_config)\n serialized_config: dict = ruleBasedProfilerConfigSchema.dump(deserialized_config)\n\n # `class_name`/`module_name` are generally consumed through `instantiate_class_from_config`\n # so we need to manually remove those values if we wish to use the **kwargs instantiation pattern\n serialized_config.pop(\"class_name\")\n serialized_config.pop(\"module_name\")\n\n expected_expectation_suite_oneshot_estimator.add_citation(\n comment=\"Suite created by Rule-Based Profiler with the configuration included.\",\n profiler_config=serialized_config,\n )\n\n return {\n \"profiler_config\": verbose_profiler_config,\n \"test_configuration_oneshot_estimator\": {\n \"expectation_suite_name\": expectation_suite_name_oneshot_estimator,\n \"expected_expectation_suite\": expected_expectation_suite_oneshot_estimator,\n },\n }\n\n\[email protected]\ndef bobby_columnar_table_multi_batch_deterministic_data_context(\n tmp_path_factory,\n monkeypatch,\n) -> DataContext:\n skip_if_python_below_minimum_version()\n\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n monkeypatch.setattr(AnonymizedUsageStatisticsConfig, \"enabled\", True)\n\n project_path: str = str(tmp_path_factory.mktemp(\"taxi_data_context\"))\n context_path: str = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path: str = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"integration\",\n \"fixtures\",\n \"yellow_tripdata_pandas_fixture\",\n \"great_expectations\",\n \"great_expectations.yml\",\n ),\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n \"random_subsamples\",\n \"yellow_tripdata_7500_lines_sample_2019-01.csv\",\n ),\n ),\n str(\n os.path.join(\n context_path, \"..\", \"data\", \"yellow_tripdata_sample_2019-01.csv\"\n )\n ),\n )\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n \"random_subsamples\",\n \"yellow_tripdata_8500_lines_sample_2019-02.csv\",\n ),\n ),\n str(\n os.path.join(\n context_path, \"..\", \"data\", \"yellow_tripdata_sample_2019-02.csv\"\n )\n ),\n )\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n \"random_subsamples\",\n \"yellow_tripdata_9000_lines_sample_2019-03.csv\",\n ),\n ),\n str(\n os.path.join(\n context_path, \"..\", \"data\", \"yellow_tripdata_sample_2019-03.csv\"\n )\n ),\n )\n\n context: DataContext = DataContext(context_root_dir=context_path)\n 
assert context.root_directory == context_path\n\n return context\n\n\[email protected]\ndef bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000():\n \"\"\"\n About the \"Bobster\" User Workflow Fixture\n\n Bobster has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as\n new data is added.\n\n - He knows what some of the columns are of the acconting/financial/account type, but he is currently interested in\n the average table size (in terms of the number of rows in a table).\n\n He wants to use a configurable profiler to generate a description (ExpectationSuite) about tables so that he can:\n\n 1. monitor the average number of rows in the tables\n\n 2. have a place to add his domain knowledge of the data (that can also be validated against new data)\n\n 3. if all goes well, generalize some of the Profiler to use on his other tables\n\n Bobster uses a custom implementation of the \"bootstrap\" non-parametric (i.e, data-driven) statistical estimator.\n\n Bobster configures his Profiler using the YAML configurations and data file locations captured in this fixture.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n verbose_profiler_config_file_path: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"rule_based_profiler\",\n \"bobster_user_workflow_verbose_profiler_config.yml\",\n ),\n )\n\n verbose_profiler_config: str\n with open(verbose_profiler_config_file_path) as f:\n verbose_profiler_config = f.read()\n\n expectation_suite_name_bootstrap_estimator: str = (\n \"bobby_columnar_table_multi_batch_bootstrap_estimator\"\n )\n\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value: int = (\n 5000\n )\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value: float = (\n 1.0e3\n )\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds: float = (\n 3.00\n )\n\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_min_value_mean_value: int = round(\n float(\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value\n )\n - (\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds\n * my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value\n )\n )\n\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_max_value_mean_value: int = round(\n float(\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value\n )\n + (\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds\n * my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value\n )\n )\n\n return {\n \"profiler_config\": verbose_profiler_config,\n \"test_configuration_bootstrap_estimator\": {\n \"expectation_suite_name\": expectation_suite_name_bootstrap_estimator,\n \"expect_table_row_count_to_be_between_mean_value\": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value,\n \"expect_table_row_count_to_be_between_min_value_mean_value\": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_min_value_mean_value,\n \"expect_table_row_count_to_be_between_max_value_mean_value\": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_max_value_mean_value,\n },\n }\n\n\[email protected]\ndef bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000_data_context(\n tmp_path_factory,\n monkeypatch,\n) -> 
DataContext:\n \"\"\"\n This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows\n of a batch sampled from a normal distribution with the mean of 5,000 rows and the standard deviation of 1,000 rows.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n monkeypatch.setattr(AnonymizedUsageStatisticsConfig, \"enabled\", True)\n\n project_path: str = str(tmp_path_factory.mktemp(\"taxi_data_context\"))\n context_path: str = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path: str = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"integration\",\n \"fixtures\",\n \"yellow_tripdata_pandas_fixture\",\n \"great_expectations\",\n \"great_expectations.yml\",\n ),\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n base_directory: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n ),\n )\n file_name_list: List[str] = get_filesystem_one_level_directory_glob_path_list(\n base_directory_path=base_directory, glob_directive=\"*.csv\"\n )\n file_name_list = sorted(file_name_list)\n num_files: int = len(file_name_list)\n\n rnd_num_sample: np.float64\n output_file_lenths: List[int] = [\n round(rnd_num_sample)\n for rnd_num_sample in np.random.normal(loc=5.0e3, scale=1.0e3, size=num_files)\n ]\n\n idx: int\n file_name: str\n\n output_file_name_length_map: Dict[str, int] = {\n file_name_list[idx]: output_file_lenths[idx]\n for idx, file_name in enumerate(file_name_list)\n }\n\n csv_source_path: str\n df: pd.DataFrame\n for file_name in file_name_list:\n csv_source_path = os.path.join(base_directory, file_name)\n df = pd.read_csv(filepath_or_buffer=csv_source_path)\n df = df.sample(\n n=output_file_name_length_map[file_name], replace=False, random_state=1\n )\n # noinspection PyTypeChecker\n df.to_csv(\n path_or_buf=os.path.join(context_path, \"..\", \"data\", file_name), index=False\n )\n\n context: DataContext = DataContext(context_root_dir=context_path)\n assert context.root_directory == context_path\n\n return context\n\n\[email protected]\ndef quentin_columnar_table_multi_batch():\n \"\"\"\n About the \"Quentin\" User Workflow Fixture\n Quentin has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as\n new data is added.\n - He knows what some of the columns are of the accounting/financial/account type, but he is currently interested\n in the range of quantiles of columns capturing financial quantities (column names ending on \"_amount\" suffix).\n He wants to use a configurable profiler to generate a description (ExpectationSuite) about tables so that he can:\n 1. monitor the range of quantiles of columns capturing financial quantities in the tables\n 2. have a place to add his domain knowledge of the data (that can also be validated against new data)\n 3. 
if all goes well, generalize some of the Profiler to use on his other tables\n Quentin uses a custom implementation of the \"bootstrap\" non-parametric (i.e, data-driven) statistical estimator.\n Quentin configures his Profiler using the YAML configurations and data file locations captured in this fixture.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n verbose_profiler_config_file_path: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"rule_based_profiler\",\n \"quentin_user_workflow_verbose_profiler_config.yml\",\n ),\n )\n\n verbose_profiler_config: str\n with open(verbose_profiler_config_file_path) as f:\n verbose_profiler_config = f.read()\n\n expectation_suite_name_bootstrap_estimator: str = (\n \"quentin_columnar_table_multi_batch\"\n )\n\n return {\n \"profiler_config\": verbose_profiler_config,\n \"test_configuration\": {\n \"expectation_suite_name\": expectation_suite_name_bootstrap_estimator,\n \"expect_column_quantile_values_to_be_between_quantile_ranges_by_column\": {\n \"tolls_amount\": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],\n \"fare_amount\": [\n [5.842754275, 6.5],\n [8.675167517, 9.5750000000],\n [13.344354435, 15.650000000],\n ],\n \"tip_amount\": [\n [0.0, 0.0],\n [0.81269502, 1.97259736],\n [2.346049055, 2.993680968],\n ],\n \"total_amount\": [\n [8.2740033, 11.422183043],\n [11.2955000, 14.875000000],\n [16.746263451, 21.327684643],\n ],\n },\n },\n }\n\n\[email protected]\ndef quentin_columnar_table_multi_batch_data_context(\n tmp_path_factory,\n monkeypatch,\n) -> DataContext:\n \"\"\"\n This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows\n of each batch being equal to the original number per log file (10,000 rows).\n \"\"\"\n skip_if_python_below_minimum_version()\n\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n monkeypatch.setattr(AnonymizedUsageStatisticsConfig, \"enabled\", True)\n\n project_path: str = str(tmp_path_factory.mktemp(\"taxi_data_context\"))\n context_path: str = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path: str = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"integration\",\n \"fixtures\",\n \"yellow_tripdata_pandas_fixture\",\n \"great_expectations\",\n \"great_expectations.yml\",\n ),\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n base_directory: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n ),\n )\n file_name_list: List[str] = get_filesystem_one_level_directory_glob_path_list(\n base_directory_path=base_directory, glob_directive=\"*.csv\"\n )\n file_name_list = sorted(file_name_list)\n\n file_name: str\n csv_source_path: str\n for file_name in file_name_list:\n csv_source_path = os.path.join(base_directory, file_name)\n shutil.copy(\n csv_source_path,\n os.path.join(context_path, \"..\", \"data\", file_name),\n )\n\n context: DataContext = DataContext(context_root_dir=context_path)\n assert context.root_directory == context_path\n\n return context\n\n\n# TODO: AJB 20210525 This fixture is not yet used but may be helpful to generate batches for unit tests of multibatch\n# workflows. 
It should probably be extended to add different column types / data.\[email protected]\ndef multibatch_generic_csv_generator():\n \"\"\"\n Construct a series of csv files with many data types for use in multibatch testing\n \"\"\"\n skip_if_python_below_minimum_version()\n\n def _multibatch_generic_csv_generator(\n data_path: str,\n start_date: Optional[datetime.datetime] = None,\n num_event_batches: Optional[int] = 20,\n num_events_per_batch: Optional[int] = 5,\n ) -> List[str]:\n\n if start_date is None:\n start_date = datetime.datetime(2000, 1, 1)\n\n file_list = []\n category_strings = {\n 0: \"category0\",\n 1: \"category1\",\n 2: \"category2\",\n 3: \"category3\",\n 4: \"category4\",\n 5: \"category5\",\n 6: \"category6\",\n }\n for batch_num in range(num_event_batches):\n # generate a dataframe with multiple column types\n batch_start_date = start_date + datetime.timedelta(\n days=(batch_num * num_events_per_batch)\n )\n # TODO: AJB 20210416 Add more column types\n df = pd.DataFrame(\n {\n \"event_date\": [\n (batch_start_date + datetime.timedelta(days=i)).strftime(\n \"%Y-%m-%d\"\n )\n for i in range(num_events_per_batch)\n ],\n \"batch_num\": [batch_num + 1 for _ in range(num_events_per_batch)],\n \"string_cardinality_3\": [\n category_strings[i % 3] for i in range(num_events_per_batch)\n ],\n }\n )\n filename = f\"csv_batch_{batch_num + 1:03}_of_{num_event_batches:03}.csv\"\n file_list.append(filename)\n # noinspection PyTypeChecker\n df.to_csv(\n os.path.join(data_path, filename),\n index_label=\"intra_batch_index\",\n )\n\n return file_list\n\n return _multibatch_generic_csv_generator\n\n\[email protected]\ndef multibatch_generic_csv_generator_context(monkeypatch, empty_data_context):\n skip_if_python_below_minimum_version()\n\n context: DataContext = empty_data_context\n monkeypatch.chdir(context.root_directory)\n data_relative_path = \"../data\"\n data_path = os.path.join(context.root_directory, data_relative_path)\n os.makedirs(data_path, exist_ok=True)\n\n data_connector_base_directory = \"./\"\n monkeypatch.setenv(\"base_directory\", data_connector_base_directory)\n monkeypatch.setenv(\"data_fixtures_root\", data_relative_path)\n\n datasource_name = \"generic_csv_generator\"\n data_connector_name = \"daily_data_connector\"\n asset_name = \"daily_data_asset\"\n datasource_config = rf\"\"\"\nclass_name: Datasource\nmodule_name: great_expectations.datasource\nexecution_engine:\n module_name: great_expectations.execution_engine\n class_name: PandasExecutionEngine\ndata_connectors:\n {data_connector_name}:\n class_name: ConfiguredAssetFilesystemDataConnector\n assets:\n {asset_name}:\n module_name: great_expectations.datasource.data_connector.asset\n group_names:\n - batch_num\n - total_batches\n pattern: csv_batch_(\\d.+)_of_(\\d.+)\\.csv\n reader_options:\n delimiter: \",\"\n class_name: Asset\n base_directory: $data_fixtures_root\n glob_directive: \"*.csv\"\n base_directory: $base_directory\n module_name: great_expectations.datasource.data_connector\n \"\"\"\n\n context.add_datasource(name=datasource_name, **yaml.load(datasource_config))\n\n assert context.list_datasources() == [\n {\n \"class_name\": \"Datasource\",\n \"data_connectors\": {\n data_connector_name: {\n \"assets\": {\n asset_name: {\n \"base_directory\": data_relative_path,\n \"class_name\": \"Asset\",\n \"glob_directive\": \"*.csv\",\n \"group_names\": [\"batch_num\", \"total_batches\"],\n \"module_name\": \"great_expectations.datasource.data_connector.asset\",\n \"pattern\": 
\"csv_batch_(\\\\d.+)_of_(\\\\d.+)\\\\.csv\",\n }\n },\n \"base_directory\": data_connector_base_directory,\n \"class_name\": \"ConfiguredAssetFilesystemDataConnector\",\n \"module_name\": \"great_expectations.datasource.data_connector\",\n }\n },\n \"execution_engine\": {\n \"class_name\": \"PandasExecutionEngine\",\n \"module_name\": \"great_expectations.execution_engine\",\n },\n \"module_name\": \"great_expectations.datasource\",\n \"name\": \"generic_csv_generator\",\n }\n ]\n return context\n"
] | [
[
"numpy.random.normal",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
drudd/pandas | [
"99922b9175c4ca6acb0f42dd17c01c507cbd94d6"
] | [
"pandas/core/algorithms.py"
] | [
"\"\"\"\nGeneric data algorithms. This module is experimental at the moment and not\nintended for public consumption\n\"\"\"\nfrom __future__ import division\nfrom warnings import warn\nimport numpy as np\n\nimport pandas.core.common as com\nimport pandas.algos as algos\nimport pandas.hashtable as htable\nimport pandas.compat as compat\n\ndef match(to_match, values, na_sentinel=-1):\n \"\"\"\n Compute locations of to_match into values\n\n Parameters\n ----------\n to_match : array-like\n values to find positions of\n values : array-like\n Unique set of values\n na_sentinel : int, default -1\n Value to mark \"not found\"\n\n Examples\n --------\n\n Returns\n -------\n match : ndarray of integers\n \"\"\"\n values = com._asarray_tuplesafe(values)\n if issubclass(values.dtype.type, compat.string_types):\n values = np.array(values, dtype='O')\n\n f = lambda htype, caster: _match_generic(to_match, values, htype, caster)\n result = _hashtable_algo(f, values.dtype)\n\n if na_sentinel != -1:\n\n # replace but return a numpy array\n # use a Series because it handles dtype conversions properly\n from pandas.core.series import Series\n result = Series(result.ravel()).replace(-1,na_sentinel).values.reshape(result.shape)\n\n return result\n\n\ndef unique(values):\n \"\"\"\n Compute unique values (not necessarily sorted) efficiently from input array\n of values\n\n Parameters\n ----------\n values : array-like\n\n Returns\n -------\n uniques\n \"\"\"\n values = com._asarray_tuplesafe(values)\n f = lambda htype, caster: _unique_generic(values, htype, caster)\n return _hashtable_algo(f, values.dtype)\n\n\n# def count(values, uniques=None):\n# f = lambda htype, caster: _count_generic(values, htype, caster)\n\n# if uniques is not None:\n# raise NotImplementedError\n# else:\n# return _hashtable_algo(f, values.dtype)\n\n\ndef _hashtable_algo(f, dtype):\n \"\"\"\n f(HashTable, type_caster) -> result\n \"\"\"\n if com.is_float_dtype(dtype):\n return f(htable.Float64HashTable, com._ensure_float64)\n elif com.is_integer_dtype(dtype):\n return f(htable.Int64HashTable, com._ensure_int64)\n else:\n return f(htable.PyObjectHashTable, com._ensure_object)\n\n\ndef _count_generic(values, table_type, type_caster):\n from pandas.core.series import Series\n\n values = type_caster(values)\n table = table_type(min(len(values), 1000000))\n uniques, labels = table.factorize(values)\n\n return Series(counts, index=uniques)\n\n\ndef _match_generic(values, index, table_type, type_caster):\n values = type_caster(values)\n index = type_caster(index)\n table = table_type(min(len(index), 1000000))\n table.map_locations(index)\n return table.lookup(values)\n\n\ndef _unique_generic(values, table_type, type_caster):\n values = type_caster(values)\n table = table_type(min(len(values), 1000000))\n uniques = table.unique(values)\n return type_caster(uniques)\n\n\ndef factorize(values, sort=False, order=None, na_sentinel=-1):\n \"\"\"\n Encode input values as an enumerated type or categorical variable\n\n Parameters\n ----------\n values : ndarray (1-d)\n Sequence\n sort : boolean, default False\n Sort by values\n order :\n na_sentinel: int, default -1\n Value to mark \"not found\"\n\n Returns\n -------\n \"\"\"\n from pandas.tseries.period import PeriodIndex\n vals = np.asarray(values)\n is_datetime = com.is_datetime64_dtype(vals)\n (hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)\n\n table = hash_klass(len(vals))\n uniques = vec_klass()\n labels = table.get_labels(vals, uniques, 0, na_sentinel)\n\n labels = 
com._ensure_platform_int(labels)\n\n uniques = uniques.to_array()\n\n if sort and len(uniques) > 0:\n sorter = uniques.argsort()\n reverse_indexer = np.empty(len(sorter), dtype=np.int_)\n reverse_indexer.put(sorter, np.arange(len(sorter)))\n\n mask = labels < 0\n labels = reverse_indexer.take(labels)\n np.putmask(labels, mask, -1)\n\n uniques = uniques.take(sorter)\n\n if is_datetime:\n uniques = uniques.view('M8[ns]')\n if isinstance(values, PeriodIndex):\n uniques = PeriodIndex(ordinal=uniques, freq=values.freq)\n\n return labels, uniques\n\n\ndef value_counts(values, sort=True, ascending=False, normalize=False,\n bins=None):\n \"\"\"\n Compute a histogram of the counts of non-null values\n\n Parameters\n ----------\n values : ndarray (1-d)\n sort : boolean, default True\n Sort by values\n ascending : boolean, default False\n Sort in ascending order\n normalize: boolean, default False\n If True then compute a relative histogram\n bins : integer, optional\n Rather than count values, group them into half-open bins,\n convenience for pd.cut, only works with numeric data\n\n Returns\n -------\n value_counts : Series\n\n \"\"\"\n from pandas.core.series import Series\n from pandas.tools.tile import cut\n\n values = Series(values).values\n\n if bins is not None:\n try:\n cat, bins = cut(values, bins, retbins=True)\n except TypeError:\n raise TypeError(\"bins argument only works with numeric data.\")\n values = cat.labels\n\n if com.is_integer_dtype(values.dtype):\n values = com._ensure_int64(values)\n keys, counts = htable.value_count_int64(values)\n\n elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):\n dtype = values.dtype\n values = values.view(np.int64)\n keys, counts = htable.value_count_int64(values)\n\n # convert the keys back to the dtype we came in\n keys = Series(keys, dtype=dtype)\n\n else:\n mask = com.isnull(values)\n values = com._ensure_object(values)\n keys, counts = htable.value_count_object(values, mask)\n\n result = Series(counts, index=com._values_from_object(keys))\n\n if bins is not None:\n # TODO: This next line should be more efficient\n result = result.reindex(np.arange(len(cat.levels)), fill_value=0)\n result.index = bins[:-1]\n\n if sort:\n result.sort()\n if not ascending:\n result = result[::-1]\n\n if normalize:\n result = result / float(values.size)\n\n return result\n\n\ndef mode(values):\n \"\"\"Returns the mode or mode(s) of the passed Series or ndarray (sorted)\"\"\"\n # must sort because hash order isn't necessarily defined.\n from pandas.core.series import Series\n\n if isinstance(values, Series):\n constructor = values._constructor\n values = values.values\n else:\n values = np.asanyarray(values)\n constructor = Series\n\n dtype = values.dtype\n if com.is_integer_dtype(values.dtype):\n values = com._ensure_int64(values)\n result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)\n\n elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):\n dtype = values.dtype\n values = values.view(np.int64)\n result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)\n\n else:\n mask = com.isnull(values)\n values = com._ensure_object(values)\n res = htable.mode_object(values, mask)\n try:\n res = sorted(res)\n except TypeError as e:\n warn(\"Unable to sort modes: %s\" % e)\n result = constructor(res, dtype=dtype)\n\n return result\n\n\ndef rank(values, axis=0, method='average', na_option='keep',\n ascending=True):\n \"\"\"\n\n \"\"\"\n if values.ndim == 1:\n f, values = _get_data_algo(values, _rank1d_functions)\n ranks 
= f(values, ties_method=method, ascending=ascending,\n na_option=na_option)\n elif values.ndim == 2:\n f, values = _get_data_algo(values, _rank2d_functions)\n ranks = f(values, axis=axis, ties_method=method,\n ascending=ascending, na_option=na_option)\n return ranks\n\n\ndef quantile(x, q, interpolation_method='fraction'):\n \"\"\"\n Compute sample quantile or quantiles of the input array. For example, q=0.5\n computes the median.\n\n The `interpolation_method` parameter supports three values, namely\n `fraction` (default), `lower` and `higher`. Interpolation is done only,\n if the desired quantile lies between two data points `i` and `j`. For\n `fraction`, the result is an interpolated value between `i` and `j`;\n for `lower`, the result is `i`, for `higher` the result is `j`.\n\n Parameters\n ----------\n x : ndarray\n Values from which to extract score.\n q : scalar or array\n Percentile at which to extract score.\n interpolation_method : {'fraction', 'lower', 'higher'}, optional\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n - fraction: `i + (j - i)*fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n -lower: `i`.\n - higher: `j`.\n\n Returns\n -------\n score : float\n Score at percentile.\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(100)\n >>> stats.scoreatpercentile(a, 50)\n 49.5\n\n \"\"\"\n x = np.asarray(x)\n mask = com.isnull(x)\n\n x = x[-mask]\n\n values = np.sort(x)\n\n def _get_score(at):\n if len(values) == 0:\n return np.nan\n\n idx = at * (len(values) - 1)\n if idx % 1 == 0:\n score = values[idx]\n else:\n if interpolation_method == 'fraction':\n score = _interpolate(values[int(idx)], values[int(idx) + 1],\n idx % 1)\n elif interpolation_method == 'lower':\n score = values[np.floor(idx)]\n elif interpolation_method == 'higher':\n score = values[np.ceil(idx)]\n else:\n raise ValueError(\"interpolation_method can only be 'fraction' \"\n \", 'lower' or 'higher'\")\n\n return score\n\n if np.isscalar(q):\n return _get_score(q)\n else:\n q = np.asarray(q, np.float64)\n return algos.arrmap_float64(q, _get_score)\n\n\ndef _interpolate(a, b, fraction):\n \"\"\"Returns the point at the given fraction between a and b, where\n 'fraction' must be between 0 and 1.\n \"\"\"\n return a + (b - a) * fraction\n\n\ndef _get_data_algo(values, func_map):\n if com.is_float_dtype(values):\n f = func_map['float64']\n values = com._ensure_float64(values)\n elif com.is_datetime64_dtype(values):\n f = func_map['int64']\n values = values.view('i8')\n elif com.is_integer_dtype(values):\n f = func_map['int64']\n values = com._ensure_int64(values)\n else:\n f = func_map['generic']\n values = com._ensure_object(values)\n return f, values\n\n\ndef group_position(*args):\n \"\"\"\n Get group position\n \"\"\"\n from collections import defaultdict\n table = defaultdict(int)\n\n result = []\n for tup in zip(*args):\n result.append(table[tup])\n table[tup] += 1\n\n return result\n\n\n_rank1d_functions = {\n 'float64': algos.rank_1d_float64,\n 'int64': algos.rank_1d_int64,\n 'generic': algos.rank_1d_generic\n}\n\n_rank2d_functions = {\n 'float64': algos.rank_2d_float64,\n 'int64': algos.rank_2d_int64,\n 'generic': algos.rank_2d_generic\n}\n\n_hashtables = {\n 'float64': (htable.Float64HashTable, htable.Float64Vector),\n 'int64': (htable.Int64HashTable, htable.Int64Vector),\n 'generic': (htable.PyObjectHashTable, htable.ObjectVector)\n}\n"
] | [
[
"pandas.core.common.is_integer_dtype",
"numpy.asarray",
"pandas.core.common._ensure_object",
"pandas.core.common._ensure_platform_int",
"pandas.core.series.Series",
"pandas.algos.arrmap_float64",
"pandas.core.common.is_float_dtype",
"pandas.hashtable.mode_object",
"numpy.ceil",
"numpy.asanyarray",
"pandas.core.common._ensure_float64",
"pandas.core.common._ensure_int64",
"pandas.tools.tile.cut",
"pandas.tseries.period.PeriodIndex",
"numpy.putmask",
"pandas.hashtable.value_count_int64",
"pandas.hashtable.mode_int64",
"numpy.floor",
"numpy.array",
"pandas.hashtable.value_count_object",
"pandas.core.common._asarray_tuplesafe",
"numpy.sort",
"pandas.core.common._values_from_object",
"pandas.core.common.is_datetime64_dtype",
"pandas.core.common.isnull",
"numpy.isscalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"1.1",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
flaght/zipline | [
"15b8832421e2b1ba98ec9938ceb794f64ad581b5",
"0848a8a4862fd8bbe7ba64654e6bc731b4b622b7"
] | [
"tests/test_perf_tracking.py",
"tests/test_api_shim.py"
] | [
"#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\n\nimport copy\nfrom datetime import (\n datetime,\n timedelta,\n)\nimport logging\n\nimport nose.tools as nt\nimport pytz\n\nimport pandas as pd\nimport numpy as np\nfrom six.moves import range, zip\n\nfrom zipline.assets import Asset\nfrom zipline.assets.synthetic import make_simple_equity_info\nfrom zipline.data.us_equity_pricing import (\n SQLiteAdjustmentWriter,\n SQLiteAdjustmentReader,\n)\nimport zipline.utils.factory as factory\nimport zipline.finance.performance as perf\nfrom zipline.finance.transaction import create_transaction\nimport zipline.utils.math_utils as zp_math\n\nfrom zipline.finance.blotter import Order\nfrom zipline.finance.performance.position import Position\nfrom zipline.utils.factory import create_simulation_parameters\nfrom zipline.utils.serialization_utils import (\n loads_with_persistent_ids, dumps_with_persistent_ids\n)\nfrom zipline.testing import (\n MockDailyBarReader,\n create_data_portal_from_trade_history,\n create_empty_splits_mergers_frame,\n tmp_trading_env,\n)\nfrom zipline.testing.fixtures import (\n WithInstanceTmpDir,\n WithSimParams,\n WithTmpDir,\n WithTradingEnvironment,\n WithTradingCalendars,\n ZiplineTestCase,\n)\nfrom zipline.utils.calendars import get_calendar\n\nlogger = logging.getLogger('Test Perf Tracking')\n\noneday = timedelta(days=1)\ntradingday = timedelta(hours=6, minutes=30)\n\n# nose.tools changed name in python 3\nif not hasattr(nt, 'assert_count_equal'):\n nt.assert_count_equal = nt.assert_items_equal\n\n\ndef check_perf_period(pp,\n gross_leverage,\n net_leverage,\n long_exposure,\n longs_count,\n short_exposure,\n shorts_count):\n\n perf_data = pp.to_dict()\n np.testing.assert_allclose(\n gross_leverage, perf_data['gross_leverage'], rtol=1e-3)\n np.testing.assert_allclose(\n net_leverage, perf_data['net_leverage'], rtol=1e-3)\n np.testing.assert_allclose(\n long_exposure, perf_data['long_exposure'], rtol=1e-3)\n np.testing.assert_allclose(\n longs_count, perf_data['longs_count'], rtol=1e-3)\n np.testing.assert_allclose(\n short_exposure, perf_data['short_exposure'], rtol=1e-3)\n np.testing.assert_allclose(\n shorts_count, perf_data['shorts_count'], rtol=1e-3)\n\n\ndef check_account(account,\n settled_cash,\n equity_with_loan,\n total_positions_value,\n total_positions_exposure,\n regt_equity,\n available_funds,\n excess_liquidity,\n cushion,\n leverage,\n net_leverage,\n net_liquidation):\n # this is a long only portfolio that is only partially invested\n # so net and gross leverage are equal.\n\n np.testing.assert_allclose(settled_cash,\n account.settled_cash, rtol=1e-3)\n np.testing.assert_allclose(equity_with_loan,\n account.equity_with_loan, rtol=1e-3)\n np.testing.assert_allclose(total_positions_value,\n account.total_positions_value, rtol=1e-3)\n np.testing.assert_allclose(total_positions_exposure,\n account.total_positions_exposure, rtol=1e-3)\n np.testing.assert_allclose(regt_equity,\n 
account.regt_equity, rtol=1e-3)\n np.testing.assert_allclose(available_funds,\n account.available_funds, rtol=1e-3)\n np.testing.assert_allclose(excess_liquidity,\n account.excess_liquidity, rtol=1e-3)\n np.testing.assert_allclose(cushion,\n account.cushion, rtol=1e-3)\n np.testing.assert_allclose(leverage, account.leverage, rtol=1e-3)\n np.testing.assert_allclose(net_leverage,\n account.net_leverage, rtol=1e-3)\n np.testing.assert_allclose(net_liquidation,\n account.net_liquidation, rtol=1e-3)\n\n\ndef create_txn(asset, dt, price, amount):\n \"\"\"\n Create a fake transaction to be filled and processed prior to the execution\n of a given trade event.\n \"\"\"\n if not isinstance(asset, Asset):\n raise ValueError(\"pass an asset to create_txn\")\n\n mock_order = Order(dt, asset, amount, id=None)\n return create_transaction(mock_order, dt, price, amount)\n\n\ndef calculate_results(sim_params,\n env,\n data_portal,\n splits=None,\n txns=None,\n commissions=None):\n \"\"\"\n Run the given events through a stripped down version of the loop in\n AlgorithmSimulator.transform.\n\n IMPORTANT NOTE FOR TEST WRITERS/READERS:\n\n This loop has some wonky logic for the order of event processing for\n datasource types. This exists mostly to accommodate legacy tests that were\n making assumptions about how events would be sorted.\n\n In particular:\n\n - Dividends passed for a given date are processed PRIOR to any events\n for that date.\n - Splits passed for a given date are process AFTER any events for that\n date.\n\n Tests that use this helper should not be considered useful guarantees of\n the behavior of AlgorithmSimulator on a stream containing the same events\n unless the subgroups have been explicitly re-sorted in this way.\n \"\"\"\n\n txns = txns or []\n splits = splits or {}\n commissions = commissions or {}\n\n perf_tracker = perf.PerformanceTracker(\n sim_params, get_calendar(\"NYSE\"), env\n )\n\n results = []\n\n for date in sim_params.sessions:\n for txn in filter(lambda txn: txn.dt == date, txns):\n # Process txns for this date.\n perf_tracker.process_transaction(txn)\n\n try:\n commissions_for_date = commissions[date]\n for comm in commissions_for_date:\n perf_tracker.process_commission(comm)\n except KeyError:\n pass\n\n try:\n splits_for_date = splits[date]\n perf_tracker.handle_splits(splits_for_date)\n except KeyError:\n pass\n\n msg = perf_tracker.handle_market_close(date, data_portal)\n perf_tracker.position_tracker.sync_last_sale_prices(\n date, False, data_portal,\n )\n msg['account'] = perf_tracker.get_account(True)\n results.append(copy.deepcopy(msg))\n return results\n\n\ndef check_perf_tracker_serialization(perf_tracker):\n scalar_keys = [\n 'emission_rate',\n 'txn_count',\n 'market_open',\n 'last_close',\n 'start_session',\n 'day_count',\n 'capital_base',\n 'market_close',\n 'saved_dt',\n 'period_end',\n 'total_days',\n ]\n p_string = dumps_with_persistent_ids(perf_tracker)\n\n test = loads_with_persistent_ids(p_string, env=perf_tracker.env)\n\n for k in scalar_keys:\n nt.assert_equal(getattr(test, k), getattr(perf_tracker, k), k)\n\n perf_periods = (\n test.cumulative_performance,\n test.todays_performance\n )\n for period in perf_periods:\n nt.assert_true(hasattr(period, '_position_tracker'))\n\n\ndef setup_env_data(env, sim_params, sids, futures_sids=[]):\n data = {}\n for sid in sids:\n data[sid] = {\n \"start_date\": sim_params.sessions[0],\n \"end_date\": get_calendar(\"NYSE\").next_session_label(\n sim_params.sessions[-1]\n )\n }\n\n 
env.write_data(equities_data=data)\n\n futures_data = {}\n for future_sid in futures_sids:\n futures_data[future_sid] = {\n \"start_date\": sim_params.sessions[0],\n # (obviously) FIXME once we have a future calendar\n \"end_date\": get_calendar(\"NYSE\").next_session_label(\n sim_params.sessions[-1]\n ),\n \"multiplier\": 100\n }\n\n env.write_data(futures_data=futures_data)\n\n\nclass TestSplitPerformance(WithSimParams, WithTmpDir, ZiplineTestCase):\n START_DATE = pd.Timestamp('2006-01-03', tz='utc')\n END_DATE = pd.Timestamp('2006-01-04', tz='utc')\n SIM_PARAMS_CAPITAL_BASE = 10e3\n\n ASSET_FINDER_EQUITY_SIDS = 1, 2\n\n @classmethod\n def init_class_fixtures(cls):\n super(TestSplitPerformance, cls).init_class_fixtures()\n cls.asset1 = cls.env.asset_finder.retrieve_asset(1)\n\n def test_multiple_splits(self):\n # if multiple positions all have splits at the same time, verify that\n # the total leftover cash is correct\n perf_tracker = perf.PerformanceTracker(self.sim_params,\n self.trading_calendar,\n self.env)\n\n asset1 = self.asset_finder.retrieve_asset(1)\n asset2 = self.asset_finder.retrieve_asset(2)\n\n perf_tracker.position_tracker.positions[1] = \\\n Position(asset1, amount=10, cost_basis=10, last_sale_price=11)\n\n perf_tracker.position_tracker.positions[2] = \\\n Position(asset2, amount=10, cost_basis=10, last_sale_price=11)\n\n leftover_cash = perf_tracker.position_tracker.handle_splits(\n [(1, 0.333), (2, 0.333)]\n )\n\n # we used to have 10 shares that each cost us $10, total $100\n # now we have 33 shares that each cost us $3.33, total $99.9\n # each position returns $0.10 as leftover cash\n self.assertEqual(0.2, leftover_cash)\n\n def test_split_long_position(self):\n events = factory.create_trade_history(\n self.asset1,\n # TODO: Should we provide adjusted prices in the tests, or provide\n # raw prices and adjust via DataPortal?\n [20, 60],\n [100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n # set up a long position in sid 1\n # 100 shares at $20 apiece = $2000 position\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.tmpdir,\n self.sim_params,\n {1: events},\n )\n\n txns = [create_txn(self.asset1, events[0].dt, 20, 100)]\n\n # set up a split with ratio 3 occurring at the start of the second\n # day.\n splits = {\n events[1].dt: [(1, 3)]\n }\n\n results = calculate_results(self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n splits=splits)\n\n # should have 33 shares (at $60 apiece) and $20 in cash\n self.assertEqual(2, len(results))\n\n latest_positions = results[1]['daily_perf']['positions']\n self.assertEqual(1, len(latest_positions))\n\n # check the last position to make sure it's been updated\n position = latest_positions[0]\n\n self.assertEqual(1, position['sid'])\n self.assertEqual(33, position['amount'])\n self.assertEqual(60, position['cost_basis'])\n self.assertEqual(60, position['last_sale_price'])\n\n # since we started with $10000, and we spent $2000 on the\n # position, but then got $20 back, we should have $8020\n # (or close to it) in cash.\n\n # we won't get exactly 8020 because sometimes a split is\n # denoted as a ratio like 0.3333, and we lose some digits\n # of precision. 
thus, make sure we're pretty close.\n daily_perf = results[1]['daily_perf']\n\n self.assertTrue(\n zp_math.tolerant_equals(8020,\n daily_perf['ending_cash'], 1),\n \"ending_cash was {0}\".format(daily_perf['ending_cash']))\n\n # Validate that the account attributes were updated.\n account = results[1]['account']\n self.assertEqual(float('inf'), account.day_trades_remaining)\n # this is a long only portfolio that is only partially invested\n # so net and gross leverage are equal.\n np.testing.assert_allclose(0.198, account.leverage, rtol=1e-3)\n np.testing.assert_allclose(0.198, account.net_leverage, rtol=1e-3)\n np.testing.assert_allclose(8020, account.regt_equity, rtol=1e-3)\n self.assertEqual(float('inf'), account.regt_margin)\n np.testing.assert_allclose(8020, account.available_funds, rtol=1e-3)\n self.assertEqual(0, account.maintenance_margin_requirement)\n np.testing.assert_allclose(10000,\n account.equity_with_loan, rtol=1e-3)\n self.assertEqual(float('inf'), account.buying_power)\n self.assertEqual(0, account.initial_margin_requirement)\n np.testing.assert_allclose(8020, account.excess_liquidity,\n rtol=1e-3)\n np.testing.assert_allclose(8020, account.settled_cash, rtol=1e-3)\n np.testing.assert_allclose(10000, account.net_liquidation,\n rtol=1e-3)\n np.testing.assert_allclose(0.802, account.cushion, rtol=1e-3)\n np.testing.assert_allclose(1980, account.total_positions_value,\n rtol=1e-3)\n self.assertEqual(0, account.accrued_interest)\n\n for i, result in enumerate(results):\n for perf_kind in ('daily_perf', 'cumulative_perf'):\n perf_result = result[perf_kind]\n # prices aren't changing, so pnl and returns should be 0.0\n self.assertEqual(0.0, perf_result['pnl'],\n \"day %s %s pnl %s instead of 0.0\" %\n (i, perf_kind, perf_result['pnl']))\n self.assertEqual(0.0, perf_result['returns'],\n \"day %s %s returns %s instead of 0.0\" %\n (i, perf_kind, perf_result['returns']))\n\n\nclass TestDividendPerformance(WithSimParams,\n WithInstanceTmpDir,\n ZiplineTestCase):\n START_DATE = pd.Timestamp('2006-01-03', tz='utc')\n END_DATE = pd.Timestamp('2006-01-10', tz='utc')\n ASSET_FINDER_EQUITY_SIDS = 1, 2\n SIM_PARAMS_CAPITAL_BASE = 10e3\n\n @classmethod\n def init_class_fixtures(cls):\n super(TestDividendPerformance, cls).init_class_fixtures()\n cls.asset1 = cls.asset_finder.retrieve_asset(1)\n cls.asset2 = cls.asset_finder.retrieve_asset(2)\n\n def test_market_hours_calculations(self):\n # DST in US/Eastern began on Sunday March 14, 2010\n before = datetime(2010, 3, 12, 14, 31, tzinfo=pytz.utc)\n after = factory.get_next_trading_dt(\n before,\n timedelta(days=1),\n self.trading_calendar,\n )\n self.assertEqual(after.hour, 13)\n\n def test_long_position_receives_dividend(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[2].dt], 
dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n\n # Simulate a transaction being filled prior to the ex_date.\n txns = [create_txn(self.asset1, events[0].dt, 10.0, 100)]\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.1, 0.1, 0.1, 0.1])\n daily_returns = [event['daily_perf']['returns']\n for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.10, 0.0, 0.0, 0.0])\n cash_flows = [event['daily_perf']['capital_used']\n for event in results]\n self.assertEqual(cash_flows, [-1000, 0, 1000, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 0, 0, 0])\n cash_pos = \\\n [event['cumulative_perf']['ending_cash'] for event in results]\n self.assertEqual(cash_pos, [9000, 9000, 10000, 10000, 10000, 10000])\n\n def test_long_position_receives_stock_dividend(self):\n # post some trades in the market\n events = {}\n for asset in [self.asset1, self.asset2]:\n events[asset.sid] = factory.create_trade_history(\n asset,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([], dtype=np.uint32),\n 'amount': np.array([], dtype=np.float64),\n 'declared_date': np.array([], dtype='datetime64[ns]'),\n 'ex_date': np.array([], dtype='datetime64[ns]'),\n 'pay_date': np.array([], dtype='datetime64[ns]'),\n 'record_date': np.array([], dtype='datetime64[ns]'),\n })\n sid_1 = events[1]\n stock_dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'payment_sid': np.array([2], dtype=np.uint32),\n 'ratio': np.array([2], dtype=np.float64),\n 'declared_date': np.array([sid_1[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([sid_1[1].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([sid_1[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([sid_1[2].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends, stock_dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n events,\n )\n\n data_portal._adjustment_reader = adjustment_reader\n txns = [create_txn(self.asset1, events[1][0].dt, 10.0, 100)]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.2, 0.2, 0.2, 0.2])\n daily_returns = [event['daily_perf']['returns']\n for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.2, 0.0, 0.0, 0.0])\n 
cash_flows = [event['daily_perf']['capital_used']\n for event in results]\n self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [-1000] * 6)\n cash_pos = \\\n [event['cumulative_perf']['ending_cash'] for event in results]\n self.assertEqual(cash_pos, [9000] * 6)\n\n def test_long_position_purchased_on_ex_date_receives_no_dividend(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n\n # Simulate a transaction being filled on the ex_date.\n txns = [create_txn(self.asset1, events[1].dt, 10.0, 100)]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [0, -1000, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows,\n [0, -1000, -1000, -1000, -1000, -1000])\n\n def test_selling_before_dividend_payment_still_gets_paid(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[3].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n 
self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n\n buy_txn = create_txn(self.asset1, events[0].dt, 10.0, 100)\n sell_txn = create_txn(self.asset1, events[2].dt, 10.0, -100)\n txns = [buy_txn, sell_txn]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0, 0, 0, 0.1, 0.1, 0.1])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0, 0, 0, 0.1, 0, 0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [-1000, 0, 1000, 1000, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows,\n [-1000, -1000, 0, 1000, 1000, 1000])\n\n def test_buy_and_sell_before_ex(self):\n # need a six-day simparam\n\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.0], dtype=np.float64),\n 'declared_date': np.array([events[3].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[4].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[5].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[4].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n buy_txn = create_txn(self.asset1, events[1].dt, 10.0, 100)\n sell_txn = create_txn(self.asset1, events[2].dt, 10.0, -100)\n txns = [buy_txn, sell_txn]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [0, -1000, 1000, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [0, -1000, 0, 0, 0, 0])\n\n def test_ending_before_pay_date(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n pay_date = self.sim_params.first_open\n # find pay date that is much later.\n for i in range(30):\n pay_date = 
factory.get_next_trading_dt(pay_date, oneday,\n self.trading_calendar)\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([pay_date], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n txns = [create_txn(self.asset1, events[1].dt, 10.0, 100)]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0, 0, 0, 0.0, 0.0, 0.0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [0, -1000, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(\n cumulative_cash_flows,\n [0, -1000, -1000, -1000, -1000, -1000]\n )\n\n def test_short_position_pays_dividend(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[3].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n txns = [create_txn(self.asset1, events[1].dt, 10.0, -100)]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, -0.1, -0.1, -0.1])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.0, -0.1, 0.0, 0.0])\n cash_flows = [event['daily_perf']['capital_used'] 
for event in results]\n self.assertEqual(cash_flows, [0, 1000, 0, -1000, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [0, 1000, 1000, 0, 0, 0])\n\n def test_no_position_receives_no_dividend(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [0, 0, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [0, 0, 0, 0, 0, 0])\n\n def test_no_dividend_at_simulation_end(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[-3].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[-2].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array(\n [self.trading_calendar.next_session_label(\n self.trading_calendar.minute_to_session_label(\n events[-1].dt\n )\n )],\n dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n # Set the last day to be the last event\n sim_params = create_simulation_parameters(\n num_days=6,\n capital_base=10e3,\n start=self.sim_params.start_session,\n end=self.sim_params.end_session\n )\n\n sim_params = sim_params.create_new(\n sim_params.start_session,\n events[-1].dt\n 
)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n # Simulate a transaction being filled prior to the ex_date.\n txns = [create_txn(self.asset1, events[0].dt, 10.0, 100)]\n results = calculate_results(\n sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows,\n [-1000, -1000, -1000, -1000, -1000, -1000])\n\n\nclass TestDividendPerformanceHolidayStyle(TestDividendPerformance):\n\n # The holiday tests begins the simulation on the day\n # before Thanksgiving, so that the next trading day is\n # two days ahead. Any tests that hard code events\n # to be start + oneday will fail, since those events will\n # be skipped by the simulation.\n START_DATE = pd.Timestamp('2003-11-30', tz='utc')\n END_DATE = pd.Timestamp('2003-12-08', tz='utc')\n\n\nclass TestPositionPerformance(WithInstanceTmpDir, WithTradingCalendars,\n ZiplineTestCase):\n\n def create_environment_stuff(self,\n num_days=4,\n sids=[1, 2],\n futures_sids=[3]):\n start = pd.Timestamp('2006-01-01', tz='utc')\n end = start + timedelta(days=num_days * 2)\n equities = make_simple_equity_info(sids, start, end)\n futures = pd.DataFrame.from_dict(\n {\n sid: {\n 'start_date': start,\n 'end_date': end,\n 'multiplier': 100,\n 'exchange': \"TEST\",\n }\n for sid in futures_sids\n },\n orient='index',\n )\n self.env = self.enter_instance_context(tmp_trading_env(\n equities=equities,\n futures=futures,\n ))\n self.sim_params = create_simulation_parameters(\n start=start,\n num_days=num_days,\n )\n\n self.finder = self.env.asset_finder\n self.asset1 = self.env.asset_finder.retrieve_asset(1)\n self.asset2 = self.env.asset_finder.retrieve_asset(2)\n self.asset3 = self.env.asset_finder.retrieve_asset(3)\n\n def test_long_short_positions(self):\n \"\"\"\n start with $1000\n buy 100 stock1 shares at $10\n sell short 100 stock2 shares at $10\n stock1 then goes down to $9\n stock2 goes to $11\n \"\"\"\n self.create_environment_stuff()\n\n trades_1 = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 9],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n trades_2 = factory.create_trade_history(\n self.asset2,\n [10, 10, 10, 11],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades_1, 2: trades_2}\n )\n\n txn1 = create_txn(self.asset1, trades_1[0].dt, 10.0, 100)\n txn2 = create_txn(self.asset2, trades_1[0].dt, 10.0, -100)\n\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n 
pt.execute_transaction(txn1)\n pp.handle_execution(txn1)\n pt.execute_transaction(txn2)\n pp.handle_execution(txn2)\n\n dt = trades_1[-2].dt\n pt.sync_last_sale_prices(dt, False, data_portal)\n\n pp.calculate_performance()\n\n check_perf_period(\n pp,\n gross_leverage=2.0,\n net_leverage=0.0,\n long_exposure=1000.0,\n longs_count=1,\n short_exposure=-1000.0,\n shorts_count=1)\n # Validate that the account attributes were updated.\n account = pp.as_account()\n check_account(account,\n settled_cash=1000.0,\n equity_with_loan=1000.0,\n total_positions_value=0.0,\n total_positions_exposure=0.0,\n regt_equity=1000.0,\n available_funds=1000.0,\n excess_liquidity=1000.0,\n cushion=1.0,\n leverage=2.0,\n net_leverage=0.0,\n net_liquidation=1000.0)\n\n dt = trades_1[-1].dt\n pt.sync_last_sale_prices(dt, False, data_portal)\n\n pp.calculate_performance()\n\n # Validate that the account attributes were updated.\n account = pp.as_account()\n\n check_perf_period(\n pp,\n gross_leverage=2.5,\n net_leverage=-0.25,\n long_exposure=900.0,\n longs_count=1,\n short_exposure=-1100.0,\n shorts_count=1)\n\n check_account(account,\n settled_cash=1000.0,\n equity_with_loan=800.0,\n total_positions_value=-200.0,\n total_positions_exposure=-200.0,\n regt_equity=1000.0,\n available_funds=1000.0,\n excess_liquidity=1000.0,\n cushion=1.25,\n leverage=2.5,\n net_leverage=-0.25,\n net_liquidation=800.0)\n\n def test_levered_long_position(self):\n \"\"\"\n start with $1,000, then buy 1000 shares at $10.\n price goes to $11\n \"\"\"\n # post some trades in the market\n\n self.create_environment_stuff()\n\n trades = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 11],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n txn = create_txn(self.asset1, trades[1].dt, 10.0, 1000)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n pp.calculate_performance()\n\n check_perf_period(\n pp,\n gross_leverage=10.0,\n net_leverage=10.0,\n long_exposure=10000.0,\n longs_count=1,\n short_exposure=0.0,\n shorts_count=0)\n\n # Validate that the account attributes were updated.\n pt.sync_last_sale_prices(trades[-2].dt, False, data_portal)\n\n # Validate that the account attributes were updated.\n account = pp.as_account()\n check_account(account,\n settled_cash=-9000.0,\n equity_with_loan=1000.0,\n total_positions_value=10000.0,\n total_positions_exposure=10000.0,\n regt_equity=-9000.0,\n available_funds=-9000.0,\n excess_liquidity=-9000.0,\n cushion=-9.0,\n leverage=10.0,\n net_leverage=10.0,\n net_liquidation=1000.0)\n\n # now simulate a price jump to $11\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n check_perf_period(\n pp,\n gross_leverage=5.5,\n net_leverage=5.5,\n long_exposure=11000.0,\n longs_count=1,\n short_exposure=0.0,\n shorts_count=0)\n\n # Validate that the account attributes were updated.\n account = pp.as_account()\n\n check_account(account,\n settled_cash=-9000.0,\n equity_with_loan=2000.0,\n total_positions_value=11000.0,\n total_positions_exposure=11000.0,\n regt_equity=-9000.0,\n available_funds=-9000.0,\n excess_liquidity=-9000.0,\n 
cushion=-4.5,\n leverage=5.5,\n net_leverage=5.5,\n net_liquidation=2000.0)\n\n def test_long_position(self):\n \"\"\"\n verify that the performance period calculates properly for a\n single buy transaction\n \"\"\"\n self.create_environment_stuff()\n\n # post some trades in the market\n trades = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 11],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n txn = create_txn(self.asset1, trades[1].dt, 10.0, 100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency,\n period_open=self.sim_params.start_session,\n period_close=self.sim_params.end_session)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n # This verifies that the last sale price is being correctly\n # set in the positions. If this is not the case then returns can\n # incorrectly show as sharply dipping if a transaction arrives\n # before a trade. This is caused by returns being based on holding\n # stocks with a last sale price of 0.\n self.assertEqual(pp.positions[1].last_sale_price, 10.0)\n\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n self.assertEqual(\n pp.cash_flow,\n -1 * txn.price * txn.amount,\n \"capital used should be equal to the opposite of the transaction \\\n cost of sole txn in test\"\n )\n\n self.assertEqual(\n len(pp.positions),\n 1,\n \"should be just one position\")\n\n self.assertEqual(\n pp.positions[1].sid,\n txn.sid,\n \"position should be in security with id 1\")\n\n self.assertEqual(\n pp.positions[1].amount,\n txn.amount,\n \"should have a position of {sharecount} shares\".format(\n sharecount=txn.amount\n )\n )\n\n self.assertEqual(\n pp.positions[1].cost_basis,\n txn.price,\n \"should have a cost basis of 10\"\n )\n\n self.assertEqual(\n pp.positions[1].last_sale_price,\n trades[-1].price,\n \"last sale should be same as last trade. 
\\\n expected {exp} actual {act}\".format(\n exp=trades[-1].price,\n act=pp.positions[1].last_sale_price)\n )\n\n self.assertEqual(\n pp.ending_value,\n 1100,\n \"ending value should be price of last trade times number of \\\n shares in position\"\n )\n\n self.assertEqual(pp.pnl, 100, \"gain of 1 on 100 shares should be 100\")\n\n check_perf_period(\n pp,\n gross_leverage=1.0,\n net_leverage=1.0,\n long_exposure=1100.0,\n longs_count=1,\n short_exposure=0.0,\n shorts_count=0)\n\n # Validate that the account attributes were updated.\n account = pp.as_account()\n check_account(account,\n settled_cash=0.0,\n equity_with_loan=1100.0,\n total_positions_value=1100.0,\n total_positions_exposure=1100.0,\n regt_equity=0.0,\n available_funds=0.0,\n excess_liquidity=0.0,\n cushion=0.0,\n leverage=1.0,\n net_leverage=1.0,\n net_liquidation=1100.0)\n\n def test_short_position(self):\n \"\"\"verify that the performance period calculates properly for a \\\nsingle short-sale transaction\"\"\"\n self.create_environment_stuff(num_days=6)\n\n trades = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 11, 10, 9],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n trades_1 = trades[:-2]\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n\n txn = create_txn(self.asset1, trades[1].dt, 10.0, -100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(\n 1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n pt.sync_last_sale_prices(trades_1[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n self.assertEqual(\n pp.cash_flow,\n -1 * txn.price * txn.amount,\n \"capital used should be equal to the opposite of the transaction\\\n cost of sole txn in test\"\n )\n\n self.assertEqual(\n len(pp.positions),\n 1,\n \"should be just one position\")\n\n self.assertEqual(\n pp.positions[1].sid,\n txn.sid,\n \"position should be in security from the transaction\"\n )\n\n self.assertEqual(\n pp.positions[1].amount,\n -100,\n \"should have a position of -100 shares\"\n )\n\n self.assertEqual(\n pp.positions[1].cost_basis,\n txn.price,\n \"should have a cost basis of 10\"\n )\n\n self.assertEqual(\n pp.positions[1].last_sale_price,\n trades_1[-1].price,\n \"last sale should be price of last trade\"\n )\n\n self.assertEqual(\n pp.ending_value,\n -1100,\n \"ending value should be price of last trade times number of \\\n shares in position\"\n )\n\n self.assertEqual(pp.pnl, -100, \"gain of 1 on 100 shares should be 100\")\n\n # simulate additional trades, and ensure that the position value\n # reflects the new price\n trades_2 = trades[-2:]\n\n # simulate a rollover to a new period\n pp.rollover()\n\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n self.assertEqual(\n pp.cash_flow,\n 0,\n \"capital used should be zero, there were no transactions in \\\n performance period\"\n )\n\n self.assertEqual(\n len(pp.positions),\n 1,\n \"should be just one position\"\n )\n\n self.assertEqual(\n pp.positions[1].sid,\n txn.sid,\n \"position should be in security from the transaction\"\n )\n\n self.assertEqual(\n pp.positions[1].amount,\n -100,\n \"should have a position of -100 shares\"\n )\n\n self.assertEqual(\n pp.positions[1].cost_basis,\n 
txn.price,\n \"should have a cost basis of 10\"\n )\n\n self.assertEqual(\n pp.positions[1].last_sale_price,\n trades_2[-1].price,\n \"last sale should be price of last trade\"\n )\n\n self.assertEqual(\n pp.ending_value,\n -900,\n \"ending value should be price of last trade times number of \\\n shares in position\")\n\n self.assertEqual(\n pp.pnl,\n 200,\n \"drop of 2 on -100 shares should be 200\"\n )\n\n # now run a performance period encompassing the entire trade sample.\n ptTotal = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n ppTotal = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n ppTotal.position_tracker = pt\n\n ptTotal.execute_transaction(txn)\n ppTotal.handle_execution(txn)\n\n ptTotal.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n ppTotal.calculate_performance()\n\n self.assertEqual(\n ppTotal.cash_flow,\n -1 * txn.price * txn.amount,\n \"capital used should be equal to the opposite of the transaction \\\ncost of sole txn in test\"\n )\n\n self.assertEqual(\n len(ppTotal.positions),\n 1,\n \"should be just one position\"\n )\n self.assertEqual(\n ppTotal.positions[1].sid,\n txn.sid,\n \"position should be in security from the transaction\"\n )\n\n self.assertEqual(\n ppTotal.positions[1].amount,\n -100,\n \"should have a position of -100 shares\"\n )\n\n self.assertEqual(\n ppTotal.positions[1].cost_basis,\n txn.price,\n \"should have a cost basis of 10\"\n )\n\n self.assertEqual(\n ppTotal.positions[1].last_sale_price,\n trades_2[-1].price,\n \"last sale should be price of last trade\"\n )\n\n self.assertEqual(\n ppTotal.ending_value,\n -900,\n \"ending value should be price of last trade times number of \\\n shares in position\")\n\n self.assertEqual(\n ppTotal.pnl,\n 100,\n \"drop of 1 on -100 shares should be 100\"\n )\n\n check_perf_period(\n pp,\n gross_leverage=0.8181,\n net_leverage=-0.8181,\n long_exposure=0.0,\n longs_count=0,\n short_exposure=-900.0,\n shorts_count=1)\n\n # Validate that the account attributes.\n account = ppTotal.as_account()\n check_account(account,\n settled_cash=2000.0,\n equity_with_loan=1100.0,\n total_positions_value=-900.0,\n total_positions_exposure=-900.0,\n regt_equity=2000.0,\n available_funds=2000.0,\n excess_liquidity=2000.0,\n cushion=1.8181,\n leverage=0.8181,\n net_leverage=-0.8181,\n net_liquidation=1100.0)\n\n def test_covering_short(self):\n \"\"\"verify performance where short is bought and covered, and shares \\\ntrade after cover\"\"\"\n self.create_environment_stuff(num_days=10)\n\n trades = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 11, 9, 8, 7, 8, 9, 10],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n\n short_txn = create_txn(self.asset1, trades[1].dt, 10.0, -100)\n cover_txn = create_txn(self.asset1, trades[6].dt, 7.0, 100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n\n pt.execute_transaction(short_txn)\n pp.handle_execution(short_txn)\n pt.execute_transaction(cover_txn)\n pp.handle_execution(cover_txn)\n\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n 
short_txn_cost = short_txn.price * short_txn.amount\n cover_txn_cost = cover_txn.price * cover_txn.amount\n\n self.assertEqual(\n pp.cash_flow,\n -1 * short_txn_cost - cover_txn_cost,\n \"capital used should be equal to the net transaction costs\"\n )\n\n self.assertEqual(\n len(pp.positions),\n 0,\n \"should be zero positions\"\n )\n\n self.assertEqual(\n pp.ending_value,\n 0,\n \"ending value should be price of last trade times number of \\\nshares in position\"\n )\n\n self.assertEqual(\n pp.pnl,\n 300,\n \"gain of 1 on 100 shares should be 300\"\n )\n\n check_perf_period(\n pp,\n gross_leverage=0.0,\n net_leverage=0.0,\n long_exposure=0.0,\n longs_count=0,\n short_exposure=0.0,\n shorts_count=0)\n\n account = pp.as_account()\n check_account(account,\n settled_cash=1300.0,\n equity_with_loan=1300.0,\n total_positions_value=0.0,\n total_positions_exposure=0.0,\n regt_equity=1300.0,\n available_funds=1300.0,\n excess_liquidity=1300.0,\n cushion=1.0,\n leverage=0.0,\n net_leverage=0.0,\n net_liquidation=1300.0)\n\n def test_cost_basis_calc(self):\n self.create_environment_stuff(num_days=5)\n\n history_args = (\n self.asset1,\n [10, 11, 11, 12, 10],\n [100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n self.trading_calendar,\n )\n trades = factory.create_trade_history(*history_args)\n transactions = factory.create_txn_history(*history_args)[:4]\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(\n 1000.0,\n self.env.asset_finder,\n self.sim_params.data_frequency,\n period_open=self.sim_params.start_session,\n period_close=self.sim_params.sessions[-1]\n )\n pp.position_tracker = pt\n\n average_cost = 0\n for i, txn in enumerate(transactions):\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n average_cost = (average_cost * i + txn.price) / (i + 1)\n self.assertEqual(pt.positions[1].cost_basis, average_cost)\n\n dt = trades[-2].dt\n self.assertEqual(\n pt.positions[1].last_sale_price,\n trades[-2].price,\n \"should have a last sale of 12, got {val}\".format(\n val=pt.positions[1].last_sale_price)\n )\n\n self.assertEqual(\n pt.positions[1].cost_basis,\n 11,\n \"should have a cost basis of 11\"\n )\n\n pt.sync_last_sale_prices(dt, False, data_portal)\n\n pp.calculate_performance()\n\n self.assertEqual(\n pp.pnl,\n 400\n )\n\n down_tick = trades[-1]\n sale_txn = create_txn(self.asset1, down_tick.dt, 10.0, -100)\n pp.rollover()\n\n pt.execute_transaction(sale_txn)\n pp.handle_execution(sale_txn)\n\n dt = down_tick.dt\n pt.sync_last_sale_prices(dt, False, data_portal)\n\n pp.calculate_performance()\n self.assertEqual(\n pp.positions[1].last_sale_price,\n 10,\n \"should have a last sale of 10, was {val}\".format(\n val=pp.positions[1].last_sale_price)\n )\n\n self.assertEqual(\n pp.positions[1].cost_basis,\n 11,\n \"should have a cost basis of 11\"\n )\n\n self.assertEqual(pp.pnl, -800, \"this period goes from +400 to -400\")\n\n pt3 = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp3 = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp3.position_tracker = pt3\n\n average_cost = 0\n for i, txn in enumerate(transactions):\n pt3.execute_transaction(txn)\n pp3.handle_execution(txn)\n average_cost = (average_cost * i + txn.price) / (i + 1)\n self.assertEqual(pp3.positions[1].cost_basis, 
average_cost)\n\n pt3.execute_transaction(sale_txn)\n pp3.handle_execution(sale_txn)\n\n trades.append(down_tick)\n pt3.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp3.calculate_performance()\n self.assertEqual(\n pp3.positions[1].last_sale_price,\n 10,\n \"should have a last sale of 10\"\n )\n\n self.assertEqual(\n pp3.positions[1].cost_basis,\n 11,\n \"should have a cost basis of 11\"\n )\n\n self.assertEqual(\n pp3.pnl,\n -400,\n \"should be -400 for all trades and transactions in period\"\n )\n\n def test_cost_basis_calc_close_pos(self):\n self.create_environment_stuff(num_days=8)\n\n history_args = (\n 1,\n [10, 9, 11, 8, 9, 12, 13, 14],\n [200, -100, -100, 100, -300, 100, 500, 400],\n oneday,\n self.sim_params,\n self.trading_calendar,\n )\n cost_bases = [10, 10, 0, 8, 9, 9, 13, 13.5]\n\n transactions = factory.create_txn_history(*history_args)\n\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n\n for idx, (txn, cb) in enumerate(zip(transactions, cost_bases)):\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n if idx == 2:\n # buy 200, sell 100, sell 100 = 0 shares = no position\n self.assertNotIn(1, pp.positions)\n else:\n self.assertEqual(pp.positions[1].cost_basis, cb)\n\n pp.calculate_performance()\n\n self.assertEqual(pp.positions[1].cost_basis, cost_bases[-1])\n\n def test_capital_change_intra_period(self):\n self.create_environment_stuff()\n\n # post some trades in the market\n trades = factory.create_trade_history(\n self.asset1,\n [10.0, 11.0, 12.0, 13.0],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n txn = create_txn(self.asset1, trades[0].dt, 10.0, 100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency,\n period_open=self.sim_params.start_session,\n period_close=self.sim_params.end_session)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n # sync prices before we introduce a capital change\n pt.sync_last_sale_prices(trades[2].dt, False, data_portal)\n\n pp.initialize_subperiod_divider()\n pp.set_current_subperiod_starting_values(1000.0)\n\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n pp.calculate_performance()\n\n self.assertAlmostEqual(pp.returns, 1200/1000 * 2300/2200 - 1)\n self.assertAlmostEqual(pp.pnl, 300)\n self.assertAlmostEqual(pp.cash_flow, -1000)\n\n def test_capital_change_inter_period(self):\n self.create_environment_stuff()\n\n # post some trades in the market\n trades = factory.create_trade_history(\n self.asset1,\n [10.0, 11.0, 12.0, 13.0],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n txn = create_txn(self.asset1, trades[0].dt, 10.0, 100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency,\n 
period_open=self.sim_params.start_session,\n period_close=self.sim_params.end_session)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n pt.sync_last_sale_prices(trades[0].dt, False, data_portal)\n pp.calculate_performance()\n self.assertAlmostEqual(pp.returns, 0)\n self.assertAlmostEqual(pp.pnl, 0)\n self.assertAlmostEqual(pp.cash_flow, -1000)\n pp.rollover()\n\n pt.sync_last_sale_prices(trades[1].dt, False, data_portal)\n pp.calculate_performance()\n self.assertAlmostEqual(pp.returns, 1100.0/1000.0 - 1)\n self.assertAlmostEqual(pp.pnl, 100)\n self.assertAlmostEqual(pp.cash_flow, 0)\n pp.rollover()\n\n pp.adjust_period_starting_capital(1000)\n pt.sync_last_sale_prices(trades[2].dt, False, data_portal)\n pp.calculate_performance()\n self.assertAlmostEqual(pp.returns, 2200.0/2100.0 - 1)\n self.assertAlmostEqual(pp.pnl, 100)\n self.assertAlmostEqual(pp.cash_flow, 0)\n pp.rollover()\n\n pt.sync_last_sale_prices(trades[3].dt, False, data_portal)\n pp.calculate_performance()\n self.assertAlmostEqual(pp.returns, 2300.0/2200.0 - 1)\n self.assertAlmostEqual(pp.pnl, 100)\n self.assertAlmostEqual(pp.cash_flow, 0)\n\n\nclass TestPositionTracker(WithTradingEnvironment,\n WithInstanceTmpDir,\n ZiplineTestCase):\n ASSET_FINDER_EQUITY_SIDS = 1, 2\n\n @classmethod\n def make_futures_info(cls):\n return pd.DataFrame.from_dict(\n {\n 3: {'multiplier': 1000, 'exchange': 'TEST'},\n 4: {'multiplier': 1000, 'exchange': 'TEST'},\n 1032201401: {'multiplier': 50, 'exchange': 'TEST'},\n },\n orient='index',\n )\n\n def test_empty_positions(self):\n \"\"\"\n make sure all the empty position stats return a numeric 0\n\n Originally this bug was due to np.dot([], []) returning\n np.bool_(False)\n \"\"\"\n sim_params = factory.create_simulation_parameters(num_days=4)\n\n pt = perf.PositionTracker(self.env.asset_finder,\n sim_params.data_frequency)\n pos_stats = pt.stats()\n\n stats = [\n 'net_value',\n 'net_exposure',\n 'gross_value',\n 'gross_exposure',\n 'short_value',\n 'short_exposure',\n 'shorts_count',\n 'long_value',\n 'long_exposure',\n 'longs_count',\n ]\n for name in stats:\n val = getattr(pos_stats, name)\n self.assertEquals(val, 0)\n self.assertNotIsInstance(val, (bool, np.bool_))\n\n def test_position_values_and_exposures(self):\n pt = perf.PositionTracker(self.env.asset_finder, None)\n dt = pd.Timestamp(\"1984/03/06 3:00PM\")\n pos1 = perf.Position(1, amount=np.float64(10.0),\n last_sale_date=dt, last_sale_price=10)\n pos2 = perf.Position(2, amount=np.float64(-20.0),\n last_sale_date=dt, last_sale_price=10)\n pos3 = perf.Position(3, amount=np.float64(30.0),\n last_sale_date=dt, last_sale_price=10)\n pos4 = perf.Position(4, amount=np.float64(-40.0),\n last_sale_date=dt, last_sale_price=10)\n pt.update_positions({1: pos1, 2: pos2, 3: pos3, 4: pos4})\n\n # Test long-only methods\n pos_stats = pt.stats()\n self.assertEqual(100, pos_stats.long_value)\n self.assertEqual(100 + 300000, pos_stats.long_exposure)\n self.assertEqual(2, pos_stats.longs_count)\n\n # Test short-only methods\n self.assertEqual(-200, pos_stats.short_value)\n self.assertEqual(-200 - 400000, pos_stats.short_exposure)\n self.assertEqual(2, pos_stats.shorts_count)\n\n # Test gross and net values\n self.assertEqual(100 + 200, pos_stats.gross_value)\n self.assertEqual(100 - 200, pos_stats.net_value)\n\n # Test gross and net exposures\n self.assertEqual(100 + 200 + 300000 + 400000, pos_stats.gross_exposure)\n self.assertEqual(100 - 200 + 300000 - 400000, pos_stats.net_exposure)\n\n def 
test_update_positions(self):\n pt = perf.PositionTracker(self.env.asset_finder, None)\n dt = pd.Timestamp(\"2014/01/01 3:00PM\")\n pos1 = perf.Position(1, amount=np.float64(10.0),\n last_sale_date=dt, last_sale_price=10)\n pos2 = perf.Position(2, amount=np.float64(-20.0),\n last_sale_date=dt, last_sale_price=10)\n pos3 = perf.Position(1032201401, amount=np.float64(30.0),\n last_sale_date=dt, last_sale_price=100)\n\n # Call update_positions twice. When the second call is made,\n # self.positions will already contain data. The order of this data\n # needs to be preserved so that it is consistent with the order of the\n # data stored in the multipliers OrderedDict()'s. If self.positions\n # were to be stored as a dict, then its order could change in arbitrary\n # ways when the second update_positions call is made. Hence we also\n # store it as an OrderedDict.\n pt.update_positions({1: pos1, 1032201401: pos3})\n pt.update_positions({2: pos2})\n\n pos_stats = pt.stats()\n # Test long-only methods\n self.assertEqual(100, pos_stats.long_value)\n # 150,000 = 30 * 100 * 50 (amount * last_sale_price * multiplier)\n self.assertEqual(100 + 150000, pos_stats.long_exposure)\n self.assertEqual(2, pos_stats.longs_count)\n\n # Test short-only methods\n self.assertEqual(-200, pos_stats.short_value)\n self.assertEqual(-200, pos_stats.short_exposure)\n self.assertEqual(1, pos_stats.shorts_count)\n\n # Test gross and net values\n self.assertEqual(100 + 200, pos_stats.gross_value)\n self.assertEqual(100 - 200, pos_stats.net_value)\n\n # Test gross and net exposures\n self.assertEqual(100 + 150000 + 200, pos_stats.gross_exposure)\n self.assertEqual(100 + 150000 - 200, pos_stats.net_exposure)\n\n def test_close_position(self):\n future_sid = 1032201401\n equity_sid = 1\n pt = perf.PositionTracker(self.env.asset_finder, None)\n dt = pd.Timestamp('2017/01/04 3:00PM')\n\n pos1 = perf.Position(\n sid=future_sid,\n amount=np.float64(30.0),\n last_sale_date=dt,\n last_sale_price=100,\n )\n pos2 = perf.Position(\n sid=equity_sid,\n amount=np.float64(10.0),\n last_sale_date=dt,\n last_sale_price=10,\n )\n\n # Update the positions dictionary with `future_sid` first. The order\n # matters because it affects the multipliers dictionaries, which are\n # OrderedDicts. If `future_sid` is not removed from the multipliers\n # dictionaries, equities will hit the incorrect multiplier when\n # computing `pt.stats()`.\n pt.update_positions({future_sid: pos1, equity_sid: pos2})\n\n asset_to_close = self.env.asset_finder.retrieve_asset(future_sid)\n txn = create_txn(asset_to_close, dt, 100, -30)\n pt.execute_transaction(txn)\n\n pos_stats = pt.stats()\n\n # Test long-only methods.\n self.assertEqual(100, pos_stats.long_value)\n self.assertEqual(100, pos_stats.long_exposure)\n self.assertEqual(1, pos_stats.longs_count)\n\n # Test short-only methods.\n self.assertEqual(0, pos_stats.short_value)\n self.assertEqual(0, pos_stats.short_exposure)\n self.assertEqual(0, pos_stats.shorts_count)\n\n # Test gross and net values.\n self.assertEqual(100, pos_stats.gross_value)\n self.assertEqual(100, pos_stats.net_value)\n\n # Test gross and net exposures.\n self.assertEqual(100, pos_stats.gross_exposure)\n self.assertEqual(100, pos_stats.net_exposure)\n",
"import warnings\n\nfrom mock import patch\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.common import PerformanceWarning\n\nfrom zipline import TradingAlgorithm\nfrom zipline.finance.trading import SimulationParameters\nfrom zipline.testing import (\n MockDailyBarReader,\n create_daily_df_for_asset,\n create_minute_df_for_asset,\n str_to_seconds,\n)\nfrom zipline.testing.fixtures import (\n WithCreateBarData,\n WithDataPortal,\n WithSimParams,\n ZiplineTestCase,\n)\nfrom zipline.zipline_warnings import ZiplineDeprecationWarning\n\nsimple_algo = \"\"\"\nfrom zipline.api import sid, order\ndef initialize(context):\n pass\n\ndef handle_data(context, data):\n assert sid(1) in data\n assert sid(2) in data\n assert len(data) == 3\n for asset in data:\n pass\n\"\"\"\n\nhistory_algo = \"\"\"\nfrom zipline.api import sid, history\n\ndef initialize(context):\n context.sid1 = sid(1)\n\ndef handle_data(context, data):\n context.history_window = history(5, \"1m\", \"volume\")\n\"\"\"\n\nhistory_bts_algo = \"\"\"\nfrom zipline.api import sid, history, record\n\ndef initialize(context):\n context.sid3 = sid(3)\n context.num_bts = 0\n\ndef before_trading_start(context, data):\n context.num_bts += 1\n\n # Get history at the second BTS (beginning of second day)\n if context.num_bts == 2:\n record(history=history(5, \"1m\", \"volume\"))\n\ndef handle_data(context, data):\n pass\n\"\"\"\n\nsimple_transforms_algo = \"\"\"\nfrom zipline.api import sid\ndef initialize(context):\n context.count = 0\n\ndef handle_data(context, data):\n if context.count == 2:\n context.mavg = data[sid(1)].mavg(5)\n context.vwap = data[sid(1)].vwap(5)\n context.stddev = data[sid(1)].stddev(5)\n context.returns = data[sid(1)].returns()\n\n context.count += 1\n\"\"\"\n\nmanipulation_algo = \"\"\"\ndef initialize(context):\n context.asset1 = sid(1)\n context.asset2 = sid(2)\n\ndef handle_data(context, data):\n assert len(data) == 2\n assert len(data.keys()) == 2\n assert context.asset1 in data.keys()\n assert context.asset2 in data.keys()\n\"\"\"\n\nsid_accessor_algo = \"\"\"\nfrom zipline.api import sid\n\ndef initialize(context):\n context.asset1 = sid(1)\n\ndef handle_data(context,data):\n assert data[sid(1)].sid == context.asset1\n assert data[sid(1)][\"sid\"] == context.asset1\n\"\"\"\n\ndata_items_algo = \"\"\"\nfrom zipline.api import sid\n\ndef initialize(context):\n context.asset1 = sid(1)\n context.asset2 = sid(2)\n\ndef handle_data(context, data):\n iter_list = list(data.iteritems())\n items_list = data.items()\n assert iter_list == items_list\n\"\"\"\n\n\nclass TestAPIShim(WithCreateBarData,\n WithDataPortal,\n WithSimParams,\n ZiplineTestCase,\n ):\n START_DATE = pd.Timestamp(\"2016-01-05\", tz='UTC')\n END_DATE = pd.Timestamp(\"2016-01-28\", tz='UTC')\n SIM_PARAMS_DATA_FREQUENCY = 'minute'\n\n sids = ASSET_FINDER_EQUITY_SIDS = 1, 2, 3\n\n @classmethod\n def make_equity_minute_bar_data(cls):\n for sid in cls.sids:\n yield sid, create_minute_df_for_asset(\n cls.trading_calendar,\n cls.SIM_PARAMS_START,\n cls.SIM_PARAMS_END,\n )\n\n @classmethod\n def make_equity_daily_bar_data(cls):\n for sid in cls.sids:\n yield sid, create_daily_df_for_asset(\n cls.trading_calendar,\n cls.SIM_PARAMS_START,\n cls.SIM_PARAMS_END,\n )\n\n @classmethod\n def make_splits_data(cls):\n return pd.DataFrame([\n {\n 'effective_date': str_to_seconds('2016-01-06'),\n 'ratio': 0.5,\n 'sid': 3,\n }\n ])\n\n @classmethod\n def make_adjustment_writer_equity_daily_bar_reader(cls):\n return MockDailyBarReader()\n\n @classmethod\n def 
init_class_fixtures(cls):\n super(TestAPIShim, cls).init_class_fixtures()\n\n cls.asset1 = cls.env.asset_finder.retrieve_asset(1)\n cls.asset2 = cls.env.asset_finder.retrieve_asset(2)\n cls.asset3 = cls.env.asset_finder.retrieve_asset(3)\n\n def create_algo(self, code, filename=None, sim_params=None):\n if sim_params is None:\n sim_params = self.sim_params\n\n return TradingAlgorithm(\n script=code,\n sim_params=sim_params,\n env=self.env,\n algo_filename=filename\n )\n\n def test_old_new_data_api_paths(self):\n \"\"\"\n Test that the new and old data APIs hit the same code paths.\n\n We want to ensure that the old data API(data[sid(N)].field and\n similar) and the new data API(data.current(sid(N), field) and\n similar) hit the same code paths on the DataPortal.\n \"\"\"\n test_start_minute = self.trading_calendar.minutes_for_session(\n self.sim_params.sessions[0]\n )[1]\n test_end_minute = self.trading_calendar.minutes_for_session(\n self.sim_params.sessions[0]\n )[-1]\n bar_data = self.create_bardata(\n lambda: test_end_minute,\n )\n ohlcvp_fields = [\n \"open\",\n \"high\",\n \"low\"\n \"close\",\n \"volume\",\n \"price\",\n ]\n spot_value_meth = 'zipline.data.data_portal.DataPortal.get_spot_value'\n\n def assert_get_spot_value_called(fun, field):\n \"\"\"\n Assert that get_spot_value was called during the execution of fun.\n\n Takes in a function fun and a string field.\n \"\"\"\n with patch(spot_value_meth) as gsv:\n fun()\n gsv.assert_called_with(\n self.asset1,\n field,\n test_end_minute,\n 'minute'\n )\n # Ensure that data.current(sid(n), field) has the same behaviour as\n # data[sid(n)].field.\n for field in ohlcvp_fields:\n assert_get_spot_value_called(\n lambda: getattr(bar_data[self.asset1], field),\n field,\n )\n assert_get_spot_value_called(\n lambda: bar_data.current(self.asset1, field),\n field,\n )\n\n history_meth = 'zipline.data.data_portal.DataPortal.get_history_window'\n\n def assert_get_history_window_called(fun, is_legacy):\n \"\"\"\n Assert that get_history_window was called during fun().\n\n Takes in a function fun and a boolean is_legacy.\n \"\"\"\n with patch(history_meth) as ghw:\n fun()\n # Slightly hacky, but done to get around the fact that\n # history( explicitly passes an ffill param as the last arg,\n # while data.history doesn't.\n if is_legacy:\n ghw.assert_called_with(\n [self.asset1, self.asset2, self.asset3],\n test_end_minute,\n 5,\n \"1m\",\n \"volume\",\n True\n )\n else:\n ghw.assert_called_with(\n [self.asset1, self.asset2, self.asset3],\n test_end_minute,\n 5,\n \"1m\",\n \"volume\",\n )\n\n test_sim_params = SimulationParameters(\n start_session=test_start_minute,\n end_session=test_end_minute,\n data_frequency=\"minute\",\n trading_calendar=self.trading_calendar,\n )\n\n history_algorithm = self.create_algo(\n history_algo,\n sim_params=test_sim_params\n )\n assert_get_history_window_called(\n lambda: history_algorithm.run(self.data_portal),\n is_legacy=True\n )\n assert_get_history_window_called(\n lambda: bar_data.history(\n [self.asset1, self.asset2, self.asset3],\n \"volume\",\n 5,\n \"1m\"\n ),\n is_legacy=False\n )\n\n def test_sid_accessor(self):\n \"\"\"\n Test that we maintain backwards compat for sid access on a data object.\n\n We want to support both data[sid(24)].sid, as well as\n data[sid(24)][\"sid\"]. 
Since these are deprecated and will eventually\n cease to be supported, we also want to assert that we're seeing a\n deprecation warning.\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n algo = self.create_algo(sid_accessor_algo)\n algo.run(self.data_portal)\n\n # Since we're already raising a warning on doing data[sid(x)],\n # we don't want to raise an extra warning on data[sid(x)].sid.\n self.assertEqual(2, len(w))\n\n # Check that both the warnings raised were in fact\n # ZiplineDeprecationWarnings\n for warning in w:\n self.assertEqual(\n ZiplineDeprecationWarning,\n warning.category\n )\n self.assertEqual(\n \"`data[sid(N)]` is deprecated. Use `data.current`.\",\n str(warning.message)\n )\n\n def test_data_items(self):\n \"\"\"\n Test that we maintain backwards compat for data.[items | iteritems].\n\n We also want to assert that we warn that iterating over the assets\n in `data` is deprecated.\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n algo = self.create_algo(data_items_algo)\n algo.run(self.data_portal)\n\n self.assertEqual(4, len(w))\n\n for idx, warning in enumerate(w):\n self.assertEqual(\n ZiplineDeprecationWarning,\n warning.category\n )\n if idx % 2 == 0:\n self.assertEqual(\n \"Iterating over the assets in `data` is deprecated.\",\n str(warning.message)\n )\n else:\n self.assertEqual(\n \"`data[sid(N)]` is deprecated. Use `data.current`.\",\n str(warning.message)\n )\n\n def test_iterate_data(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n\n algo = self.create_algo(simple_algo)\n algo.run(self.data_portal)\n\n self.assertEqual(4, len(w))\n\n line_nos = [warning.lineno for warning in w]\n self.assertEqual(4, len(set(line_nos)))\n\n for idx, warning in enumerate(w):\n self.assertEqual(ZiplineDeprecationWarning,\n warning.category)\n\n self.assertEqual(\"<string>\", warning.filename)\n self.assertEqual(line_nos[idx], warning.lineno)\n\n if idx < 2:\n self.assertEqual(\n \"Checking whether an asset is in data is deprecated.\",\n str(warning.message)\n )\n else:\n self.assertEqual(\n \"Iterating over the assets in `data` is deprecated.\",\n str(warning.message)\n )\n\n def test_history(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n\n sim_params = self.sim_params.create_new(\n self.sim_params.sessions[1],\n self.sim_params.end_session\n )\n\n algo = self.create_algo(history_algo,\n sim_params=sim_params)\n algo.run(self.data_portal)\n\n self.assertEqual(1, len(w))\n self.assertEqual(ZiplineDeprecationWarning, w[0].category)\n self.assertEqual(\"<string>\", w[0].filename)\n self.assertEqual(8, w[0].lineno)\n self.assertEqual(\"The `history` method is deprecated. 
Use \"\n \"`data.history` instead.\", str(w[0].message))\n\n def test_old_new_history_bts_paths(self):\n \"\"\"\n Tests that calling history in before_trading_start gets us the correct\n values, which involves 1) calling data_portal.get_history_window as of\n the previous market minute, 2) getting adjustments between the previous\n market minute and the current time, and 3) applying those adjustments\n \"\"\"\n algo = self.create_algo(history_bts_algo)\n algo.run(self.data_portal)\n\n expected_vol_without_split = np.arange(386, 391) * 100\n expected_vol_with_split = np.arange(386, 391) * 200\n\n window = algo.recorded_vars['history']\n np.testing.assert_array_equal(window[self.asset1].values,\n expected_vol_without_split)\n np.testing.assert_array_equal(window[self.asset2].values,\n expected_vol_without_split)\n np.testing.assert_array_equal(window[self.asset3].values,\n expected_vol_with_split)\n\n def test_simple_transforms(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n\n sim_params = SimulationParameters(\n start_session=self.sim_params.sessions[8],\n end_session=self.sim_params.sessions[-1],\n data_frequency=\"minute\",\n trading_calendar=self.trading_calendar,\n )\n\n algo = self.create_algo(simple_transforms_algo,\n sim_params=sim_params)\n algo.run(self.data_portal)\n\n self.assertEqual(8, len(w))\n transforms = [\"mavg\", \"vwap\", \"stddev\", \"returns\"]\n\n for idx, line_no in enumerate(range(8, 12)):\n warning1 = w[idx * 2]\n warning2 = w[(idx * 2) + 1]\n\n self.assertEqual(\"<string>\", warning1.filename)\n self.assertEqual(\"<string>\", warning2.filename)\n\n self.assertEqual(line_no, warning1.lineno)\n self.assertEqual(line_no, warning2.lineno)\n\n self.assertEqual(\"`data[sid(N)]` is deprecated. 
Use \"\n \"`data.current`.\",\n str(warning1.message))\n self.assertEqual(\"The `{0}` method is \"\n \"deprecated.\".format(transforms[idx]),\n str(warning2.message))\n\n # now verify the transform values\n # minute price\n # 2016-01-11 14:31:00+00:00 1561\n # ...\n # 2016-01-14 20:59:00+00:00 3119\n # 2016-01-14 21:00:00+00:00 3120\n # 2016-01-15 14:31:00+00:00 3121\n # 2016-01-15 14:32:00+00:00 3122\n # 2016-01-15 14:33:00+00:00 3123\n\n # volume\n # 2016-01-11 14:31:00+00:00 156100\n # ...\n # 2016-01-14 20:59:00+00:00 311900\n # 2016-01-14 21:00:00+00:00 312000\n # 2016-01-15 14:31:00+00:00 312100\n # 2016-01-15 14:32:00+00:00 312200\n # 2016-01-15 14:33:00+00:00 312300\n\n # daily price (last day built with minute data)\n # 2016-01-14 00:00:00+00:00 9\n # 2016-01-15 00:00:00+00:00 3123\n\n # mavg = average of all the prices = (1561 + 3123) / 2 = 2342\n # vwap = sum(price * volume) / sum(volumes)\n # = 889119531400.0 / 366054600.0\n # = 2428.9259891830343\n # stddev = stddev(price, ddof=1) = 451.3435498597493\n # returns = (todayprice - yesterdayprice) / yesterdayprice\n # = (3123 - 9) / 9 = 346\n self.assertEqual(2342, algo.mavg)\n self.assertAlmostEqual(2428.92599, algo.vwap, places=5)\n self.assertAlmostEqual(451.34355, algo.stddev, places=5)\n self.assertAlmostEqual(346, algo.returns)\n\n def test_manipulation(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n\n algo = self.create_algo(simple_algo)\n algo.run(self.data_portal)\n\n self.assertEqual(4, len(w))\n\n for idx, warning in enumerate(w):\n self.assertEqual(\"<string>\", warning.filename)\n self.assertEqual(7 + idx, warning.lineno)\n\n if idx < 2:\n self.assertEqual(\"Checking whether an asset is in data is \"\n \"deprecated.\",\n str(warning.message))\n else:\n self.assertEqual(\"Iterating over the assets in `data` is \"\n \"deprecated.\",\n str(warning.message))\n"
] | [
[
"pandas.Timestamp",
"numpy.float64",
"pandas.DataFrame.from_dict",
"numpy.array",
"numpy.testing.assert_allclose"
],
[
"numpy.testing.assert_array_equal",
"numpy.arange",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
steven-murray/powerbox | [
"09809f3fe9e2b25dfb2f956ca4c2d4d40a0ac693"
] | [
"tests/test_power.py"
] | [
"import numpy as np\nimport os\nimport inspect\nimport sys\n\nLOCATION = \"/\".join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))).split(\"/\")[:-1])\nsys.path.insert(0, LOCATION)\n\nfrom powerbox import PowerBox, get_power\n\n\ndef test_power1d():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, a=0, b=1)\n\n p[i], k = get_power(pb.delta_x(), pb.boxlength, a=0, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -2., rtol=2)\n\n\ndef test_power1d_n3():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k ** -3., boxlength=1.0, b=1)\n p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3., rtol=2)\n\n\ndef test_power1d_bigL():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k ** -3., boxlength=10.0, b=1)\n p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3., rtol=2)\n\n\ndef test_power1d_ordinary_freq():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k ** -3., boxlength=1.0)\n p[i], k = get_power(pb.delta_x(), pb.boxlength)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3., rtol=2)\n\n\ndef test_power1d_halfN():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(4001, dim=1, pk=lambda k: 1.0 * k ** -3., boxlength=1.0, b=1)\n p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[1000:], 1.0 * k[1000:] ** -3., rtol=2)\n\n\ndef test_power2d():\n p = [0] * 5\n for i in range(5):\n pb = PowerBox(200, dim=2, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, b=1)\n p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[100:], 1.0 * k[100:] ** -2., rtol=2)\n\n\ndef test_power3d():\n pb = PowerBox(50, dim=3, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, b=1)\n p, k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n print(p / (1.0 * k ** -2.))\n assert np.allclose(p, 1.0 * k ** -2., rtol=2)\n\n\ndef test_k_zero_ignore():\n pb = PowerBox(50, dim=2, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, b=1)\n\n dx = pb.delta_x()\n p1, k1 = get_power(dx, pb.boxlength, bin_ave=False)\n p0, k0 = get_power(dx, pb.boxlength, ignore_zero_mode=True, bin_ave=False)\n\n assert np.all(k1 == k0)\n\n assert np.all(p1[1:] == p0[1:])\n\n assert p1[0] != p0[0]\n\n\ndef test_k_weights():\n pb = PowerBox(50, dim=2, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, b=1)\n\n dx = pb.delta_x()\n\n k_weights = np.ones_like(dx)\n k_weights[:, 25] = 0\n\n p1, k1 = get_power(dx, pb.boxlength, bin_ave=False)\n p0, k0 = get_power(dx, pb.boxlength, bin_ave=False, k_weights= k_weights)\n\n assert np.all(k1 == k0)\n assert not np.allclose(p1, p0)"
] | [
[
"numpy.all",
"numpy.array",
"numpy.ones_like",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jesserobertson/cogj | [
"25f1d85023764ef0cc459a8a715b1b678f971858"
] | [
"setup_extensions.py"
] | [
"\"\"\" file: setup_extensions.py (cogj)\n author: Jess Robertson, @jesserobertson\n date: Saturday, 16 March 2019\n\n description: Set up Cython extensions for CO-GJ\n\"\"\"\n\nfrom pathlib import Path\nfrom logging import getLogger\nfrom multiprocessing import cpu_count\n\nimport numpy\nfrom setuptools import Extension\nfrom setuptools.command.sdist import sdist\n\n# Here we try to import Cython - if it's here then we can generate new c sources\n# directly from the pyx files using their build_ext class.\n# If not then we just use the default setuptools version\ntry:\n from Cython.Distutils import build_ext\n HAVE_CYTHON = True\nexcept ImportError:\n from setuptools.command.build_ext import build_ext\n HAVE_CYTHON = False\n\nLOGGER = getLogger()\n\n# Where are our extensions located?\nEXTENSIONS_MODULE = Path('cogj/extensions')\n\ndef update_thread_count():\n \"\"\" Update the thread count for OpenMP extensions\n\n Uses one thread per core, with the estimate of the number of cores from\n multiprocessing.cpu_count.\n \"\"\"\n LOGGER.info('Updating thread count for cython code to %s', cpu_count())\n num_threads = cpu_count() # We're just going for 1 thread/CPU here\n fname = EXTENSIONS_MODULE / 'common.pxd'\n\n # Don't clobber other definitions\n try:\n with open(fname, 'r') as src:\n content = src.readlines() # this is short, just slurp it\n\n # Write out a new definition\n with open(fname, 'w') as sink:\n for line in content:\n if line.startswith('cdef int NUM_THREADS'):\n sink.write('cdef int NUM_THREADS = {0}'.format(num_threads))\n else:\n sink.write(line)\n\n except FileNotFoundError:\n # doesn't exist so just leave it\n with open(fname, 'w') as sink:\n sink.write('cdef int NUM_THREADS = {0}'.format(num_threads))\n\n\n\ndef get_extensions():\n \"\"\" Find our extensions to build.\n\n Also updates the thread count for OpenMP extensions to the number of CPUs\n availble on the current machine.\n\n Returns:\n a list of Extension objects to pass to setup\n \"\"\"\n update_thread_count()\n\n # Get the extensions\n if HAVE_CYTHON:\n files = [f for f in EXTENSIONS_MODULE.iterdir() if f.suffix == '.pyx']\n else:\n files = [f for f in EXTENSIONS_MODULE.iterdir() if f.suffix == '.c']\n\n # Construct keyword arguments for all extensions\n kwargs = dict(\n # extra_compile_args=['-fopenmp'],\n # extra_link_args=['-fopenmp'],\n include_dirs=[numpy.get_include(), EXTENSIONS_MODULE]\n )\n\n # Construct all the extension objects and return them\n extensions = []\n for fname in files:\n module_name = fname.stem\n extension_name = '.'.join(list(EXTENSIONS_MODULE.parts) + [module_name])\n source = str(fname)\n extensions.append(Extension(extension_name, sources=[source], **kwargs))\n return extensions\n\n# Update source distribution - we always require Cython for this...\nclass cython_sdist(sdist): # pylint: disable=C0103\n\n \"Custom sdist command to build cython on-the-fly\"\n\n def run(self):\n # Make sure the compiled Cython files in the distribution are up-to-date\n from Cython.Build import cythonize\n update_thread_count()\n cythonize([str(f)\n for f in EXTENSIONS_MODULE.iterdir()\n if f.suffix == '.pyx'])\n super().run()\n\ndef get_cmdclass():\n \"\"\" Return a command class which builds cython extensions automatically\n \"\"\"\n cmdclass = {\n 'build_ext': build_ext,\n 'sdist': cython_sdist\n }\n return cmdclass\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CAVED123/Tensorforce | [
"823177f77f9047b1e71eccfffc08315ed1636878",
"823177f77f9047b1e71eccfffc08315ed1636878"
] | [
"tensorforce/core/optimizers/solvers/line_search.py",
"tensorforce/environments/arcade_learning_environment.py"
] | [
"# Copyright 2018 Tensorforce Team. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\n\nfrom tensorforce import TensorforceError, util\nfrom tensorforce.core import parameter_modules\nfrom tensorforce.core.optimizers.solvers import Iterative\n\n\nclass LineSearch(Iterative):\n \"\"\"\n Line search algorithm which iteratively optimizes the value $f(x)$ for $x$ on the line between \n $x'$ and $x_0$ by optimistically taking the first acceptable $x$ starting from $x_0$ and \n moving towards $x'$.\n \"\"\"\n\n def __init__(\n self, name, max_iterations, accept_ratio, mode, parameter, unroll_loop=False\n ):\n \"\"\"\n Creates a new line search solver instance.\n\n Args:\n max_iterations: Maximum number of iterations before termination.\n accept_ratio: Lower limit of what improvement ratio over $x = x'$ is acceptable \n (based either on a given estimated improvement or with respect to the value at \n $x = x'$).\n mode: Mode of movement between $x_0$ and $x'$, either 'linear' or 'exponential'.\n parameter: Movement mode parameter, additive or multiplicative, respectively.\n unroll_loop: Unrolls the TensorFlow while loop if true.\n \"\"\"\n super().__init__(name=name, max_iterations=max_iterations, unroll_loop=unroll_loop)\n\n assert accept_ratio >= 0.0\n self.accept_ratio = self.add_module(\n name='accept-ratio', module=accept_ratio, modules=parameter_modules, dtype='float'\n )\n\n # TODO: Implement such sequences more generally, also useful for learning rate decay or so.\n if mode not in ('linear', 'exponential'):\n raise TensorforceError(\n \"Invalid line search mode: {}, please choose one of 'linear' or 'exponential'\".format(mode)\n )\n self.mode = mode\n\n self.parameter = self.add_module(\n name='parameter', module=parameter, modules=parameter_modules, dtype='float'\n )\n\n def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):\n \"\"\"\n Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.\n\n Args:\n fn_x: A callable returning the value $f(x)$ at $x$.\n x_init: Initial solution guess $x_0$.\n base_value: Value $f(x')$ at $x = x'$.\n target_value: Value $f(x_0)$ at $x = x_0$.\n estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.\n\n Returns:\n A solution $x$ to the problem as given by the solver.\n \"\"\"\n return super().tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)\n\n def tf_start(self, x_init, base_value, target_value, estimated_improvement):\n \"\"\"\n Initialization step preparing the arguments for the first iteration of the loop body.\n\n Args:\n x_init: Initial solution guess $x_0$.\n base_value: Value $f(x')$ at $x = x'$.\n target_value: Value $f(x_0)$ at $x = x_0$.\n estimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None.\n\n Returns:\n Initial arguments for tf_step.\n \"\"\"\n self.base_value = base_value\n\n if estimated_improvement is 
None: # TODO: Is this a good alternative?\n estimated_improvement = tf.abs(x=base_value)\n\n difference = target_value - self.base_value\n epsilon = tf.constant(value=util.epsilon, dtype=util.tf_dtype(dtype='float'))\n improvement = difference / tf.maximum(x=estimated_improvement, y=epsilon)\n\n last_improvement = improvement - 1.0\n parameter = self.parameter.value()\n\n if self.mode == 'linear':\n deltas = [-t * parameter for t in x_init]\n self.estimated_incr = -estimated_improvement * parameter\n\n elif self.mode == 'exponential':\n deltas = [-t * parameter for t in x_init]\n\n return x_init, deltas, improvement, last_improvement, estimated_improvement\n\n def tf_step(self, x, deltas, improvement, last_improvement, estimated_improvement):\n \"\"\"\n Iteration loop body of the line search algorithm.\n\n Args:\n x: Current solution estimate $x_t$.\n deltas: Current difference $x_t - x'$.\n improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\n Returns:\n Updated arguments for next iteration.\n \"\"\"\n next_x = [t + delta for t, delta in zip(x, deltas)]\n parameter = self.parameter.value()\n\n if self.mode == 'linear':\n next_deltas = deltas\n next_estimated_improvement = estimated_improvement + self.estimated_incr\n\n elif self.mode == 'exponential':\n next_deltas = [delta * parameter for delta in deltas]\n next_estimated_improvement = estimated_improvement * parameter\n\n target_value = self.fn_x(next_deltas)\n\n difference = target_value - self.base_value\n epsilon = tf.constant(value=util.epsilon, dtype=util.tf_dtype(dtype='float'))\n next_improvement = difference / tf.maximum(x=next_estimated_improvement, y=epsilon)\n\n return next_x, next_deltas, next_improvement, improvement, next_estimated_improvement\n\n def tf_next_step(self, x, deltas, improvement, last_improvement, estimated_improvement):\n \"\"\"\n Termination condition: max number of iterations, or no improvement for last step, or \n improvement less than acceptable ratio, or estimated value not positive.\n\n Args:\n x: Current solution estimate $x_t$.\n deltas: Current difference $x_t - x'$.\n improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\n Returns:\n True if another iteration should be performed.\n \"\"\"\n improved = improvement > last_improvement\n accept_ratio = self.accept_ratio.value()\n next_step = tf.math.logical_and(x=improved, y=(improvement < accept_ratio))\n epsilon = tf.constant(value=util.epsilon, dtype=util.tf_dtype(dtype='float'))\n return tf.math.logical_and(x=next_step, y=(estimated_improvement > epsilon))\n\n def tf_end(self, x_final, deltas, improvement, last_improvement, estimated_improvement):\n \"\"\"\n Termination step preparing the return value.\n\n Args:\n x_init: Final solution estimate $x_n$.\n deltas: Current difference $x_n - x'$.\n improvement: Current improvement $(f(x_n) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{n-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\n Returns:\n Final solution.\n \"\"\"\n def accept_deltas():\n return [t + delta for t, delta in zip(x_final, deltas)]\n\n def undo_deltas():\n value = self.fn_x([-delta for delta in deltas])\n with tf.control_dependencies(control_inputs=(value,)):\n return util.fmap(function=util.identity_operation, xs=x_final)\n\n 
skip_undo_deltas = improvement > last_improvement\n x_final = self.cond(pred=skip_undo_deltas, true_fn=accept_deltas, false_fn=undo_deltas)\n return x_final\n",
"# Copyright 2018 Tensorforce Team. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\n\nfrom tensorforce.environments import Environment\n\n\nclass ArcadeLearningEnvironment(Environment):\n \"\"\"\n [Arcade Learning Environment](https://github.com/mgbellemare/Arcade-Learning-Environment)\n adapter (specification key: `ale`, `arcade_learning_environment`).\n\n May require:\n ```bash\n sudo apt-get install libsdl1.2-dev libsdl-gfx1.2-dev libsdl-image1.2-dev cmake\n\n git clone https://github.com/mgbellemare/Arcade-Learning-Environment.git\n cd Arcade-Learning-Environment\n\n mkdir build && cd build\n cmake -DUSE_SDL=ON -DUSE_RLGLUE=OFF -DBUILD_EXAMPLES=ON ..\n make -j 4\n cd ..\n\n pip3 install .\n ```\n\n Args:\n level (string): ALE rom file\n (<span style=\"color:#C00000\"><b>required</b></span>).\n loss_of_life_termination: Signals a terminal state on loss of life\n (<span style=\"color:#00C000\"><b>default</b></span>: false).\n loss_of_life_reward (float): Reward/Penalty on loss of life (negative values are a penalty)\n (<span style=\"color:#00C000\"><b>default</b></span>: 0.0).\n repeat_action_probability (float): Repeats last action with given probability\n (<span style=\"color:#00C000\"><b>default</b></span>: 0.0).\n visualize (bool): Whether to visualize interaction\n (<span style=\"color:#00C000\"><b>default</b></span>: false).\n frame_skip (int > 0): Number of times to repeat an action without observing\n (<span style=\"color:#00C000\"><b>default</b></span>: 1).\n seed (int): Random seed\n (<span style=\"color:#00C000\"><b>default</b></span>: none).\n \"\"\"\n\n def __init__(\n self, level, life_loss_terminal=False, life_loss_punishment=0.0,\n repeat_action_probability=0.0, visualize=False, frame_skip=1, seed=None\n ):\n super().__init__()\n\n from ale_python_interface import ALEInterface\n\n self.environment = ALEInterface()\n self.rom_file = level\n\n self.life_loss_terminal = life_loss_terminal\n self.life_loss_punishment = life_loss_punishment\n\n self.environment.setFloat(b'repeat_action_probability', repeat_action_probability)\n self.environment.setBool(b'display_screen', visualize)\n self.environment.setInt(b'frame_skip', frame_skip)\n if seed is not None:\n self.environment.setInt(b'random_seed', seed)\n\n # All set commands must be done before loading the ROM.\n self.environment.loadROM(rom_file=self.rom_file.encode())\n self.available_actions = tuple(self.environment.getLegalActionSet())\n\n # Full list of actions:\n # No-Op, Fire, Up, Right, Left, Down, Up Right, Up Left, Down Right, Down Left, Up Fire,\n # Right Fire, Left Fire, Down Fire, Up Right Fire, Up Left Fire, Down Right Fire, Down Left\n # Fire\n\n def __str__(self):\n return super().__str__() + '({})'.format(self.rom_file)\n\n def states(self):\n width, height = self.environment.getScreenDims()\n return dict(type='float', shape=(height, width, 3))\n\n def actions(self):\n return 
dict(type='int', num_values=len(self.available_actions))\n\n def close(self):\n self.environment.__del__()\n self.environment = None\n\n def get_states(self):\n screen = np.copy(self.environment.getScreenRGB(screen_data=self.screen))\n screen = screen.astype(dtype=np.float32) / 255.0\n return screen\n\n def reset(self):\n self.environment.reset_game()\n width, height = self.environment.getScreenDims()\n self.screen = np.empty((height, width, 3), dtype=np.uint8)\n self.lives = self.environment.lives()\n return self.get_states()\n\n def execute(self, actions):\n reward = self.environment.act(action=self.available_actions[actions])\n terminal = self.environment.game_over()\n states = self.get_states()\n\n next_lives = self.environment.lives()\n if next_lives < self.lives:\n if self.life_loss_terminal:\n terminal = True\n elif self.life_loss_punishment > 0.0:\n reward -= self.life_loss_punishment\n self.lives = next_lives\n\n return states, terminal, reward\n"
] | [
[
"tensorflow.maximum",
"tensorflow.math.logical_and",
"tensorflow.abs",
"tensorflow.control_dependencies"
],
[
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
spirit-code/aiida-spirit | [
"7a0c0ca7406f958599b691a410201137f9fb94e9"
] | [
"tests/test_calculations.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\" Tests for calculations\n\n\"\"\"\nimport os\nimport numpy as np\nfrom aiida.plugins import CalculationFactory\nfrom aiida.orm import Dict\nfrom aiida.engine import run, run_get_node\nfrom aiida_spirit.tools.helpers import prepare_test_inputs\n\nfrom . import TEST_DIR\n\n\ndef test_input_para_validator():\n \"\"\"Test running a calculation\n note this does only a dry run to check if the calculation plugin works\"\"\"\n\n # put an invalid type in params and check if the validator captures it\n for key, val in {\n 'llg_n_iterations': 17.2,\n 'mc_n_iterations': [1, 2, 3],\n 'bravais_lattice': 'test'\n }.items():\n parameters = Dict(dict={key: val})\n builder = CalculationFactory('spirit').get_builder()\n raised_error = False\n try:\n builder.parameters = parameters\n except (TypeError, ValueError):\n raised_error = True\n # check if an error was raised\n assert raised_error\n\n\ndef test_spirit_calc_dry_run(spirit_code):\n \"\"\"Test running a calculation\n note this does only a dry run to check if the calculation plugin works\"\"\"\n\n # Prepare input parameters\n inputs = prepare_test_inputs(os.path.join(TEST_DIR, 'input_files'))\n inputs['code'] = spirit_code\n inputs['metadata']['options'] = {\n # 5 mins max runtime\n 'max_wallclock_seconds': 300\n }\n inputs['metadata']['dry_run'] = True\n\n result = run(CalculationFactory('spirit'), **inputs)\n print(result)\n\n assert result is not None\n\n\ndef test_spirit_calc(spirit_code):\n \"\"\"Test running a calculation\n this actually runs spirit and therefore needs\n to have spirit installed in the python environment.\"\"\"\n\n # Prepare input parameters\n inputs = prepare_test_inputs(os.path.join(TEST_DIR, 'input_files'))\n inputs['code'] = spirit_code\n inputs['metadata']['options'] = {\n # 5 mins max runtime\n 'max_wallclock_seconds': 300\n }\n\n result, node = run_get_node(CalculationFactory('spirit'), **inputs)\n print(result, node)\n assert node.is_finished_ok\n\n # check consistency of the output files\n check_outcome(result)\n\n\ndef test_spirit_calc_with_param(spirit_code):\n \"\"\"Test running a calculation\n this actually runs spirit and therefore needs\n to have spirit installed in the python environment.\n\n This test runs a spirit calculation with an external field and a small temperature\n \"\"\"\n\n # Prepare input parameters\n inputs = prepare_test_inputs(os.path.join(TEST_DIR, 'input_files'))\n inputs['code'] = spirit_code\n inputs['metadata']['options'] = {\n # 5 mins max runtime\n 'max_wallclock_seconds': 300\n }\n # prepare parameters\n parameters = Dict(\n dict={\n 'llg_temperature': 10.0, # 10 K temperature noise\n 'external_field_magnitude': 2.0, # external field of 2 T\n 'external_field_normal':\n [0.0, 0.0, 1.0], # external field points in z direction\n 'mu_s': [2.2], # change spin moment to have the right size for Fe\n 'llg_n_iterations': 20000 # limit the number of iterations\n })\n inputs['parameters'] = parameters\n\n # run options input dict\n inputs['run_options'] = Dict(dict={\n 'simulation_method': 'LLG',\n 'solver': 'Depondt',\n })\n\n # first a dry run\n inputs['metadata']['dry_run'] = True\n result = run(CalculationFactory('spirit'), **inputs)\n\n # then run the calculation\n inputs['metadata']['dry_run'] = False\n result, node = run_get_node(CalculationFactory('spirit'), **inputs)\n print(result)\n assert node.is_finished_ok\n\n # check consistency of the output files\n spins_final = check_outcome(result, threshold=0.10)\n mag_mean = np.mean(spins_final, axis=0)\n 
print(mag_mean)\n assert mag_mean[0] < 0.25\n assert mag_mean[1] < 0.25\n assert mag_mean[2] > 0.85\n\n\ndef check_outcome(result, threshold=1e-5):\n \"\"\"check the result of a spirit calculation\n Checks if retrieved is there and if the output inside of the retreived makes sense\"\"\"\n\n # check output\n assert 'retrieved' in result\n ret = result['retrieved']\n out_file_list = ret.list_object_names()\n\n # check if spirit std out exists\n print(f'contents of retrieved: {out_file_list}')\n assert 'spirit.stdout' in out_file_list\n with ret.open('spirit.stdout') as _file:\n txt = _file.readlines()\n #from pprint import pprint\n #pprint(txt)\n assert len(txt) > 100\n\n # check some lines in the spirit std output\n for line in txt:\n if 'Number of Errors:' in line:\n errors = line.split()[-1]\n if 'Number of Warnings:' in line:\n warnings = line.split()[-1]\n assert int(errors) == 0\n assert int(warnings) == 0\n\n # check if initial and final spin image make sense\n spins_initial = result['magnetization'].get_array('initial')\n var_initial = np.std(spins_initial, axis=0).max()\n print('std initial', var_initial)\n assert var_initial > 0.4\n\n spins_final = result['magnetization'].get_array('final')\n var_final = np.std(spins_final, axis=0).max()\n print('std final', var_final)\n assert var_final < threshold\n\n return spins_final\n"
] | [
[
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
heather999/lenstronomy | [
"8102fe026c1f3ba6e81d8a1f59cceb90e68430b4",
"8102fe026c1f3ba6e81d8a1f59cceb90e68430b4",
"8102fe026c1f3ba6e81d8a1f59cceb90e68430b4",
"8102fe026c1f3ba6e81d8a1f59cceb90e68430b4",
"8102fe026c1f3ba6e81d8a1f59cceb90e68430b4"
] | [
"lenstronomy/ImSim/differential_extinction.py",
"test/test_Analysis/test_kinematics_api.py",
"lenstronomy/LensModel/Profiles/shear.py",
"test/test_ImSim/test_Numerics/test_numerics.py",
"lenstronomy/LensModel/Profiles/constant_shift.py"
] | [
"from lenstronomy.LightModel.light_model import LightModel\nimport numpy as np\n\n__all__ = ['DifferentialExtinction']\n\n\nclass DifferentialExtinction(object):\n \"\"\"\n class to compute an extinction (for a specific band/wavelength). This class uses the functionality available in\n the LightModel module to describe an optical depth tau_ext to compute the extinction on the sky/image.\n \"\"\"\n\n def __init__(self, optical_depth_model=None, tau0_index=0):\n \"\"\"\n\n :param optical_depth_model: list of strings naming the profiles (same convention as LightModel module)\n describing the optical depth of the extinction\n \"\"\"\n if optical_depth_model is None:\n optical_depth_model = []\n self._profile = LightModel(light_model_list=optical_depth_model)\n if len(optical_depth_model) == 0:\n self._compute_bool = False\n else:\n self._compute_bool = True\n self._tau0_index = tau0_index\n\n @property\n def compute_bool(self):\n \"\"\"\n :return: True when a differential extinction is set, False otherwise \n \"\"\"\n return self._compute_bool\n\n def extinction(self, x, y, kwargs_extinction=None, kwargs_special=None):\n \"\"\"\n\n :param x: coordinate in image plane of flux intensity\n :param y: coordinate in image plane of flux intensity\n :param kwargs_extinction: keyword argument list matching the extinction profile\n :param kwargs_special: keyword arguments hosting special parameters, here required 'tau0_list'\n :return: extinction corrected flux\n \"\"\"\n if self._compute_bool is False or kwargs_extinction is None:\n return 1\n tau = self._profile.surface_brightness(x, y, kwargs_list=kwargs_extinction)\n tau0_list = kwargs_special.get('tau0_list', None)\n if tau0_list is not None:\n tau0 = tau0_list[self._tau0_index]\n else:\n tau0 = 1\n return np.exp(-tau0 * tau)\n",
"__author__ = 'sibirrer'\n\nimport numpy.testing as npt\nimport numpy as np\nimport pytest\nimport unittest\n\nfrom lenstronomy.Analysis.kinematics_api import KinematicsAPI\nimport lenstronomy.Util.param_util as param_util\n\n\nclass TestKinematicsAPI(object):\n\n def setup(self):\n pass\n\n def test_velocity_dispersion(self):\n z_lens = 0.5\n z_source = 1.5\n kwargs_model = {'lens_model_list': ['SPEP', 'SHEAR', 'SIS', 'SIS', 'SIS'],\n 'lens_light_model_list': ['SERSIC_ELLIPSE', 'SERSIC']}\n\n theta_E = 1.5\n gamma = 1.8\n kwargs_lens = [{'theta_E': theta_E, 'e1': 0, 'center_x': -0.044798916793300093, 'center_y': 0.0054408937891703788, 'e2': 0, 'gamma': gamma},\n {'e1': -0.050871696555354479, 'e2': -0.0061601733920590464}, {'center_y': 2.79985456, 'center_x': -2.32019894,\n 'theta_E': 0.28165274714097904}, {'center_y': 3.83985426,\n 'center_x': -2.32019933, 'theta_E': 0.0038110812674654873},\n {'center_y': 4.31985428, 'center_x': -1.68019931, 'theta_E': 0.45552039839735037}]\n\n phi, q = -0.52624727893702705, 0.79703498156919605\n e1, e2 = param_util.phi_q2_ellipticity(phi, q)\n kwargs_lens_light = [{'n_sersic': 1.1212528655709217,\n 'center_x': -0.019674496231393473,\n 'e1': e1, 'e2': e2, 'amp': 1.1091367792010356, 'center_y': 0.076914975081560991,\n 'R_sersic': 0.42691611878867058},\n {'R_sersic': 0.03025682660635394, 'amp': 139.96763298885992, 'n_sersic': 1.90000008624093865,\n 'center_x': -0.019674496231393473, 'center_y': 0.076914975081560991}]\n r_ani = 0.62\n kwargs_anisotropy = {'r_ani': r_ani}\n R_slit = 3.8\n dR_slit = 1.\n aperture_type = 'slit'\n kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'width': dR_slit, 'length': R_slit, 'angle': 0, 'center_dec': 0}\n\n psf_fwhm = 0.7\n kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm}\n anisotropy_model = 'OM'\n kwargs_mge = {'n_comp': 20}\n r_eff = 0.211919902322\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_seeing=kwargs_psf,\n lens_model_kinematics_bool=[True, False, False, False, False], anisotropy_model=anisotropy_model,\n kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000,\n MGE_light=True)\n\n v_sigma = kinematicAPI.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff)\n\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=kwargs_aperture,\n kwargs_seeing=kwargs_psf, lens_model_kinematics_bool=[True, False, False, False, False],\n anisotropy_model=anisotropy_model,\n kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000,\n MGE_light=True, MGE_mass=True)\n v_sigma_mge_lens = kinematicAPI.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff, theta_E=theta_E)\n #v_sigma_mge_lens = kinematicAPI.velocity_dispersion_numerical(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, kwargs_aperture,\n # kwargs_psf, anisotropy_model, MGE_light=True, MGE_mass=True, theta_E=theta_E,\n # kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge,\n # r_eff=r_eff)\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=kwargs_aperture,\n kwargs_seeing=kwargs_psf,\n lens_model_kinematics_bool=[True, False, False, False, False],\n anisotropy_model=anisotropy_model,\n kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000,\n MGE_light=False, MGE_mass=False, Hernquist_approx=True)\n v_sigma_hernquist = kinematicAPI.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy,\n 
r_eff=r_eff, theta_E=theta_E)\n #v_sigma_hernquist = kinematicAPI.velocity_dispersion_numerical(kwargs_lens, kwargs_lens_light, kwargs_anisotropy,\n # kwargs_aperture, kwargs_psf, anisotropy_model,\n # MGE_light=False, MGE_mass=False,\n # r_eff=r_eff, Hernquist_approx=True)\n\n vel_disp_temp = kinematicAPI.velocity_dispersion_analytical(theta_E, gamma, r_ani=r_ani, r_eff=r_eff)\n print(v_sigma, vel_disp_temp)\n #assert 1 == 0\n npt.assert_almost_equal(v_sigma / vel_disp_temp, 1, decimal=1)\n npt.assert_almost_equal(v_sigma_mge_lens / v_sigma, 1, decimal=1)\n npt.assert_almost_equal(v_sigma / v_sigma_hernquist, 1, decimal=1)\n\n def test_galkin_settings(self):\n z_lens = 0.5\n z_source = 1.5\n kwargs_model = {'lens_model_list': ['SIS'],\n 'lens_light_model_list': ['HERNQUIST']}\n\n kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}]\n kwargs_lens_light = [{'amp': 1, 'Rs': 1, 'center_x': 0, 'center_y': 0}]\n r_ani = 0.62\n kwargs_anisotropy = {'r_ani': r_ani}\n R_slit = 3.8\n dR_slit = 1.\n aperture_type = 'slit'\n kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'width': dR_slit, 'length': R_slit,\n 'angle': 0, 'center_dec': 0}\n\n psf_fwhm = 0.7\n kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm}\n anisotropy_model = 'OM'\n kwargs_mge = {'n_comp': 20}\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=kwargs_aperture,\n kwargs_seeing=kwargs_psf, analytic_kinematics=True,\n anisotropy_model=anisotropy_model,\n kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000)\n galkin, kwargs_profile, kwargs_light = kinematicAPI.galkin_settings(kwargs_lens, kwargs_lens_light, r_eff=None,\n theta_E=None, gamma=None)\n npt.assert_almost_equal(kwargs_profile['gamma'], 2, decimal=2)\n\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=[kwargs_aperture],\n kwargs_seeing=[kwargs_psf], analytic_kinematics=True,\n anisotropy_model=anisotropy_model, multi_observations=True,\n kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000)\n galkin, kwargs_profile, kwargs_light = kinematicAPI.galkin_settings(kwargs_lens, kwargs_lens_light, r_eff=None,\n theta_E=None, gamma=None)\n npt.assert_almost_equal(kwargs_profile['gamma'], 2, decimal=2)\n\n def test_kinematic_light_profile(self):\n z_lens = 0.5\n z_source = 1.5\n kwargs_options = {'lens_light_model_list': ['HERNQUIST_ELLIPSE', 'SERSIC']}\n kwargs_mge = {'n_comp': 20}\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_options, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM')\n r_eff = 0.2\n kwargs_lens_light = [{'amp': 1, 'Rs': r_eff * 0.551, 'e1': 0., 'e2': 0, 'center_x': 0, 'center_y': 0},\n {'amp': 1, 'R_sersic': 1, 'n_sersic': 2, 'center_x': -10, 'center_y': -10}]\n light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile(kwargs_lens_light, MGE_fit=True,\n r_eff=r_eff,\n model_kinematics_bool=[True, False],\n kwargs_mge=kwargs_mge)\n assert light_profile_list[0] == 'MULTI_GAUSSIAN'\n\n light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile(kwargs_lens_light, MGE_fit=False,\n r_eff=r_eff, model_kinematics_bool=[True, False])\n assert light_profile_list[0] == 'HERNQUIST_ELLIPSE'\n\n light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile(kwargs_lens_light, MGE_fit=False,\n Hernquist_approx=True, r_eff=r_eff,\n model_kinematics_bool=[True, False])\n assert light_profile_list[0] == 'HERNQUIST'\n npt.assert_almost_equal(kwargs_light[0]['Rs'] / 
kwargs_lens_light[0]['Rs'], 1, decimal=2)\n\n def test_kinematic_lens_profiles(self):\n z_lens = 0.5\n z_source = 1.5\n kwargs_options = {'lens_model_list': ['SPEP', 'SHEAR']}\n kin_api = KinematicsAPI(z_lens, z_source, kwargs_options, kwargs_aperture={}, kwargs_seeing={}, anisotropy_model='OM')\n kwargs_lens = [{'theta_E': 1.4272358196260446, 'e1': 0, 'center_x': -0.044798916793300093,\n 'center_y': 0.0054408937891703788, 'e2': 0, 'gamma': 1.8},\n {'e1': -0.050871696555354479, 'e2': -0.0061601733920590464}\n ]\n\n kwargs_mge = {'n_comp': 20}\n mass_profile_list, kwargs_profile = kin_api.kinematic_lens_profiles(kwargs_lens, MGE_fit=True,\n kwargs_mge=kwargs_mge, theta_E=1.4,\n model_kinematics_bool=[True, False])\n assert mass_profile_list[0] == 'MULTI_GAUSSIAN_KAPPA'\n\n mass_profile_list, kwargs_profile = kin_api.kinematic_lens_profiles(kwargs_lens, MGE_fit=False,\n model_kinematics_bool=[True, False])\n assert mass_profile_list[0] == 'SPEP'\n\n def test_model_dispersion(self):\n np.random.seed(42)\n z_lens = 0.5\n z_source = 1.5\n r_eff = 1.\n theta_E = 1.\n kwargs_model = {'lens_model_list': ['SIS'], 'lens_light_model_list': ['HERNQUIST']}\n kwargs_lens = [{'theta_E': theta_E, 'center_x': 0, 'center_y': 0}]\n kwargs_lens_light = [{'amp': 1, 'Rs': r_eff * 0.551, 'center_x': 0, 'center_y': 0}]\n kwargs_anisotropy = {'r_ani': 1}\n # settings\n\n R_slit = 3.8\n dR_slit = 1.\n aperture_type = 'slit'\n kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'width': dR_slit, 'length': R_slit,\n 'angle': 0, 'center_dec': 0}\n psf_fwhm = 0.7\n kwargs_seeing = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm}\n anisotropy_model = 'OM'\n kin_api = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seeing,\n anisotropy_model=anisotropy_model)\n\n kwargs_numerics_galkin = {'interpol_grid_num': 2000, 'log_integration': True,\n 'max_integrate': 1000, 'min_integrate': 0.0001}\n kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=True,\n Hernquist_approx=False, MGE_light=False, MGE_mass=False)\n vel_disp_analytic = kin_api.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff,\n theta_E=theta_E, gamma=2)\n\n kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=False,\n Hernquist_approx=False, MGE_light=False, MGE_mass=False)\n vel_disp_numerical = kin_api.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy) #,\n # r_eff=r_eff, theta_E=theta_E, gamma=2)\n npt.assert_almost_equal(vel_disp_numerical / vel_disp_analytic, 1, decimal=2)\n\n kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=False,\n Hernquist_approx=False, MGE_light=False, MGE_mass=False,\n kwargs_mge_light={'n_comp': 10}, kwargs_mge_mass={'n_comp': 5})\n assert kin_api._kwargs_mge_mass['n_comp'] == 5\n assert kin_api._kwargs_mge_light['n_comp'] == 10\n\n def test_velocity_dispersion_map(self):\n np.random.seed(42)\n z_lens = 0.5\n z_source = 1.5\n kwargs_options = {'lens_model_list': ['SIS'], 'lens_light_model_list': ['HERNQUIST']}\n r_eff = 1.\n theta_E = 1\n kwargs_lens = [{'theta_E': theta_E, 'center_x': 0, 'center_y': 0}]\n kwargs_lens_light = [{'amp': 1, 'Rs': r_eff * 0.551, 'center_x': 0, 'center_y': 0}]\n kwargs_anisotropy = {'r_ani': 1}\n\n r_bins = np.array([0, 0.5, 1])\n aperture_type = 'IFU_shells'\n kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'r_bins': r_bins, 'center_dec': 0}\n psf_fwhm = 0.7\n 
kwargs_seeing = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm}\n anisotropy_model = 'OM'\n kin_api = KinematicsAPI(z_lens, z_source, kwargs_options, kwargs_aperture=kwargs_aperture,\n kwargs_seeing=kwargs_seeing, anisotropy_model=anisotropy_model)\n\n kwargs_numerics_galkin = {'interpol_grid_num': 500, 'log_integration': True,\n 'max_integrate': 10, 'min_integrate': 0.001}\n kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=True,\n Hernquist_approx=False, MGE_light=False, MGE_mass=False,\n num_kin_sampling=1000, num_psf_sampling=100)\n vel_disp_analytic = kin_api.velocity_dispersion_map(kwargs_lens, kwargs_lens_light, kwargs_anisotropy,\n r_eff=r_eff, theta_E=theta_E, gamma=2)\n\n kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=False,\n Hernquist_approx=False, MGE_light=False, MGE_mass=False,\n num_kin_sampling=1000, num_psf_sampling=100)\n vel_disp_numerical = kin_api.velocity_dispersion_map(kwargs_lens, kwargs_lens_light, kwargs_anisotropy,\n r_eff=r_eff, theta_E=theta_E, gamma=2)\n print(vel_disp_numerical, vel_disp_analytic)\n npt.assert_almost_equal(vel_disp_numerical, vel_disp_analytic, decimal=-1)\n\n def test_interpolated_sersic(self):\n from lenstronomy.Analysis.light2mass import light2mass_interpol\n kwargs_light = [{'n_sersic': 2, 'R_sersic': 0.5, 'amp': 1, 'center_x': 0.01, 'center_y': 0.01}]\n kwargs_lens = [{'n_sersic': 2, 'R_sersic': 0.5, 'k_eff': 1, 'center_x': 0.01, 'center_y': 0.01}]\n deltaPix = 0.1\n numPix = 100\n\n kwargs_interp = light2mass_interpol(['SERSIC'], kwargs_lens_light=kwargs_light, numPix=numPix,\n deltaPix=deltaPix, subgrid_res=5)\n kwargs_lens_interp = [kwargs_interp]\n from lenstronomy.Analysis.kinematics_api import KinematicsAPI\n z_lens = 0.5\n z_source = 1.5\n r_ani = 0.62\n kwargs_anisotropy = {'r_ani': r_ani}\n R_slit = 3.8\n dR_slit = 1.\n aperture_type = 'slit'\n kwargs_aperture = {'center_ra': 0, 'width': dR_slit, 'length': R_slit, 'angle': 0, 'center_dec': 0, 'aperture_type': aperture_type}\n psf_fwhm = 0.7\n kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm}\n anisotropy_model = 'OM'\n r_eff = 0.5\n kwargs_model = {'lens_model_list': ['SERSIC'],\n 'lens_light_model_list': ['SERSIC']}\n kwargs_mge = {'n_comp': 20}\n kinematic_api = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seeing=kwargs_psf,\n anisotropy_model=anisotropy_model, MGE_light=True, MGE_mass=True,\n kwargs_mge_mass=kwargs_mge, kwargs_mge_light=kwargs_mge)\n\n v_sigma = kinematic_api.velocity_dispersion(kwargs_lens, kwargs_light, kwargs_anisotropy, r_eff=r_eff, theta_E=1)\n kwargs_model_interp = {'lens_model_list': ['INTERPOL'],\n 'lens_light_model_list': ['SERSIC']}\n kinematic_api_interp = KinematicsAPI(z_lens, z_source, kwargs_model_interp, kwargs_aperture, kwargs_seeing=kwargs_psf,\n anisotropy_model=anisotropy_model, MGE_light=True, MGE_mass=True,\n kwargs_mge_mass=kwargs_mge,\n kwargs_mge_light=kwargs_mge)\n v_sigma_interp = kinematic_api_interp.velocity_dispersion(kwargs_lens_interp, kwargs_light, kwargs_anisotropy,\n theta_E=1., r_eff=r_eff)\n npt.assert_almost_equal(v_sigma / v_sigma_interp, 1, 1)\n # use as kinematic constraints\n # compare with MGE Sersic kinematic estimate\n\n\nclass TestRaise(unittest.TestCase):\n\n def test_raise(self):\n with self.assertRaises(ValueError):\n z_lens = 0.5\n z_source = 1.5\n kwargs_model = {'lens_light_model_list': ['HERNQUIST']}\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, 
kwargs_aperture={}, anisotropy_model='OM')\n kwargs_light = [{'Rs': 1, 'amp': 1, 'center_x': 0, 'center_y': 0}]\n kinematicAPI.kinematic_light_profile(kwargs_light, MGE_fit=False,\n Hernquist_approx=True, r_eff=None, model_kinematics_bool=[True])\n with self.assertRaises(ValueError):\n z_lens = 0.5\n z_source = 1.5\n kwargs_model = {'lens_light_model_list': ['HERNQUIST']}\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM')\n kwargs_light = [{'Rs': 1, 'amp': 1, 'center_x': 0, 'center_y': 0}]\n kinematicAPI.kinematic_light_profile(kwargs_light, MGE_fit=False,\n Hernquist_approx=False, r_eff=None, analytic_kinematics=True)\n with self.assertRaises(ValueError):\n z_lens = 0.5\n z_source = 1.5\n kwargs_model = {'lens_light_model_list': ['HERNQUIST'], 'lens_model_list': []}\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM')\n kwargs_light = [{'Rs': 1, 'amp': 1, 'center_x': 0, 'center_y': 0}]\n kinematicAPI.kinematic_lens_profiles(kwargs_light, MGE_fit=True, model_kinematics_bool=[True])\n with self.assertRaises(ValueError):\n z_lens = 0.5\n z_source = 1.5\n kwargs_model = {'lens_light_model_list': ['HERNQUIST'], 'lens_model_list': []}\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM')\n kinematicAPI.kinematic_lens_profiles(kwargs_lens=None, analytic_kinematics=True)\n\n with self.assertRaises(ValueError):\n z_lens = 0.5\n z_source = 1.5\n kwargs_model = {'lens_light_model_list': ['HERNQUIST'], 'lens_model_list': []}\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM')\n kwargs_lens_light = [{'Rs': 1, 'center_x': 0, 'center_y': 0}]\n kinematicAPI.kinematic_light_profile(kwargs_lens_light, r_eff=None, MGE_fit=True, model_kinematics_bool=None,\n Hernquist_approx=False, kwargs_mge=None)\n with self.assertRaises(ValueError):\n z_lens = 0.5\n z_source = 1.5\n kwargs_model = {'lens_light_model_list': ['HERNQUIST'], 'lens_model_list': ['SIS']}\n kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}]\n kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM')\n kinematicAPI.kinematic_lens_profiles(kwargs_lens, MGE_fit=True, model_kinematics_bool=None, theta_E=None,\n kwargs_mge={})\n\n\nif __name__ == '__main__':\n pytest.main()\n",
"__author__ = 'sibirrer'\n\nimport lenstronomy.Util.param_util as param_util\nfrom lenstronomy.LensModel.Profiles.base_profile import LensProfileBase\nfrom lenstronomy.LensModel.Profiles.convergence import Convergence\nimport numpy as np\n\n__all__ = ['Shear', 'ShearGammaPsi', 'ShearReduced']\n\n\nclass Shear(LensProfileBase):\n \"\"\"\n class for external shear gamma1, gamma2 expression\n \"\"\"\n param_names = ['gamma1', 'gamma2', 'ra_0', 'dec_0']\n lower_limit_default = {'gamma1': -0.5, 'gamma2': -0.5, 'ra_0': -100, 'dec_0': -100}\n upper_limit_default = {'gamma1': 0.5, 'gamma2': 0.5, 'ra_0': 100, 'dec_0': 100}\n\n def function(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y0-coordinate (angle)\n :param gamma1: shear component\n :param gamma2: shear component\n :param ra_0: x/ra position where shear deflection is 0\n :param dec_0: y/dec position where shear deflection is 0\n :return: lensing potential\n \"\"\"\n x_ = x - ra_0\n y_ = y - dec_0\n f_ = 1/2. * (gamma1 * x_ * x_ + 2 * gamma2 * x_ * y_ - gamma1 * y_ * y_)\n return f_\n\n def derivatives(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y0-coordinate (angle)\n :param gamma1: shear component\n :param gamma2: shear component\n :param ra_0: x/ra position where shear deflection is 0\n :param dec_0: y/dec position where shear deflection is 0\n :return: deflection angles\n \"\"\"\n x_ = x - ra_0\n y_ = y - dec_0\n f_x = gamma1 * x_ + gamma2 * y_\n f_y = +gamma2 * x_ - gamma1 * y_\n return f_x, f_y\n\n def hessian(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y0-coordinate (angle)\n :param gamma1: shear component\n :param gamma2: shear component\n :param ra_0: x/ra position where shear deflection is 0\n :param dec_0: y/dec position where shear deflection is 0\n :return: f_xx, f_xy, f_yx, f_yy\n \"\"\"\n gamma1 = gamma1\n gamma2 = gamma2\n kappa = 0\n f_xx = kappa + gamma1\n f_yy = kappa - gamma1\n f_xy = gamma2\n return f_xx, f_xy, f_xy, f_yy\n\n\nclass ShearGammaPsi(LensProfileBase):\n \"\"\"\n class to model a shear field with shear strength and direction. The translation ot the cartesian shear distortions\n is as follow:\n\n .. math::\n \\\\gamma_1 = \\\\gamma_{ext} \\\\cos(2 \\\\phi_{ext}\n \\\\gamma_2 = \\\\gamma_{ext} \\\\sin(2 \\\\phi_{ext}\n\n \"\"\"\n param_names = ['gamma_ext', 'psi_ext', 'ra_0', 'dec_0']\n lower_limit_default = {'gamma_ext': 0, 'psi_ext': -np.pi, 'ra_0': -100, 'dec_0': -100}\n upper_limit_default = {'gamma_ext': 1, 'psi_ext': np.pi, 'ra_0': 100, 'dec_0': 100}\n\n def __init__(self):\n self._shear_e1e2 = Shear()\n super(ShearGammaPsi, self).__init__()\n\n @staticmethod\n def function(x, y, gamma_ext, psi_ext, ra_0=0, dec_0=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y0-coordinate (angle)\n :param gamma_ext: shear strength\n :param psi_ext: shear angle (radian)\n :param ra_0: x/ra position where shear deflection is 0\n :param dec_0: y/dec position where shear deflection is 0\n :return:\n \"\"\"\n # change to polar coordinate\n r, phi = param_util.cart2polar(x-ra_0, y-dec_0)\n f_ = 1. 
/ 2 * gamma_ext * r ** 2 * np.cos(2 * (phi - psi_ext))\n return f_\n\n def derivatives(self, x, y, gamma_ext, psi_ext, ra_0=0, dec_0=0):\n # rotation angle\n gamma1, gamma2 = param_util.shear_polar2cartesian(psi_ext, gamma_ext)\n return self._shear_e1e2.derivatives(x, y, gamma1, gamma2, ra_0, dec_0)\n\n def hessian(self, x, y, gamma_ext, psi_ext, ra_0=0, dec_0=0):\n gamma1, gamma2 = param_util.shear_polar2cartesian(psi_ext, gamma_ext)\n return self._shear_e1e2.hessian(x, y, gamma1, gamma2, ra_0, dec_0)\n\n\nclass ShearReduced(LensProfileBase):\n \"\"\"\n reduced shear distortions :math:`\\\\gamma' = \\\\gamma / (1-\\\\kappa)`.\n This distortion keeps the magnification as unity and, thus, does not change the size of apparent objects.\n To keep the magnification at unity, it requires\n\n .. math::\n (1-\\\\kappa)^2 - \\\\gamma_1^2 - \\\\gamma_2^ = 1\n\n Thus, for given pair of reduced shear :math:`(\\\\gamma'_1, \\\\gamma'_2)`, an additional convergence term is calculated\n and added to the lensing distortions.\n \"\"\"\n param_names = ['gamma1', 'gamma2', 'ra_0', 'dec_0']\n lower_limit_default = {'gamma1': -0.5, 'gamma2': -0.5, 'ra_0': -100, 'dec_0': -100}\n upper_limit_default = {'gamma1': 0.5, 'gamma2': 0.5, 'ra_0': 100, 'dec_0': 100}\n\n def __init__(self):\n self._shear = Shear()\n self._convergence = Convergence()\n super(ShearReduced, self).__init__()\n\n @staticmethod\n def _kappa_reduced(gamma1, gamma2):\n \"\"\"\n compute convergence such that magnification is unity\n\n :param gamma1: reduced shear\n :param gamma2: reduced shear\n :return: kappa\n \"\"\"\n kappa = 1 - 1. / np.sqrt(1 - gamma1**2 - gamma2**2)\n gamma1_ = (1-kappa) * gamma1\n gamma2_ = (1-kappa) * gamma2\n return kappa, gamma1_, gamma2_\n\n def function(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y0-coordinate (angle)\n :param gamma1: shear component\n :param gamma2: shear component\n :param ra_0: x/ra position where shear deflection is 0\n :param dec_0: y/dec position where shear deflection is 0\n :return: lensing potential\n \"\"\"\n kappa, gamma1_, gamma2_ = self._kappa_reduced(gamma1, gamma2)\n f_shear = self._shear.function(x, y, gamma1_, gamma2_, ra_0, dec_0)\n f_kappa = self._convergence.function(x, y, kappa, ra_0, dec_0)\n return f_shear + f_kappa\n\n def derivatives(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y0-coordinate (angle)\n :param gamma1: shear component\n :param gamma2: shear component\n :param ra_0: x/ra position where shear deflection is 0\n :param dec_0: y/dec position where shear deflection is 0\n :return: deflection angles\n \"\"\"\n kappa, gamma1_, gamma2_ = self._kappa_reduced(gamma1, gamma2)\n f_x_shear, f_y_shear = self._shear.derivatives(x, y, gamma1_, gamma2_, ra_0, dec_0)\n f_x_kappa, f_y_kappa = self._convergence.derivatives(x, y, kappa, ra_0, dec_0)\n return f_x_shear + f_x_kappa, f_y_shear + f_y_kappa\n\n def hessian(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y0-coordinate (angle)\n :param gamma1: shear component\n :param gamma2: shear component\n :param ra_0: x/ra position where shear deflection is 0\n :param dec_0: y/dec position where shear deflection is 0\n :return: f_xx, f_xy, f_yx, f_yy\n \"\"\"\n kappa, gamma1_, gamma2_ = self._kappa_reduced(gamma1, gamma2)\n f_xx_g, f_xy_g, f_yx_g, f_yy_g = self._shear.hessian(x, y, gamma1_, gamma2_, ra_0, dec_0)\n f_xx_k, f_xy_k, f_yx_k, f_yy_k = 
self._convergence.hessian(x, y, kappa, ra_0, dec_0)\n f_xx = f_xx_g + f_xx_k\n f_yy = f_yy_g + f_yy_k\n f_xy = f_xy_g + f_xy_k\n return f_xx, f_xy, f_xy, f_yy\n",
"import pytest\nimport numpy.testing as npt\nimport numpy as np\nimport unittest\n\nimport lenstronomy.Util.util as util\nimport lenstronomy.Util.kernel_util as kernel_util\nfrom lenstronomy.ImSim.image_model import ImageModel\n\n\nclass TestNumerics(object):\n\n def setup(self):\n\n # we define a model consisting of a singe Sersric profile\n from lenstronomy.LightModel.light_model import LightModel\n light_model_list = ['SERSIC_ELLIPSE']\n self.lightModel = LightModel(light_model_list=light_model_list)\n self.kwargs_light = [\n {'amp': 100, 'R_sersic': 0.5, 'n_sersic': 3, 'e1': 0, 'e2': 0, 'center_x': 0.02, 'center_y': 0}]\n\n # we define a pixel grid and a higher resolution super sampling factor\n self._supersampling_factor = 5\n numPix = 61 # cutout pixel size\n deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2)\n x, y, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(\n numPix=numPix, deltapix=deltaPix, subgrid_res=1, left_lower=False, inverse=False)\n flux = self.lightModel.surface_brightness(x, y, kwargs_list=self.kwargs_light)\n flux = util.array2image(flux)\n flux_max = np.max(flux)\n conv_pixels_partial = np.zeros((numPix, numPix), dtype=bool)\n conv_pixels_partial[flux >= flux_max / 20] = True\n self._conv_pixels_partial = conv_pixels_partial\n\n # high resolution ray-tracing and high resolution convolution, the full calculation\n self.kwargs_numerics_true = {'supersampling_factor': self._supersampling_factor,\n # super sampling factor of (partial) high resolution ray-tracing\n 'compute_mode': 'regular', # 'regular' or 'adaptive'\n 'supersampling_convolution': True,\n # bool, if True, performs the supersampled convolution (either on regular or adaptive grid)\n 'supersampling_kernel_size': None,\n # size of the higher resolution kernel region (can be smaller than the original kernel). None leads to use the full size\n 'flux_evaluate_indexes': None, # bool mask, if None, it will evaluate all (sub) pixels\n 'supersampled_indexes': None,\n # bool mask of pixels to be computed in supersampled grid (only for adaptive mode)\n 'compute_indexes': None,\n # bool mask of pixels to be computed the PSF response (flux being added to). 
Only used for adaptive mode and can be set =likelihood mask.\n 'point_source_supersampling_factor': 1,\n # int, supersampling factor when rendering a point source (not used in this script)\n }\n\n # high resolution convolution on a smaller PSF with low resolution convolution on the edges of the PSF and high resolution ray tracing\n self.kwargs_numerics_high_res_narrow = {'supersampling_factor': self._supersampling_factor,\n 'compute_mode': 'regular',\n 'supersampling_convolution': True,\n 'supersampling_kernel_size': 5,\n }\n\n # low resolution convolution based on high resolution ray-tracing grid\n self.kwargs_numerics_low_conv_high_grid = {'supersampling_factor': self._supersampling_factor,\n 'compute_mode': 'regular',\n 'supersampling_convolution': False,\n # does not matter for supersampling_factor=1\n 'supersampling_kernel_size': None,\n # does not matter for supersampling_factor=1\n }\n\n # low resolution convolution with a subset of pixels with high resolution ray-tracing\n self.kwargs_numerics_low_conv_high_adaptive = {'supersampling_factor': self._supersampling_factor,\n 'compute_mode': 'adaptive',\n 'supersampling_convolution': False,\n # does not matter for supersampling_factor=1\n 'supersampling_kernel_size': None,\n # does not matter for supersampling_factor=1\n 'supersampled_indexes': self._conv_pixels_partial,\n 'convolution_kernel_size': 9,\n }\n\n # low resolution convolution with a subset of pixels with high resolution ray-tracing and high resoluton convolution on smaller kernel size\n self.kwargs_numerics_high_adaptive = {'supersampling_factor': self._supersampling_factor,\n 'compute_mode': 'adaptive',\n 'supersampling_convolution': True,\n # does not matter for supersampling_factor=1\n 'supersampling_kernel_size': 5, # does not matter for supersampling_factor=1\n 'supersampled_indexes': self._conv_pixels_partial,\n 'convolution_kernel_size': 9,\n }\n\n # low resolution convolution and low resolution ray tracing, the simplest calculation\n self.kwargs_numerics_low_res = {'supersampling_factor': 1,\n 'compute_mode': 'regular',\n 'supersampling_convolution': False, # does not matter for supersampling_factor=1\n 'supersampling_kernel_size': None, # does not matter for supersampling_factor=1\n 'convolution_kernel_size': 9,\n }\n\n flux_evaluate_indexes = np.zeros((numPix, numPix), dtype=bool)\n flux_evaluate_indexes[flux >= flux_max / 1000] = True\n # low resolution convolution on subframe\n self.kwargs_numerics_partial = {'supersampling_factor': 1,\n 'compute_mode': 'regular',\n 'supersampling_convolution': False,\n # does not matter for supersampling_factor=1\n 'supersampling_kernel_size': None, # does not matter for supersampling_factor=1\n 'flux_evaluate_indexes': flux_evaluate_indexes,\n 'convolution_kernel_size': 9\n }\n\n\n # import PSF file\n kernel_super = kernel_util.kernel_gaussian(kernel_numPix=11 * self._supersampling_factor,\n deltaPix=deltaPix / self._supersampling_factor, fwhm=0.1)\n\n\n kernel_size = 9\n kernel_super = kernel_util.cut_psf(psf_data=kernel_super, psf_size=kernel_size * self._supersampling_factor)\n\n # make instance of the PixelGrid class\n from lenstronomy.Data.pixel_grid import PixelGrid\n kwargs_grid = {'nx': numPix, 'ny': numPix, 'transform_pix2angle': Mpix2coord, 'ra_at_xy_0': ra_at_xy_0,\n 'dec_at_xy_0': dec_at_xy_0}\n self.pixel_grid = PixelGrid(**kwargs_grid)\n\n # make instance of the PSF class\n from lenstronomy.Data.psf import PSF\n kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_super,\n 
'point_source_supersampling_factor': self._supersampling_factor}\n self.psf_class = PSF(**kwargs_psf)\n\n\n\n # without convolution\n image_model_true = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_true)\n self.image_true = image_model_true.image(kwargs_lens_light=self.kwargs_light)\n\n def test_full(self):\n image_model_true = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_true)\n image_unconvolved = image_model_true.image(kwargs_lens_light=self.kwargs_light, unconvolved=True)\n npt.assert_almost_equal(np.sum(self.image_true) / np.sum(image_unconvolved), 1, decimal=2)\n\n def test_high_res_narrow(self):\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_high_res_narrow)\n image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False)\n npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=2)\n\n def test_low_conv_high_grid(self):\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_low_conv_high_grid)\n image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False)\n npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=1)\n\n def test_low_conv_high_adaptive(self):\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_low_conv_high_adaptive)\n image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False)\n npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=1)\n\n def test_high_adaptive(self):\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_high_adaptive)\n image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False)\n npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=1)\n\n def test_low_res(self):\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_low_res)\n image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False)\n npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=1)\n\n def test_sub_frame(self):\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_partial)\n image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False)\n delta = (self.image_true - image_conv) / self.image_true\n npt.assert_almost_equal(delta[self._conv_pixels_partial], 0, decimal=1)\n\n def test_property_access(self):\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=self.kwargs_numerics_true)\n grid_supersampling_factor = image_model.ImageNumerics.grid_supersampling_factor\n assert grid_supersampling_factor == self._supersampling_factor\n\n kwargs_numerics = {'supersampling_factor': 1, 'compute_mode': 'regular', 'supersampling_convolution': False}\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n 
kwargs_numerics=kwargs_numerics)\n from lenstronomy.ImSim.Numerics.convolution import PixelKernelConvolution\n convolution_class = image_model.ImageNumerics.convolution_class\n assert isinstance(convolution_class, PixelKernelConvolution)\n\n kwargs_numerics = {'supersampling_factor': 2, 'compute_mode': 'regular', 'supersampling_convolution': True}\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=kwargs_numerics)\n from lenstronomy.ImSim.Numerics.convolution import SubgridKernelConvolution\n convolution_class = image_model.ImageNumerics.convolution_class\n assert isinstance(convolution_class, SubgridKernelConvolution)\n\n kwargs_numerics = {'supersampling_factor': 2, 'compute_mode': 'adaptive', 'supersampling_convolution': True}\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=kwargs_numerics)\n from lenstronomy.ImSim.Numerics.adaptive_numerics import AdaptiveConvolution\n convolution_class = image_model.ImageNumerics.convolution_class\n assert isinstance(convolution_class, AdaptiveConvolution)\n\n kwargs_numerics = {'compute_mode': 'regular'}\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=kwargs_numerics)\n from lenstronomy.ImSim.Numerics.grid import RegularGrid\n grid_class = image_model.ImageNumerics.grid_class\n assert isinstance(grid_class, RegularGrid)\n\n kwargs_numerics = {'compute_mode': 'adaptive'}\n image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel,\n kwargs_numerics=kwargs_numerics)\n from lenstronomy.ImSim.Numerics.grid import AdaptiveGrid\n grid_class = image_model.ImageNumerics.grid_class\n assert isinstance(grid_class, AdaptiveGrid)\n\n\ndef test_supersampling_simple():\n \"\"\"\n\n :return:\n \"\"\"\n from lenstronomy.Data.psf import PSF\n from lenstronomy.SimulationAPI.data_api import DataAPI\n\n detector_pixel_scale = 0.04\n numpix = 64\n supersampling_factor = 2\n # generate a Gaussian image\n\n x, y = util.make_grid(numPix=numpix * supersampling_factor, deltapix=detector_pixel_scale / supersampling_factor)\n from lenstronomy.LightModel.Profiles.gaussian import Gaussian\n gaussian = Gaussian()\n image_1d = gaussian.function(x, y, amp=1, sigma=0.1)\n image = util.array2image(image_1d)\n\n # generate psf kernal supersampled\n kernel_super = kernel_util.kernel_gaussian(kernel_numPix=21 * supersampling_factor + 1,\n deltaPix=detector_pixel_scale / supersampling_factor, fwhm=0.2)\n\n psf_parameters = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_super,\n 'point_source_supersampling_factor': supersampling_factor}\n kwargs_detector = {'pixel_scale': detector_pixel_scale,\n 'ccd_gain': 2.5, 'read_noise': 4.0, 'magnitude_zero_point': 25.0,\n 'exposure_time': 5400.0, 'sky_brightness': 22, 'num_exposures': 1,\n 'background_noise': None}\n kwargs_numerics = {'supersampling_factor': 2,\n 'supersampling_convolution': True,\n 'point_source_supersampling_factor': 2,\n 'supersampling_kernel_size': 21\n }\n psf_model = PSF(**psf_parameters)\n data_class = DataAPI(numpix=numpix, **kwargs_detector).data_class\n\n from lenstronomy.ImSim.Numerics.numerics_subframe import NumericsSubFrame\n image_numerics = NumericsSubFrame(pixel_grid=data_class,\n psf=psf_model, **kwargs_numerics)\n\n conv_class = image_numerics.convolution_class\n conv_flat = conv_class.convolution2d(image)\n print(np.shape(conv_flat), 'shape of output')\n\n # 
psf_helper = lenstronomy_utils.PSFHelper(data_class, psf_model, kwargs_numerics)\n\n # Convolve with lenstronomy and with scipy\n # helper_image = psf_helper.psf_model(image)\n from scipy import signal\n\n scipy_image = signal.fftconvolve(image, kernel_super, mode='same')\n from lenstronomy.Util import image_util\n image_scipy_resized = image_util.re_size(scipy_image, supersampling_factor)\n image_unconvolved = image_util.re_size(image, supersampling_factor)\n\n # Compare the outputs\n\n # low res convolution as comparison\n kwargs_numerics_low_res = {'supersampling_factor': 2,\n 'supersampling_convolution': False,\n 'point_source_supersampling_factor': 2,\n }\n image_numerics_low_res = NumericsSubFrame(pixel_grid=data_class,\n psf=psf_model, **kwargs_numerics_low_res)\n conv_class_low_res = image_numerics_low_res.convolution_class\n conv_flat_low_res = conv_class_low_res.convolution2d(image_unconvolved)\n\n #import matplotlib.pyplot as plt\n #plt.matshow(image_scipy_resized - image_unconvolved)\n #plt.colorbar()\n #plt.show()\n\n #plt.matshow(image_scipy_resized - conv_flat)\n #plt.colorbar()\n #plt.show()\n\n #plt.matshow(image_scipy_resized - conv_flat_low_res)\n #plt.colorbar()\n #plt.show()\n\n np.testing.assert_almost_equal(conv_flat, image_scipy_resized)\n\n\n\n\nclass TestRaise(unittest.TestCase):\n\n def test_integer_in_supersampling_factor(self):\n from lenstronomy.Data.psf import PSF\n kwargs_psf = {'psf_type': 'NONE'}\n psf_class = PSF(**kwargs_psf)\n\n from lenstronomy.ImSim.Numerics.numerics import Numerics\n with self.assertRaises(TypeError):\n Numerics(pixel_grid=None, psf=psf_class, supersampling_factor=1.)\n\n\nif __name__ == '__main__':\n pytest.main()\n",
"__author__ = 'sibirrer'\n\nfrom lenstronomy.LensModel.Profiles.base_profile import LensProfileBase\nimport numpy as np\n\n__all__ = ['Shift']\n\n\nclass Shift(LensProfileBase):\n \"\"\"\n Lens model with a constant shift of the deflection field\n \"\"\"\n param_names = ['alpha_x', 'alpha_y']\n lower_limit_default = {'alpha_x': -1000, 'alpha_y': -1000}\n upper_limit_default = {'alpha_x': 1000, 'alpha_y': 1000}\n\n def function(self, x, y, alpha_x, alpha_y):\n \"\"\"\n\n :param x: coordinate in image plane (angle)\n :param y: coordinate in image plane (angle)\n :param alpha_x: shift in x-direction (angle)\n :param alpha_y: shift in y-direction (angle)\n :return: lensing potential\n \"\"\"\n\n return np.zeros_like(x)\n\n def derivatives(self, x, y, alpha_x, alpha_y):\n \"\"\"\n\n :param x: coordinate in image plane (angle)\n :param y: coordinate in image plane (angle)\n :param alpha_x: shift in x-direction (angle)\n :param alpha_y: shift in y-direction (angle)\n :return: deflection in x- and y-direction\n \"\"\"\n f_x = np.ones_like(x) * alpha_x\n f_y = np.ones_like(x) * alpha_y\n return f_x, f_y\n\n def hessian(self, x, y, alpha_x, alpha_y):\n \"\"\"\n\n :param x: coordinate in image plane (angle)\n :param y: coordinate in image plane (angle)\n :param alpha_x: shift in x-direction (angle)\n :param alpha_y: shift in y-direction (angle)\n :return: hessian elements f_xx, f_xy, f_yx, f_yy\n \"\"\"\n f_xx, f_xy, f_yx, f_yy = 0, 0, 0, 0\n return f_xx, f_xy, f_yx, f_yy\n"
] | [
[
"numpy.exp"
],
[
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.random.seed"
],
[
"numpy.sqrt",
"numpy.cos"
],
[
"scipy.signal.fftconvolve",
"numpy.testing.assert_almost_equal",
"numpy.max",
"numpy.shape",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.ones_like",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
forestriveral/floris | [
"02c31e121283ad6ccae987cfa3aa1bf1e4b43014"
] | [
"examples/visualization/subtract_inflow.py"
] | [
"# Copyright 2021 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n# See https://floris.readthedocs.io for documentation\n\n\nimport matplotlib.pyplot as plt\n\nimport floris.tools as wfct\nimport floris.tools.cut_plane as cp\n\n\n# Initialize the FLORIS interface fi\nfi = wfct.floris_interface.FlorisInterface(\"../example_input.json\")\n\n# Single turbine at 0,0\nfi.reinitialize_flow_field(layout_array=([0], [0]))\n\n# Calculate wake\nfi.calculate_wake()\n\n# Grab some cross planes\nD = 126\ncut_plane_base_5 = fi.get_cross_plane(5 * D)\ncut_plane_base_in = fi.get_cross_plane(-5 * D)\n\n\n# Get the difference planes\ncut_plane_diff = cp.subtract(cut_plane_base_5, cut_plane_base_in)\n\n\n# Plot and show\nfig, axarr = plt.subplots(3, 1, figsize=(7, 10))\n\nax = axarr[0]\nwfct.visualization.visualize_cut_plane(cut_plane_base_5, ax=ax, minSpeed=4, maxSpeed=8)\nax.set_title(\"Baseline, 5D\")\n\nax = axarr[1]\nwfct.visualization.visualize_cut_plane(cut_plane_base_in, ax=ax, minSpeed=4, maxSpeed=8)\nax.set_title(\"Baseline, Inflow\")\n\nax = axarr[2]\nwfct.visualization.visualize_cut_plane(cut_plane_diff, ax=ax, minSpeed=-2, maxSpeed=2)\nax.set_title(\"5D - INFLOW\")\n\n# Reverse axis making the view upstream looking down\nfor ax in axarr.flatten():\n wfct.visualization.reverse_cut_plane_x_axis_in_plot(ax)\n\nplt.savefig(\"sub_inflow.png\", format='png', bbox_inches='tight', dpi=150)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ConsenSys/handel | [
"bc3f6f8194db140a1067ab157fc6bb1fb53a0144"
] | [
"simul/plots/failing_time.py"
] | [
"#!/usr/bin/env python\n\n## This script generate the graphs that compares handel signature \n## generation with different number of failing nodes for a fixed \n## number of total nodes, and a fixed threshold 51%\n##\nimport sys\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nplt.figure(figsize=(4,2))\nfrom lib import *\n\n\nsigColumn = \"sigen_wall_avg\"\nnodeColumn = \"totalNbOfNodes\"\nfailingColumn = \"failing\"\n\nyColumns = {\n \"sigen_wall_avg\": \"Average\",\n \"sigen_wall_max\": \"Maximum\"}\n\n# \"sigen_wall_min\": \"Minimum\",\n \n\n## threshold of signatures required\nthreshold = \"51\"\nexpectedNodes = 4000\nnodes = None\n\nfiles = {\"csv/handel_4000_failing.csv\": \"handel\"}\ndatas = read_datafiles(files)\n\nfor f,v in datas.items():\n nodes = v[nodeColumn].max() # should be 2000\n if int(v[nodeColumn].mean()) != expectedNodes:\n print(\"error : nodes should be \" + str(expectedNodes))\n sys.exit(1)\n\n x = v[failingColumn].map(lambda x: int((x/nodes) * 100))\n for c,name in yColumns.items():\n # y = v[c]\n y = v[c].map(lambda x: x * 1000)\n print(\"file %s -> %d data points on %s\" % (f,len(y),sigColumn))\n # label = files[f]\n label = name\n if label == \"\":\n label = input(\"Label for file %s: \" % f)\n\n plot(x,y,\"-\",label,allColors.popleft())\n\nlabel= 35\nplt.legend(fontsize=label)\nplt.ylabel(\"Signature generation (ms)\",fontsize=label)\nplt.xlabel(\"Failing nodes in %\",fontsize=label)\n# plt.yscale('log')\n# plt.title(\"Time for 51% signature threshold over 4000 nodes\")\n# plt.axis('log')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sakibguy/models | [
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"662f392677be0e6822eb9852a57f06b3fd1513bf",
"7214e17eb425963ec3d0295be215d5d26deaeb32"
] | [
"official/nlp/tools/export_tfhub_lib_test.py",
"official/legacy/detection/modeling/learning_rates.py",
"official/vision/beta/projects/centernet/ops/box_list_ops.py"
] | [
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests export_tfhub_lib.\"\"\"\n\nimport os\nimport tempfile\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_text as text\n\nfrom sentencepiece import SentencePieceTrainer\nfrom official.legacy.bert import configs\nfrom official.modeling import tf_utils\nfrom official.nlp.configs import encoders\nfrom official.nlp.modeling import layers\nfrom official.nlp.modeling import models\nfrom official.nlp.tools import export_tfhub_lib\n\n\ndef _get_bert_config_or_encoder_config(use_bert_config, hidden_size,\n num_hidden_layers, vocab_size=100):\n \"\"\"Returns config args for export_tfhub_lib._create_model().\"\"\"\n if use_bert_config:\n bert_config = configs.BertConfig(\n vocab_size=vocab_size,\n hidden_size=hidden_size,\n intermediate_size=32,\n max_position_embeddings=128,\n num_attention_heads=2,\n num_hidden_layers=num_hidden_layers)\n encoder_config = None\n else:\n bert_config = None\n encoder_config = encoders.EncoderConfig(\n type=\"albert\",\n albert=encoders.AlbertEncoderConfig(\n vocab_size=vocab_size,\n embedding_width=16,\n hidden_size=hidden_size,\n intermediate_size=32,\n max_position_embeddings=128,\n num_attention_heads=2,\n num_layers=num_hidden_layers,\n dropout_rate=0.1))\n\n return bert_config, encoder_config\n\n\ndef _get_vocab_or_sp_model_dummy(temp_dir, use_sp_model):\n \"\"\"Returns tokenizer asset args for export_tfhub_lib.export_model().\"\"\"\n dummy_file = os.path.join(temp_dir, \"dummy_file.txt\")\n with tf.io.gfile.GFile(dummy_file, \"w\") as f:\n f.write(\"dummy content\")\n if use_sp_model:\n vocab_file, sp_model_file = None, dummy_file\n else:\n vocab_file, sp_model_file = dummy_file, None\n return vocab_file, sp_model_file\n\n\ndef _read_asset(asset: tf.saved_model.Asset):\n return tf.io.gfile.GFile(asset.asset_path.numpy()).read()\n\n\ndef _find_lambda_layers(layer):\n \"\"\"Returns list of all Lambda layers in a Keras model.\"\"\"\n if isinstance(layer, tf.keras.layers.Lambda):\n return [layer]\n elif hasattr(layer, \"layers\"): # It's nested, like a Model.\n result = []\n for l in layer.layers:\n result += _find_lambda_layers(l)\n return result\n else:\n return []\n\n\nclass ExportModelTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Tests exporting a Transformer Encoder model as a SavedModel.\n\n This covers export from an Encoder checkpoint to a SavedModel without\n the .mlm subobject. This is no longer preferred, but still useful\n for models like Electra that are trained without the MLM task.\n\n The export code is generic. 
This test focuses on two main cases\n (the most important ones in practice when this was written in 2020):\n - BERT built from a legacy BertConfig, for use with BertTokenizer.\n - ALBERT built from an EncoderConfig (as a representative of all other\n choices beyond BERT, for use with SentencepieceTokenizer (the one\n alternative to BertTokenizer).\n \"\"\"\n\n @parameterized.named_parameters((\"Bert\", True), (\"Albert\", False))\n def test_export_model(self, use_bert):\n # Create the encoder and export it.\n hidden_size = 16\n num_hidden_layers = 1\n bert_config, encoder_config = _get_bert_config_or_encoder_config(\n use_bert, hidden_size, num_hidden_layers)\n bert_model, encoder = export_tfhub_lib._create_model(\n bert_config=bert_config, encoder_config=encoder_config, with_mlm=False)\n self.assertEmpty(\n _find_lambda_layers(bert_model),\n \"Lambda layers are non-portable since they serialize Python bytecode.\")\n model_checkpoint_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n checkpoint = tf.train.Checkpoint(encoder=encoder)\n checkpoint.save(os.path.join(model_checkpoint_dir, \"test\"))\n model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)\n\n vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(\n self.get_temp_dir(), use_sp_model=not use_bert)\n export_path = os.path.join(self.get_temp_dir(), \"hub\")\n export_tfhub_lib.export_model(\n export_path=export_path,\n bert_config=bert_config,\n encoder_config=encoder_config,\n model_checkpoint_path=model_checkpoint_path,\n with_mlm=False,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=True)\n\n # Restore the exported model.\n hub_layer = hub.KerasLayer(export_path, trainable=True)\n\n # Check legacy tokenization data.\n if use_bert:\n self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())\n self.assertEqual(\"dummy content\",\n _read_asset(hub_layer.resolved_object.vocab_file))\n self.assertFalse(hasattr(hub_layer.resolved_object, \"sp_model_file\"))\n else:\n self.assertFalse(hasattr(hub_layer.resolved_object, \"do_lower_case\"))\n self.assertFalse(hasattr(hub_layer.resolved_object, \"vocab_file\"))\n self.assertEqual(\"dummy content\",\n _read_asset(hub_layer.resolved_object.sp_model_file))\n\n # Check restored weights.\n self.assertEqual(len(bert_model.trainable_weights),\n len(hub_layer.trainable_weights))\n for source_weight, hub_weight in zip(bert_model.trainable_weights,\n hub_layer.trainable_weights):\n self.assertAllClose(source_weight.numpy(), hub_weight.numpy())\n\n # Check computation.\n seq_length = 10\n dummy_ids = np.zeros((2, seq_length), dtype=np.int32)\n input_dict = dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids)\n hub_output = hub_layer(input_dict)\n source_output = bert_model(input_dict)\n encoder_output = encoder(input_dict)\n self.assertEqual(hub_output[\"pooled_output\"].shape, (2, hidden_size))\n self.assertEqual(hub_output[\"sequence_output\"].shape,\n (2, seq_length, hidden_size))\n self.assertLen(hub_output[\"encoder_outputs\"], num_hidden_layers)\n\n for key in (\"pooled_output\", \"sequence_output\", \"encoder_outputs\"):\n self.assertAllClose(source_output[key], hub_output[key])\n self.assertAllClose(source_output[key], encoder_output[key])\n\n # The \"default\" output of BERT as a text representation is pooled_output.\n self.assertAllClose(hub_output[\"pooled_output\"], hub_output[\"default\"])\n\n # Test that training=True makes a difference (activates dropout).\n def _dropout_mean_stddev(training, 
num_runs=20):\n input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)\n input_dict = dict(\n input_word_ids=input_ids,\n input_mask=np.ones_like(input_ids),\n input_type_ids=np.zeros_like(input_ids))\n outputs = np.concatenate([\n hub_layer(input_dict, training=training)[\"pooled_output\"]\n for _ in range(num_runs)\n ])\n return np.mean(np.std(outputs, axis=0))\n\n self.assertLess(_dropout_mean_stddev(training=False), 1e-6)\n self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)\n\n # Test propagation of seq_length in shape inference.\n input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_dict = dict(\n input_word_ids=input_word_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids)\n output_dict = hub_layer(input_dict)\n pooled_output = output_dict[\"pooled_output\"]\n sequence_output = output_dict[\"sequence_output\"]\n encoder_outputs = output_dict[\"encoder_outputs\"]\n\n self.assertEqual(pooled_output.shape.as_list(), [None, hidden_size])\n self.assertEqual(sequence_output.shape.as_list(),\n [None, seq_length, hidden_size])\n self.assertLen(encoder_outputs, num_hidden_layers)\n\n\nclass ExportModelWithMLMTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Tests exporting a Transformer Encoder model as a SavedModel.\n\n This covers export from a Pretrainer checkpoint to a SavedModel including\n the .mlm subobject, which is the preferred way since 2020.\n\n The export code is generic. This test focuses on two main cases\n (the most important ones in practice when this was written in 2020):\n - BERT built from a legacy BertConfig, for use with BertTokenizer.\n - ALBERT built from an EncoderConfig (as a representative of all other\n choices beyond BERT, for use with SentencepieceTokenizer (the one\n alternative to BertTokenizer).\n \"\"\"\n\n def test_copy_pooler_dense_to_encoder(self):\n encoder_config = encoders.EncoderConfig(\n type=\"bert\",\n bert=encoders.BertEncoderConfig(\n hidden_size=24, intermediate_size=48, num_layers=2))\n cls_heads = [\n layers.ClassificationHead(\n inner_dim=24, num_classes=2, name=\"next_sentence\")\n ]\n encoder = encoders.build_encoder(encoder_config)\n pretrainer = models.BertPretrainerV2(\n encoder_network=encoder,\n classification_heads=cls_heads,\n mlm_activation=tf_utils.get_activation(\n encoder_config.get().hidden_activation))\n # Makes sure the pretrainer variables are created.\n _ = pretrainer(pretrainer.inputs)\n checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)\n model_checkpoint_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n checkpoint.save(os.path.join(model_checkpoint_dir, \"test\"))\n\n vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(\n self.get_temp_dir(), use_sp_model=True)\n export_path = os.path.join(self.get_temp_dir(), \"hub\")\n export_tfhub_lib.export_model(\n export_path=export_path,\n encoder_config=encoder_config,\n model_checkpoint_path=tf.train.latest_checkpoint(model_checkpoint_dir),\n with_mlm=True,\n copy_pooler_dense_to_encoder=True,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=True)\n # Restores a hub KerasLayer.\n hub_layer = hub.KerasLayer(export_path, trainable=True)\n dummy_ids = np.zeros((2, 10), dtype=np.int32)\n input_dict = dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids)\n hub_pooled_output = 
hub_layer(input_dict)[\"pooled_output\"]\n encoder_outputs = encoder(input_dict)\n # Verify that hub_layer's pooled_output is the same as the output of next\n # sentence prediction's dense layer.\n pretrained_pooled_output = cls_heads[0].dense(\n (encoder_outputs[\"sequence_output\"][:, 0, :]))\n self.assertAllClose(hub_pooled_output, pretrained_pooled_output)\n # But the pooled_output between encoder and hub_layer are not the same.\n encoder_pooled_output = encoder_outputs[\"pooled_output\"]\n self.assertNotAllClose(hub_pooled_output, encoder_pooled_output)\n\n @parameterized.named_parameters(\n (\"Bert\", True),\n (\"Albert\", False),\n )\n def test_export_model_with_mlm(self, use_bert):\n # Create the encoder and export it.\n hidden_size = 16\n num_hidden_layers = 2\n bert_config, encoder_config = _get_bert_config_or_encoder_config(\n use_bert, hidden_size, num_hidden_layers)\n bert_model, pretrainer = export_tfhub_lib._create_model(\n bert_config=bert_config, encoder_config=encoder_config, with_mlm=True)\n self.assertEmpty(\n _find_lambda_layers(bert_model),\n \"Lambda layers are non-portable since they serialize Python bytecode.\")\n bert_model_with_mlm = bert_model.mlm\n model_checkpoint_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n\n checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)\n\n checkpoint.save(os.path.join(model_checkpoint_dir, \"test\"))\n model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)\n\n vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(\n self.get_temp_dir(), use_sp_model=not use_bert)\n export_path = os.path.join(self.get_temp_dir(), \"hub\")\n export_tfhub_lib.export_model(\n export_path=export_path,\n bert_config=bert_config,\n encoder_config=encoder_config,\n model_checkpoint_path=model_checkpoint_path,\n with_mlm=True,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=True)\n\n # Restore the exported model.\n hub_layer = hub.KerasLayer(export_path, trainable=True)\n\n # Check legacy tokenization data.\n if use_bert:\n self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())\n self.assertEqual(\"dummy content\",\n _read_asset(hub_layer.resolved_object.vocab_file))\n self.assertFalse(hasattr(hub_layer.resolved_object, \"sp_model_file\"))\n else:\n self.assertFalse(hasattr(hub_layer.resolved_object, \"do_lower_case\"))\n self.assertFalse(hasattr(hub_layer.resolved_object, \"vocab_file\"))\n self.assertEqual(\"dummy content\",\n _read_asset(hub_layer.resolved_object.sp_model_file))\n\n # Check restored weights.\n # Note that we set `_auto_track_sub_layers` to False when exporting the\n # SavedModel, so hub_layer has the same number of weights as bert_model;\n # otherwise, hub_layer will have extra weights from its `mlm` subobject.\n self.assertEqual(len(bert_model.trainable_weights),\n len(hub_layer.trainable_weights))\n for source_weight, hub_weight in zip(bert_model.trainable_weights,\n hub_layer.trainable_weights):\n self.assertAllClose(source_weight, hub_weight)\n\n # Check computation.\n seq_length = 10\n dummy_ids = np.zeros((2, seq_length), dtype=np.int32)\n input_dict = dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids)\n hub_outputs_dict = hub_layer(input_dict)\n source_outputs_dict = bert_model(input_dict)\n encoder_outputs_dict = pretrainer.encoder_network(\n [dummy_ids, dummy_ids, dummy_ids])\n self.assertEqual(hub_outputs_dict[\"pooled_output\"].shape, (2, hidden_size))\n self.assertEqual(hub_outputs_dict[\"sequence_output\"].shape,\n 
(2, seq_length, hidden_size))\n for output_key in (\"pooled_output\", \"sequence_output\", \"encoder_outputs\"):\n self.assertAllClose(source_outputs_dict[output_key],\n hub_outputs_dict[output_key])\n self.assertAllClose(source_outputs_dict[output_key],\n encoder_outputs_dict[output_key])\n\n # The \"default\" output of BERT as a text representation is pooled_output.\n self.assertAllClose(hub_outputs_dict[\"pooled_output\"],\n hub_outputs_dict[\"default\"])\n\n # Test that training=True makes a difference (activates dropout).\n def _dropout_mean_stddev(training, num_runs=20):\n input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)\n input_dict = dict(\n input_word_ids=input_ids,\n input_mask=np.ones_like(input_ids),\n input_type_ids=np.zeros_like(input_ids))\n outputs = np.concatenate([\n hub_layer(input_dict, training=training)[\"pooled_output\"]\n for _ in range(num_runs)\n ])\n return np.mean(np.std(outputs, axis=0))\n\n self.assertLess(_dropout_mean_stddev(training=False), 1e-6)\n self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)\n\n # Checks sub-object `mlm`.\n self.assertTrue(hasattr(hub_layer.resolved_object, \"mlm\"))\n\n self.assertLen(hub_layer.resolved_object.mlm.trainable_variables,\n len(bert_model_with_mlm.trainable_weights))\n self.assertLen(hub_layer.resolved_object.mlm.trainable_variables,\n len(pretrainer.trainable_weights))\n for source_weight, hub_weight, pretrainer_weight in zip(\n bert_model_with_mlm.trainable_weights,\n hub_layer.resolved_object.mlm.trainable_variables,\n pretrainer.trainable_weights):\n self.assertAllClose(source_weight, hub_weight)\n self.assertAllClose(source_weight, pretrainer_weight)\n\n max_predictions_per_seq = 4\n mlm_positions = np.zeros((2, max_predictions_per_seq), dtype=np.int32)\n input_dict = dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids,\n masked_lm_positions=mlm_positions)\n hub_mlm_outputs_dict = hub_layer.resolved_object.mlm(input_dict)\n source_mlm_outputs_dict = bert_model_with_mlm(input_dict)\n for output_key in (\"pooled_output\", \"sequence_output\", \"mlm_logits\",\n \"encoder_outputs\"):\n self.assertAllClose(hub_mlm_outputs_dict[output_key],\n source_mlm_outputs_dict[output_key])\n\n pretrainer_mlm_logits_output = pretrainer(input_dict)[\"mlm_logits\"]\n self.assertAllClose(hub_mlm_outputs_dict[\"mlm_logits\"],\n pretrainer_mlm_logits_output)\n\n # Test that training=True makes a difference (activates dropout).\n def _dropout_mean_stddev_mlm(training, num_runs=20):\n input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)\n mlm_position_ids = np.array([[1, 2, 3, 4]], np.int32)\n input_dict = dict(\n input_word_ids=input_ids,\n input_mask=np.ones_like(input_ids),\n input_type_ids=np.zeros_like(input_ids),\n masked_lm_positions=mlm_position_ids)\n outputs = np.concatenate([\n hub_layer.resolved_object.mlm(input_dict,\n training=training)[\"pooled_output\"]\n for _ in range(num_runs)\n ])\n return np.mean(np.std(outputs, axis=0))\n\n self.assertLess(_dropout_mean_stddev_mlm(training=False), 1e-6)\n self.assertGreater(_dropout_mean_stddev_mlm(training=True), 1e-3)\n\n # Test propagation of seq_length in shape inference.\n input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_dict = dict(\n input_word_ids=input_word_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids)\n hub_outputs_dict = 
hub_layer(input_dict)\n self.assertEqual(hub_outputs_dict[\"pooled_output\"].shape.as_list(),\n [None, hidden_size])\n self.assertEqual(hub_outputs_dict[\"sequence_output\"].shape.as_list(),\n [None, seq_length, hidden_size])\n\n\n_STRING_NOT_TO_LEAK = \"private_path_component_\"\n\n\nclass ExportPreprocessingTest(tf.test.TestCase, parameterized.TestCase):\n\n def _make_vocab_file(self, vocab, filename=\"vocab.txt\", add_mask_token=False):\n \"\"\"Creates wordpiece vocab file with given words plus special tokens.\n\n The tokens of the resulting model are, in this order:\n [PAD], [UNK], [CLS], [SEP], [MASK]*, ...vocab...\n *=if requested by args.\n\n This function also accepts wordpieces that start with the ## continuation\n marker, but avoiding those makes this function interchangeable with\n _make_sp_model_file(), up to the extra dimension returned by BertTokenizer.\n\n Args:\n vocab: a list of strings with the words or wordpieces to put into the\n model's vocabulary. Do not include special tokens here.\n filename: Optionally, a filename (relative to the temporary directory\n created by this function).\n add_mask_token: an optional bool, whether to include a [MASK] token.\n\n Returns:\n The absolute filename of the created vocab file.\n \"\"\"\n full_vocab = [\"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\"\n ] + [\"[MASK]\"]*add_mask_token + vocab\n path = os.path.join(\n tempfile.mkdtemp(dir=self.get_temp_dir(), # New subdir each time.\n prefix=_STRING_NOT_TO_LEAK),\n filename)\n with tf.io.gfile.GFile(path, \"w\") as f:\n f.write(\"\\n\".join(full_vocab + [\"\"]))\n return path\n\n def _make_sp_model_file(self, vocab, prefix=\"spm\", add_mask_token=False):\n \"\"\"Creates Sentencepiece word model with given words plus special tokens.\n\n The tokens of the resulting model are, in this order:\n <pad>, <unk>, [CLS], [SEP], [MASK]*, ...vocab..., <s>, </s>\n *=if requested by args.\n\n The words in the input vocab are plain text, without the whitespace marker.\n That makes this function interchangeable with _make_vocab_file().\n\n Args:\n vocab: a list of strings with the words to put into the model's\n vocabulary. 
Do not include special tokens here.\n prefix: an optional string, to change the filename prefix for the model\n (relative to the temporary directory created by this function).\n add_mask_token: an optional bool, whether to include a [MASK] token.\n\n Returns:\n The absolute filename of the created Sentencepiece model file.\n \"\"\"\n model_prefix = os.path.join(\n tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.\n prefix)\n input_file = model_prefix + \"_train_input.txt\"\n # Create input text for training the sp model from the tokens provided.\n # Repeat tokens, the earlier the more, because they are sorted by frequency.\n input_text = []\n for i, token in enumerate(vocab):\n input_text.append(\" \".join([token] * (len(vocab) - i)))\n with tf.io.gfile.GFile(input_file, \"w\") as f:\n f.write(\"\\n\".join(input_text + [\"\"]))\n control_symbols = \"[CLS],[SEP]\"\n full_vocab_size = len(vocab) + 6 # <pad>, <unk>, [CLS], [SEP], <s>, </s>.\n if add_mask_token:\n control_symbols += \",[MASK]\"\n full_vocab_size += 1\n flags = dict(\n model_prefix=model_prefix,\n model_type=\"word\",\n input=input_file,\n pad_id=0, unk_id=1, control_symbols=control_symbols,\n vocab_size=full_vocab_size,\n bos_id=full_vocab_size-2, eos_id=full_vocab_size-1)\n SentencePieceTrainer.Train(\n \" \".join([\"--{}={}\".format(k, v) for k, v in flags.items()]))\n return model_prefix + \".model\"\n\n def _do_export(self, vocab, do_lower_case, default_seq_length=128,\n tokenize_with_offsets=True, use_sp_model=False,\n experimental_disable_assert=False, add_mask_token=False):\n \"\"\"Runs SavedModel export and returns the export_path.\"\"\"\n export_path = tempfile.mkdtemp(dir=self.get_temp_dir())\n vocab_file = sp_model_file = None\n if use_sp_model:\n sp_model_file = self._make_sp_model_file(vocab,\n add_mask_token=add_mask_token)\n else:\n vocab_file = self._make_vocab_file(vocab, add_mask_token=add_mask_token)\n export_tfhub_lib.export_preprocessing(\n export_path,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=do_lower_case,\n tokenize_with_offsets=tokenize_with_offsets,\n default_seq_length=default_seq_length,\n experimental_disable_assert=experimental_disable_assert)\n # Invalidate the original filename to verify loading from the SavedModel.\n tf.io.gfile.remove(sp_model_file or vocab_file)\n return export_path\n\n def test_no_leaks(self):\n \"\"\"Tests not leaking the path to the original vocab file.\"\"\"\n path = self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True, use_sp_model=False)\n with tf.io.gfile.GFile(os.path.join(path, \"saved_model.pb\"), \"rb\") as f:\n self.assertFalse( # pylint: disable=g-generic-assert\n _STRING_NOT_TO_LEAK.encode(\"ascii\") in f.read())\n\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_exported_callables(self, use_sp_model):\n preprocess = tf.saved_model.load(self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True,\n tokenize_with_offsets=not use_sp_model, # TODO(b/181866850): drop this.\n experimental_disable_assert=True, # TODO(b/175369555): drop this.\n use_sp_model=use_sp_model))\n\n def fold_dim(rt):\n \"\"\"Removes the word/subword distinction of BertTokenizer.\"\"\"\n return rt if use_sp_model else rt.merge_dims(1, 2)\n\n # .tokenize()\n inputs = tf.constant([\"abc d ef\", \"ABC D EF d\"])\n token_ids = preprocess.tokenize(inputs)\n self.assertAllEqual(fold_dim(token_ids),\n tf.ragged.constant([[6, 4, 5],\n [6, 4, 5, 4]]))\n\n special_tokens_dict = {\n k: 
v.numpy().item() # Expecting eager Tensor, converting to Python.\n for k, v in preprocess.tokenize.get_special_tokens_dict().items()}\n self.assertDictEqual(special_tokens_dict,\n dict(padding_id=0,\n start_of_sequence_id=2,\n end_of_segment_id=3,\n vocab_size=4+6 if use_sp_model else 4+4))\n\n # .tokenize_with_offsets()\n if use_sp_model:\n # TODO(b/181866850): Enable tokenize_with_offsets when it works and test.\n self.assertFalse(hasattr(preprocess, \"tokenize_with_offsets\"))\n else:\n token_ids, start_offsets, limit_offsets = (\n preprocess.tokenize_with_offsets(inputs))\n self.assertAllEqual(fold_dim(token_ids),\n tf.ragged.constant([[6, 4, 5],\n [6, 4, 5, 4]]))\n self.assertAllEqual(fold_dim(start_offsets),\n tf.ragged.constant([[0, 4, 6],\n [0, 4, 6, 9]]))\n self.assertAllEqual(fold_dim(limit_offsets),\n tf.ragged.constant([[3, 5, 8],\n [3, 5, 8, 10]]))\n self.assertIs(preprocess.tokenize.get_special_tokens_dict,\n preprocess.tokenize_with_offsets.get_special_tokens_dict)\n\n # Root callable.\n bert_inputs = preprocess(inputs)\n self.assertAllEqual(bert_inputs[\"input_word_ids\"].shape.as_list(), [2, 128])\n self.assertAllEqual(bert_inputs[\"input_word_ids\"][:, :10],\n tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],\n [2, 6, 4, 5, 4, 3, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_mask\"].shape.as_list(), [2, 128])\n self.assertAllEqual(bert_inputs[\"input_mask\"][:, :10],\n tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"].shape.as_list(), [2, 128])\n self.assertAllEqual(bert_inputs[\"input_type_ids\"][:, :10],\n tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\n\n # .bert_pack_inputs()\n inputs_2 = tf.constant([\"d xy\", \"xy abc\"])\n token_ids_2 = preprocess.tokenize(inputs_2)\n bert_inputs = preprocess.bert_pack_inputs(\n [token_ids, token_ids_2], seq_length=256)\n self.assertAllEqual(bert_inputs[\"input_word_ids\"].shape.as_list(), [2, 256])\n self.assertAllEqual(bert_inputs[\"input_word_ids\"][:, :10],\n tf.constant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0],\n [2, 6, 4, 5, 4, 3, 7, 6, 3, 0]]))\n self.assertAllEqual(bert_inputs[\"input_mask\"].shape.as_list(), [2, 256])\n self.assertAllEqual(bert_inputs[\"input_mask\"][:, :10],\n tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"].shape.as_list(), [2, 256])\n self.assertAllEqual(bert_inputs[\"input_type_ids\"][:, :10],\n tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 0]]))\n\n # For BertTokenizer only: repeat relevant parts for do_lower_case=False,\n # default_seq_length=10, experimental_disable_assert=False,\n # tokenize_with_offsets=False, and without folding the word/subword dimension.\n def test_cased_length10(self):\n preprocess = tf.saved_model.load(self._do_export(\n [\"d\", \"##ef\", \"abc\", \"ABC\"],\n do_lower_case=False, default_seq_length=10,\n tokenize_with_offsets=False,\n use_sp_model=False,\n experimental_disable_assert=False))\n inputs = tf.constant([\"abc def\", \"ABC DEF\"])\n token_ids = preprocess.tokenize(inputs)\n self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],\n [[7], [1]]]))\n\n self.assertFalse(hasattr(preprocess, \"tokenize_with_offsets\"))\n\n bert_inputs = preprocess(inputs)\n self.assertAllEqual(bert_inputs[\"input_word_ids\"],\n tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],\n [2, 7, 1, 3, 0, 0, 0, 0, 0, 0]]))\n 
self.assertAllEqual(bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\n\n inputs_2 = tf.constant([\"d ABC\", \"ABC abc\"])\n token_ids_2 = preprocess.tokenize(inputs_2)\n bert_inputs = preprocess.bert_pack_inputs([token_ids, token_ids_2])\n # Test default seq_length=10.\n self.assertAllEqual(bert_inputs[\"input_word_ids\"],\n tf.constant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0],\n [2, 7, 1, 3, 7, 6, 3, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]]))\n\n # XLA requires fixed shapes for tensors found in graph mode.\n # Statically known shapes in Python are a particularly firm way to\n # guarantee that, and they are generally more convenient to work with.\n # We test that the exported SavedModel plays well with TF's shape\n # inference when applied to fully or partially known input shapes.\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_shapes(self, use_sp_model):\n preprocess = tf.saved_model.load(self._do_export(\n [\"abc\", \"def\"], do_lower_case=True,\n tokenize_with_offsets=not use_sp_model, # TODO(b/181866850): drop this.\n experimental_disable_assert=True, # TODO(b/175369555): drop this.\n use_sp_model=use_sp_model))\n\n def expected_bert_input_shapes(batch_size, seq_length):\n return dict(input_word_ids=[batch_size, seq_length],\n input_mask=[batch_size, seq_length],\n input_type_ids=[batch_size, seq_length])\n\n for batch_size in [7, None]:\n if use_sp_model:\n token_out_shape = [batch_size, None] # No word/subword distinction.\n else:\n token_out_shape = [batch_size, None, None]\n self.assertEqual(\n _result_shapes_in_tf_function(\n preprocess.tokenize,\n tf.TensorSpec([batch_size], tf.string)),\n token_out_shape,\n \"with batch_size=%s\" % batch_size)\n # TODO(b/181866850): Enable tokenize_with_offsets when it works and test.\n if use_sp_model:\n self.assertFalse(hasattr(preprocess, \"tokenize_with_offsets\"))\n else:\n self.assertEqual(\n _result_shapes_in_tf_function(\n preprocess.tokenize_with_offsets,\n tf.TensorSpec([batch_size], tf.string)),\n [token_out_shape] * 3,\n \"with batch_size=%s\" % batch_size)\n self.assertEqual(\n _result_shapes_in_tf_function(\n preprocess.bert_pack_inputs,\n [tf.RaggedTensorSpec([batch_size, None, None], tf.int32)] * 2,\n seq_length=256), expected_bert_input_shapes(batch_size, 256),\n \"with batch_size=%s\" % batch_size)\n self.assertEqual(\n _result_shapes_in_tf_function(preprocess,\n tf.TensorSpec([batch_size], tf.string)),\n expected_bert_input_shapes(batch_size, 128),\n \"with batch_size=%s\" % batch_size)\n\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_reexport(self, use_sp_model):\n \"\"\"Test that preprocess keeps working after another save/load cycle.\"\"\"\n path1 = self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True, default_seq_length=10,\n tokenize_with_offsets=False,\n experimental_disable_assert=True, # TODO(b/175369555): drop this.\n use_sp_model=use_sp_model)\n path2 = path1.rstrip(\"/\") + \".2\"\n model1 = tf.saved_model.load(path1)\n tf.saved_model.save(model1, path2)\n # Delete the first SavedModel 
to test that the sceond one loads by itself.\n # https://github.com/tensorflow/tensorflow/issues/46456 reports such a\n # failure case for BertTokenizer.\n tf.io.gfile.rmtree(path1)\n model2 = tf.saved_model.load(path2)\n\n inputs = tf.constant([\"abc d ef\", \"ABC D EF d\"])\n bert_inputs = model2(inputs)\n self.assertAllEqual(bert_inputs[\"input_word_ids\"],\n tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],\n [2, 6, 4, 5, 4, 3, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\n\n @parameterized.named_parameters((\"Bert\", True), (\"Albert\", False))\n def test_preprocessing_for_mlm(self, use_bert):\n \"\"\"Combines both SavedModel types and TF.text helpers for MLM.\"\"\"\n # Create the preprocessing SavedModel with a [MASK] token.\n non_special_tokens = [\"hello\", \"world\",\n \"nice\", \"movie\", \"great\", \"actors\",\n \"quick\", \"fox\", \"lazy\", \"dog\"]\n preprocess = tf.saved_model.load(self._do_export(\n non_special_tokens, do_lower_case=True,\n tokenize_with_offsets=use_bert, # TODO(b/181866850): drop this.\n experimental_disable_assert=True, # TODO(b/175369555): drop this.\n add_mask_token=True, use_sp_model=not use_bert))\n vocab_size = len(non_special_tokens) + (5 if use_bert else 7)\n\n # Create the encoder SavedModel with an .mlm subobject.\n hidden_size = 16\n num_hidden_layers = 2\n bert_config, encoder_config = _get_bert_config_or_encoder_config(\n use_bert, hidden_size, num_hidden_layers, vocab_size)\n _, pretrainer = export_tfhub_lib._create_model(\n bert_config=bert_config, encoder_config=encoder_config, with_mlm=True)\n model_checkpoint_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)\n checkpoint.save(os.path.join(model_checkpoint_dir, \"test\"))\n model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)\n vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy( # Not used below.\n self.get_temp_dir(), use_sp_model=not use_bert)\n encoder_export_path = os.path.join(self.get_temp_dir(), \"encoder_export\")\n export_tfhub_lib.export_model(\n export_path=encoder_export_path,\n bert_config=bert_config,\n encoder_config=encoder_config,\n model_checkpoint_path=model_checkpoint_path,\n with_mlm=True,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=True)\n encoder = tf.saved_model.load(encoder_export_path)\n\n # Get special tokens from the vocab (and vocab size).\n special_tokens_dict = preprocess.tokenize.get_special_tokens_dict()\n self.assertEqual(int(special_tokens_dict[\"vocab_size\"]), vocab_size)\n padding_id = int(special_tokens_dict[\"padding_id\"])\n self.assertEqual(padding_id, 0)\n start_of_sequence_id = int(special_tokens_dict[\"start_of_sequence_id\"])\n self.assertEqual(start_of_sequence_id, 2)\n end_of_segment_id = int(special_tokens_dict[\"end_of_segment_id\"])\n self.assertEqual(end_of_segment_id, 3)\n mask_id = int(special_tokens_dict[\"mask_id\"])\n self.assertEqual(mask_id, 4)\n\n # A batch of 3 segment pairs.\n raw_segments = [tf.constant([\"hello\", \"nice movie\", \"quick fox\"]),\n tf.constant([\"world\", \"great actors\", \"lazy dog\"])]\n batch_size = 3\n\n # Misc hyperparameters.\n seq_length = 10\n max_selections_per_seq = 2\n\n # Tokenize inputs.\n tokenized_segments = [preprocess.tokenize(s) for s 
in raw_segments]\n # Trim inputs to eventually fit seq_lentgh.\n num_special_tokens = len(raw_segments) + 1\n trimmed_segments = text.WaterfallTrimmer(\n seq_length - num_special_tokens).trim(tokenized_segments)\n # Combine input segments into one input sequence.\n input_ids, segment_ids = text.combine_segments(\n trimmed_segments,\n start_of_sequence_id=start_of_sequence_id,\n end_of_segment_id=end_of_segment_id)\n # Apply random masking controlled by policy objects.\n (masked_input_ids, masked_lm_positions,\n masked_ids) = text.mask_language_model(\n input_ids=input_ids,\n item_selector=text.RandomItemSelector(\n max_selections_per_seq,\n selection_rate=0.5, # Adjusted for the short test examples.\n unselectable_ids=[start_of_sequence_id, end_of_segment_id]),\n mask_values_chooser=text.MaskValuesChooser(\n vocab_size=vocab_size, mask_token=mask_id,\n # Always put [MASK] to have a predictable result.\n mask_token_rate=1.0, random_token_rate=0.0))\n # Pad to fixed-length Transformer encoder inputs.\n input_word_ids, _ = text.pad_model_inputs(masked_input_ids,\n seq_length,\n pad_value=padding_id)\n input_type_ids, input_mask = text.pad_model_inputs(segment_ids, seq_length,\n pad_value=0)\n masked_lm_positions, _ = text.pad_model_inputs(masked_lm_positions,\n max_selections_per_seq,\n pad_value=0)\n masked_lm_positions = tf.cast(masked_lm_positions, tf.int32)\n num_predictions = int(tf.shape(masked_lm_positions)[1])\n\n # Test transformer inputs.\n self.assertEqual(num_predictions, max_selections_per_seq)\n expected_word_ids = np.array([\n # [CLS] hello [SEP] world [SEP]\n [2, 5, 3, 6, 3, 0, 0, 0, 0, 0],\n # [CLS] nice movie [SEP] great actors [SEP]\n [2, 7, 8, 3, 9, 10, 3, 0, 0, 0],\n # [CLS] brown fox [SEP] lazy dog [SEP]\n [2, 11, 12, 3, 13, 14, 3, 0, 0, 0]])\n for i in range(batch_size):\n for j in range(num_predictions):\n k = int(masked_lm_positions[i, j])\n if k != 0:\n expected_word_ids[i, k] = 4 # [MASK]\n self.assertAllEqual(input_word_ids, expected_word_ids)\n\n # Call the MLM head of the Transformer encoder.\n mlm_inputs = dict(\n input_word_ids=input_word_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids,\n masked_lm_positions=masked_lm_positions,\n )\n mlm_outputs = encoder.mlm(mlm_inputs)\n self.assertEqual(mlm_outputs[\"pooled_output\"].shape,\n (batch_size, hidden_size))\n self.assertEqual(mlm_outputs[\"sequence_output\"].shape,\n (batch_size, seq_length, hidden_size))\n self.assertEqual(mlm_outputs[\"mlm_logits\"].shape,\n (batch_size, num_predictions, vocab_size))\n self.assertLen(mlm_outputs[\"encoder_outputs\"], num_hidden_layers)\n\n # A real trainer would now compute the loss of mlm_logits\n # trying to predict the masked_ids.\n del masked_ids # Unused.\n\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_special_tokens_in_estimator(self, use_sp_model):\n \"\"\"Tests getting special tokens without an Eager init context.\"\"\"\n preprocess_export_path = self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True,\n use_sp_model=use_sp_model, tokenize_with_offsets=False)\n\n def _get_special_tokens_dict(obj):\n \"\"\"Returns special tokens of restored tokenizer as Python values.\"\"\"\n if tf.executing_eagerly():\n special_tokens_numpy = {k: v.numpy()\n for k, v in obj.get_special_tokens_dict()}\n else:\n with tf.Graph().as_default():\n # This code expects `get_special_tokens_dict()` to be a tf.function\n # with no dependencies (bound args) from the context it was loaded in,\n # and boldly assumes that it 
can just be called in a dfferent context.\n special_tokens_tensors = obj.get_special_tokens_dict()\n with tf.compat.v1.Session() as sess:\n special_tokens_numpy = sess.run(special_tokens_tensors)\n return {k: v.item() # Numpy to Python.\n for k, v in special_tokens_numpy.items()}\n\n def input_fn():\n self.assertFalse(tf.executing_eagerly())\n # Build a preprocessing Model.\n sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)\n preprocess = tf.saved_model.load(preprocess_export_path)\n tokenize = hub.KerasLayer(preprocess.tokenize)\n special_tokens_dict = _get_special_tokens_dict(tokenize.resolved_object)\n for k, v in special_tokens_dict.items():\n self.assertIsInstance(v, int, \"Unexpected type for {}\".format(k))\n tokens = tokenize(sentences)\n packed_inputs = layers.BertPackInputs(\n 4, special_tokens_dict=special_tokens_dict)(tokens)\n preprocessing = tf.keras.Model(sentences, packed_inputs)\n # Map the dataset.\n ds = tf.data.Dataset.from_tensors(\n (tf.constant([\"abc\", \"D EF\"]), tf.constant([0, 1])))\n ds = ds.map(lambda features, labels: (preprocessing(features), labels))\n return ds\n\n def model_fn(features, labels, mode):\n del labels # Unused.\n return tf.estimator.EstimatorSpec(mode=mode,\n predictions=features[\"input_word_ids\"])\n\n estimator = tf.estimator.Estimator(model_fn=model_fn)\n outputs = list(estimator.predict(input_fn))\n self.assertAllEqual(outputs, np.array([[2, 6, 3, 0],\n [2, 4, 5, 3]]))\n\n # TODO(b/175369555): Remove that code and its test.\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_check_no_assert(self, use_sp_model):\n \"\"\"Tests the self-check during export without assertions.\"\"\"\n preprocess_export_path = self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True,\n use_sp_model=use_sp_model, tokenize_with_offsets=False,\n experimental_disable_assert=False)\n with self.assertRaisesRegex(AssertionError,\n r\"failed to suppress \\d+ Assert ops\"):\n export_tfhub_lib._check_no_assert(preprocess_export_path)\n\n\ndef _result_shapes_in_tf_function(fn, *args, **kwargs):\n \"\"\"Returns shapes (as lists) observed on the result of `fn`.\n\n Args:\n fn: A callable.\n *args: TensorSpecs for Tensor-valued arguments and actual values\n for Python-valued arguments to fn.\n **kwargs: Same for keyword arguments.\n\n Returns:\n The nest of partial tensor shapes (as lists) that is statically known inside\n tf.function(fn)(*args, **kwargs) for the nest of its results.\n \"\"\"\n # Use a captured mutable container for a side outout from the wrapper.\n uninitialized = \"uninitialized!\"\n result_shapes_container = [uninitialized]\n assert result_shapes_container[0] is uninitialized\n\n @tf.function\n def shape_reporting_wrapper(*args, **kwargs):\n result = fn(*args, **kwargs)\n result_shapes_container[0] = tf.nest.map_structure(\n lambda x: x.shape.as_list(), result)\n return result\n\n shape_reporting_wrapper.get_concrete_function(*args, **kwargs)\n assert result_shapes_container[0] is not uninitialized\n return result_shapes_container[0]\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Learning rate schedule.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom official.modeling.hyperparams import params_dict\n\n\nclass StepLearningRateWithLinearWarmup(\n tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"Class to generate learning rate tensor.\"\"\"\n\n def __init__(self, total_steps, params):\n \"\"\"Creates the step learning rate tensor with linear warmup.\"\"\"\n super(StepLearningRateWithLinearWarmup, self).__init__()\n self._total_steps = total_steps\n assert isinstance(params, (dict, params_dict.ParamsDict))\n if isinstance(params, dict):\n params = params_dict.ParamsDict(params)\n self._params = params\n\n def __call__(self, global_step):\n warmup_lr = self._params.warmup_learning_rate\n warmup_steps = self._params.warmup_steps\n init_lr = self._params.init_learning_rate\n lr_levels = self._params.learning_rate_levels\n lr_steps = self._params.learning_rate_steps\n linear_warmup = (\n warmup_lr + tf.cast(global_step, dtype=tf.float32) / warmup_steps *\n (init_lr - warmup_lr))\n learning_rate = tf.where(global_step < warmup_steps, linear_warmup, init_lr)\n\n for next_learning_rate, start_step in zip(lr_levels, lr_steps):\n learning_rate = tf.where(global_step >= start_step, next_learning_rate,\n learning_rate)\n return learning_rate\n\n def get_config(self):\n return {'_params': self._params.as_dict()}\n\n\nclass CosineLearningRateWithLinearWarmup(\n tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"Class to generate learning rate tensor.\"\"\"\n\n def __init__(self, total_steps, params):\n \"\"\"Creates the consine learning rate tensor with linear warmup.\"\"\"\n super(CosineLearningRateWithLinearWarmup, self).__init__()\n self._total_steps = total_steps\n assert isinstance(params, (dict, params_dict.ParamsDict))\n if isinstance(params, dict):\n params = params_dict.ParamsDict(params)\n self._params = params\n\n def __call__(self, global_step):\n global_step = tf.cast(global_step, dtype=tf.float32)\n warmup_lr = self._params.warmup_learning_rate\n warmup_steps = self._params.warmup_steps\n init_lr = self._params.init_learning_rate\n total_steps = self._total_steps\n linear_warmup = (\n warmup_lr + global_step / warmup_steps * (init_lr - warmup_lr))\n cosine_learning_rate = (\n init_lr * (tf.cos(np.pi * (global_step - warmup_steps) /\n (total_steps - warmup_steps)) + 1.0) / 2.0)\n learning_rate = tf.where(global_step < warmup_steps, linear_warmup,\n cosine_learning_rate)\n return learning_rate\n\n def get_config(self):\n return {'_params': self._params.as_dict()}\n\n\ndef learning_rate_generator(total_steps, params):\n \"\"\"The learning rate function generator.\"\"\"\n if params.type == 'step':\n return StepLearningRateWithLinearWarmup(total_steps, params)\n elif params.type == 'cosine':\n return 
CosineLearningRateWithLinearWarmup(total_steps, params)\n else:\n raise ValueError('Unsupported learning rate type: {}.'.format(params.type))\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Bounding Box List operations.\"\"\"\n\nimport tensorflow as tf\n\nfrom official.vision.beta.ops import sampling_ops\nfrom official.vision.beta.projects.centernet.ops import box_list\n\n\ndef _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):\n \"\"\"Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.\n\n Args:\n boxlist_to_copy_to: BoxList to which extra fields are copied.\n boxlist_to_copy_from: BoxList from which fields are copied.\n\n Returns:\n boxlist_to_copy_to with extra fields.\n \"\"\"\n for field in boxlist_to_copy_from.get_extra_fields():\n boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))\n return boxlist_to_copy_to\n\n\ndef scale(boxlist, y_scale, x_scale):\n \"\"\"scale box coordinates in x and y dimensions.\n\n Args:\n boxlist: BoxList holding N boxes\n y_scale: (float) scalar tensor\n x_scale: (float) scalar tensor\n\n Returns:\n boxlist: BoxList holding N boxes\n \"\"\"\n with tf.name_scope('Scale'):\n y_scale = tf.cast(y_scale, tf.float32)\n x_scale = tf.cast(x_scale, tf.float32)\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n y_min = y_scale * y_min\n y_max = y_scale * y_max\n x_min = x_scale * x_min\n x_max = x_scale * x_max\n scaled_boxlist = box_list.BoxList(\n tf.concat([y_min, x_min, y_max, x_max], 1))\n return _copy_extra_fields(scaled_boxlist, boxlist)\n\n\ndef area(boxlist):\n \"\"\"Computes area of boxes.\n\n Args:\n boxlist: BoxList holding N boxes\n\n Returns:\n a tensor with shape [N] representing box areas.\n \"\"\"\n with tf.name_scope('Area'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])\n\n\ndef change_coordinate_frame(boxlist, window):\n \"\"\"Change coordinate frame of the boxlist to be relative to window's frame.\n\n Given a window of the form [ymin, xmin, ymax, xmax],\n changes bounding box coordinates from boxlist to be relative to this window\n (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).\n\n An example use case is data augmentation: where we are given groundtruth\n boxes (boxlist) and would like to randomly crop the image to some\n window (window). 
In this case we need to change the coordinate frame of\n each groundtruth box to be relative to this new window.\n\n Args:\n boxlist: A BoxList object holding N boxes.\n window: A rank 1 tensor [4].\n\n Returns:\n Returns a BoxList object with N boxes.\n \"\"\"\n with tf.name_scope('ChangeCoordinateFrame'):\n win_height = window[2] - window[0]\n win_width = window[3] - window[1]\n boxlist_new = scale(box_list.BoxList(\n boxlist.get() - [window[0], window[1], window[0], window[1]]),\n 1.0 / win_height, 1.0 / win_width)\n boxlist_new = _copy_extra_fields(boxlist_new, boxlist)\n return boxlist_new\n\n\ndef matmul_gather_on_zeroth_axis(params, indices):\n \"\"\"Matrix multiplication based implementation of tf.gather on zeroth axis.\n\n Args:\n params: A float32 Tensor. The tensor from which to gather values.\n Must be at least rank 1.\n indices: A Tensor. Must be one of the following types: int32, int64.\n Must be in range [0, params.shape[0])\n\n Returns:\n A Tensor. Has the same type as params. Values from params gathered\n from indices given by indices, with shape indices.shape + params.shape[1:].\n \"\"\"\n with tf.name_scope('MatMulGather'):\n params_shape = sampling_ops.combined_static_and_dynamic_shape(params)\n indices_shape = sampling_ops.combined_static_and_dynamic_shape(indices)\n params2d = tf.reshape(params, [params_shape[0], -1])\n indicator_matrix = tf.one_hot(indices, params_shape[0])\n gathered_result_flattened = tf.matmul(indicator_matrix, params2d)\n return tf.reshape(gathered_result_flattened,\n tf.stack(indices_shape + params_shape[1:]))\n\n\ndef gather(boxlist, indices, fields=None, use_static_shapes=False):\n \"\"\"Gather boxes from BoxList according to indices and return new BoxList.\n\n By default, `gather` returns boxes corresponding to the input index list, as\n well as all additional fields stored in the boxlist (indexing into the\n first dimension). However one can optionally only gather from a\n subset of fields.\n\n Args:\n boxlist: BoxList holding N boxes\n indices: a rank-1 tensor of type int32 / int64\n fields: (optional) list of fields to also gather from. If None (default),\n all fields are gathered from. 
Pass an empty fields list to only gather\n the box coordinates.\n use_static_shapes: Whether to use an implementation with static shape\n gurantees.\n\n Returns:\n subboxlist: a BoxList corresponding to the subset of the input BoxList\n specified by indices\n\n Raises:\n ValueError: if specified field is not contained in boxlist or if the\n indices are not of type int32\n \"\"\"\n with tf.name_scope('Gather'):\n if len(indices.shape.as_list()) != 1:\n raise ValueError('indices should have rank 1')\n if indices.dtype != tf.int32 and indices.dtype != tf.int64:\n raise ValueError('indices should be an int32 / int64 tensor')\n gather_op = tf.gather\n if use_static_shapes:\n gather_op = matmul_gather_on_zeroth_axis\n subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices))\n if fields is None:\n fields = boxlist.get_extra_fields()\n fields += ['boxes']\n for field in fields:\n if not boxlist.has_field(field):\n raise ValueError('boxlist must contain all specified fields')\n subfieldlist = gather_op(boxlist.get_field(field), indices)\n subboxlist.add_field(field, subfieldlist)\n return subboxlist\n\n\ndef prune_completely_outside_window(boxlist, window):\n \"\"\"Prunes bounding boxes that fall completely outside of the given window.\n\n The function clip_to_window prunes bounding boxes that fall\n completely outside the window, but also clips any bounding boxes that\n partially overflow. This function does not clip partially overflowing boxes.\n\n Args:\n boxlist: a BoxList holding M_in boxes.\n window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]\n of the window\n\n Returns:\n pruned_boxlist: a new BoxList with all bounding boxes partially or fully in\n the window.\n valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes\n in the input tensor.\n \"\"\"\n with tf.name_scope('PruneCompleteleyOutsideWindow'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)\n coordinate_violations = tf.concat([\n tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),\n tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)\n ], 1)\n valid_indices = tf.reshape(\n tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])\n return gather(boxlist, valid_indices), valid_indices\n\n\ndef clip_to_window(boxlist, window, filter_nonoverlapping=True):\n \"\"\"Clip bounding boxes to a window.\n\n This op clips any input bounding boxes (represented by bounding box\n corners) to a window, optionally filtering out boxes that do not\n overlap at all with the window.\n\n Args:\n boxlist: BoxList holding M_in boxes\n window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]\n window to which the op should clip boxes.\n filter_nonoverlapping: whether to filter out boxes that do not overlap at\n all with the window.\n\n Returns:\n a BoxList holding M_out boxes where M_out <= M_in\n \"\"\"\n\n with tf.name_scope('ClipToWindow'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n win_y_min = window[0]\n win_x_min = window[1]\n win_y_max = window[2]\n win_x_max = window[3]\n y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)\n y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)\n x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)\n x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)\n clipped = 
box_list.BoxList(\n tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],\n 1))\n clipped = _copy_extra_fields(clipped, boxlist)\n if filter_nonoverlapping:\n areas = area(clipped)\n nonzero_area_indices = tf.cast(\n tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)\n clipped = gather(clipped, nonzero_area_indices)\n return clipped\n\n\ndef height_width(boxlist):\n \"\"\"Computes height and width of boxes in boxlist.\n\n Args:\n boxlist: BoxList holding N boxes\n\n Returns:\n Height: A tensor with shape [N] representing box heights.\n Width: A tensor with shape [N] representing box widths.\n \"\"\"\n with tf.name_scope('HeightWidth'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1])\n\n\ndef prune_small_boxes(boxlist, min_side):\n \"\"\"Prunes small boxes in the boxlist which have a side smaller than min_side.\n\n Args:\n boxlist: BoxList holding N boxes.\n min_side: Minimum width AND height of box to survive pruning.\n\n Returns:\n A pruned boxlist.\n \"\"\"\n with tf.name_scope('PruneSmallBoxes'):\n height, width = height_width(boxlist)\n is_valid = tf.logical_and(tf.greater_equal(width, min_side),\n tf.greater_equal(height, min_side))\n return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))\n\n\ndef assert_or_prune_invalid_boxes(boxes):\n \"\"\"Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin).\n\n When the hardware supports assertions, the function raises an error when\n boxes have an invalid size. If assertions are not supported (e.g. on TPU),\n boxes with invalid sizes are filtered out.\n\n Args:\n boxes: float tensor of shape [num_boxes, 4]\n\n Returns:\n boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes\n filtered out.\n\n Raises:\n tf.errors.InvalidArgumentError: When we detect boxes with invalid size.\n This is not supported on TPUs.\n \"\"\"\n\n ymin, xmin, ymax, xmax = tf.split(\n boxes, num_or_size_splits=4, axis=1)\n\n height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax])\n width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax])\n\n with tf.control_dependencies([height_check, width_check]):\n boxes_tensor = tf.concat([ymin, xmin, ymax, xmax], axis=1)\n boxlist = box_list.BoxList(boxes_tensor)\n boxlist = prune_small_boxes(boxlist, 0)\n\n return boxlist.get()\n\n\ndef to_absolute_coordinates(boxlist,\n height,\n width,\n check_range=True,\n maximum_normalized_coordinate=1.1):\n \"\"\"Converts normalized box coordinates to absolute pixel coordinates.\n\n This function raises an assertion failed error when the maximum box coordinate\n value is larger than maximum_normalized_coordinate (in which case coordinates\n are already absolute).\n\n Args:\n boxlist: BoxList with coordinates in range [0, 1].\n height: Maximum value for height of absolute box coordinates.\n width: Maximum value for width of absolute box coordinates.\n check_range: If True, checks if the coordinates are normalized or not.\n maximum_normalized_coordinate: Maximum coordinate value to be considered\n as normalized, default to 1.1.\n\n Returns:\n boxlist with absolute coordinates in terms of the image size.\n\n \"\"\"\n with tf.name_scope('ToAbsoluteCoordinates'):\n height = tf.cast(height, tf.float32)\n width = tf.cast(width, tf.float32)\n\n # Ensure range of input boxes is correct.\n if check_range:\n box_maximum = tf.reduce_max(boxlist.get())\n max_assert = tf.Assert(\n 
tf.greater_equal(maximum_normalized_coordinate, box_maximum),\n ['maximum box coordinate value is larger '\n 'than %f: ' % maximum_normalized_coordinate, box_maximum])\n with tf.control_dependencies([max_assert]):\n width = tf.identity(width)\n\n return scale(boxlist, height, width)\n"
] | [
[
"tensorflow.ragged.constant",
"tensorflow.cast",
"tensorflow.io.gfile.remove",
"numpy.zeros_like",
"tensorflow.Graph",
"numpy.ones_like",
"tensorflow.io.gfile.GFile",
"tensorflow.test.main",
"tensorflow.saved_model.save",
"numpy.std",
"numpy.zeros",
"tensorflow.estimator.Estimator",
"tensorflow.executing_eagerly",
"tensorflow.shape",
"tensorflow.train.Checkpoint",
"tensorflow.keras.Model",
"tensorflow.RaggedTensorSpec",
"tensorflow.io.gfile.rmtree",
"numpy.array",
"tensorflow.train.latest_checkpoint",
"tensorflow.constant",
"tensorflow.saved_model.load",
"tensorflow.compat.v1.Session",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.TensorSpec",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.cast",
"tensorflow.cos",
"tensorflow.where"
],
[
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.where",
"tensorflow.greater",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.reduce_any",
"tensorflow.less_equal",
"tensorflow.identity",
"tensorflow.one_hot",
"tensorflow.split",
"tensorflow.reshape",
"tensorflow.greater_equal",
"tensorflow.reduce_all"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
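The learning-rate schedules captured in the record above combine a linear warmup with either stepwise or half-cosine decay. As an illustration only (the parameter values below are invented, not taken from the record), the same warmup-plus-cosine formula can be sketched in plain NumPy:

import numpy as np

def cosine_lr_with_warmup(step, total_steps, warmup_steps=500, warmup_lr=0.0, init_lr=0.08):
    # Linear ramp from warmup_lr to init_lr during the first warmup_steps steps,
    # then a half-cosine decay from init_lr down to 0 over the remaining steps.
    step = np.asarray(step, dtype=np.float32)
    linear = warmup_lr + step / warmup_steps * (init_lr - warmup_lr)
    cosine = init_lr * (np.cos(np.pi * (step - warmup_steps) / (total_steps - warmup_steps)) + 1.0) / 2.0
    return np.where(step < warmup_steps, linear, cosine)

if __name__ == "__main__":
    for s in (0, 250, 500, 5000, 10000):
        print(s, float(cosine_lr_with_warmup(s, total_steps=10000)))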
good5dog5/Speaker-Diarization | [
"4cc38f77a2f2c24ce086323aa37098f6cd0f7f10"
] | [
"ghostvlad/generate_embeddings.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import print_function\nimport os\nimport sys\nimport numpy as np\nimport librosa\n\nimport toolkits\nimport random\n\n# ===========================================\n# Parse the argument\n# ===========================================\nimport argparse\nparser = argparse.ArgumentParser()\n# set up training configuration.\nparser.add_argument('--gpu', default='', type=str)\nparser.add_argument('--resume', default=r'pretrained/weights.h5', type=str)\nparser.add_argument('--data_path', default='4persons', type=str)\n# set up network configuration.\nparser.add_argument('--net', default='resnet34s',\n choices=['resnet34s', 'resnet34l'], type=str)\nparser.add_argument('--ghost_cluster', default=2, type=int)\nparser.add_argument('--vlad_cluster', default=8, type=int)\nparser.add_argument('--bottleneck_dim', default=512, type=int)\nparser.add_argument('--aggregation_mode', default='gvlad',\n choices=['avg', 'vlad', 'gvlad'], type=str)\n# set up learning rate, training loss and optimizer.\nparser.add_argument('--loss', default='softmax',\n choices=['softmax', 'amsoftmax'], type=str)\nparser.add_argument('--test_type', default='normal',\n choices=['normal', 'hard', 'extend'], type=str)\n\nglobal args\nargs = parser.parse_args()\n\n\n# calc speaker-embeddings similarity in pretty format output.\ndef similar(matrix):\n ids = matrix.shape[0]\n for i in range(ids):\n for j in range(ids):\n dist = matrix[i, :]*matrix[j, :]\n dist = np.linalg.norm(matrix[i, :] - matrix[j, :])\n print('%.2f ' % dist, end='')\n if((j+1) % 3 == 0 and j != 0):\n print(\"| \", end='')\n if((i+1) % 3 == 0 and i != 0):\n print('\\n')\n print(\"*\"*80, end='')\n print(\"\\n\")\n\n# ===============================================\n# code from Arsha for loading data.\n# ===============================================\n\n\ndef load_wav(vid_path, sr):\n wav, sr_ret = librosa.load(vid_path, sr=sr)\n assert sr_ret == sr\n\n intervals = librosa.effects.split(wav, top_db=20)\n wav_output = []\n for sliced in intervals:\n wav_output.extend(wav[sliced[0]:sliced[1]])\n wav_output = np.array(wav_output)\n return wav_output\n\n\ndef lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=1024):\n linear = librosa.stft(wav, n_fft=n_fft, win_length=win_length,\n hop_length=hop_length) # linear spectrogram\n return linear.T\n\n\ndef load_data(path_spk_tuples, win_length=400, sr=16000, hop_length=160, n_fft=512, min_win_time=240, max_win_time=1600):\n win_time = np.random.randint(min_win_time, max_win_time, 1)[\n 0] # win_length in [240,1600] ms\n win_spec = win_time//(1000//(sr//hop_length)) # win_length in spectrum\n hop_spec = win_spec//2\n\n wavs = np.array([])\n change_points = []\n paths = list(zip(*path_spk_tuples))[0]\n speakers = list(zip(*path_spk_tuples))[1]\n\n for path in paths:\n wav = load_wav(path, sr=sr) # VAD\n wavs = np.concatenate((wavs, wav))\n # change_point in spectrum\n change_points.append(wavs.shape[0]//hop_length)\n\n linear_spect = lin_spectogram_from_wav(wavs, hop_length, win_length, n_fft)\n mag, _ = librosa.magphase(linear_spect) # magnitude\n mag_T = mag.T\n freq, time = mag_T.shape\n spec_mag = mag_T\n\n utterance_specs = []\n utterance_speakers = []\n\n cur_spec = 0\n cur_speaker = speakers[0]\n i = 0\n while(True):\n if(cur_spec+win_spec > time):\n break\n spec_mag = mag_T[:, cur_spec:cur_spec+win_spec]\n\n # cur win_spec span to the next speaker\n if(cur_spec+win_spec//2 > change_points[i]):\n i += 1\n cur_speaker = speakers[i]\n\n # preprocessing, 
subtract mean, divided by time-wise var\n mu = np.mean(spec_mag, 0, keepdims=True)\n std = np.std(spec_mag, 0, keepdims=True)\n spec_mag = (spec_mag - mu) / (std + 1e-5)\n utterance_specs.append(spec_mag)\n utterance_speakers.append(cur_speaker)\n cur_spec += hop_spec\n\n return utterance_specs, utterance_speakers\n\n\ndef prepare_data(SRC_PATH):\n wavDir = os.listdir(SRC_PATH)\n wavDir.sort()\n print(f'wavDir: {wavDir}')\n\n allpath_list = []\n allspk_list = []\n for i, spkDir in enumerate(wavDir): # Each speaker's directory\n spk = spkDir # speaker name\n wavPath = os.path.join(SRC_PATH, spkDir, 'audio')\n for wav in os.listdir(wavPath): # wavfile\n utter_path = os.path.join(wavPath, wav)\n allpath_list.append(utter_path)\n allspk_list.append(i)\n if(i > 100):\n break\n\n path_spk_list = list(zip(allpath_list, allspk_list))\n return path_spk_list\n\n\ndef main():\n\n # gpu configuration\n toolkits.initialize_GPU(args)\n\n import model\n # ==================================\n # Get Train/Val.\n # ==================================\n\n total_list = [os.path.join(args.data_path, file)\n for file in os.listdir(args.data_path)]\n unique_list = np.unique(total_list)\n\n # ==================================\n # Get Model\n # ==================================\n # construct the data generator.\n params = {'dim': (257, None, 1),\n 'nfft': 512,\n 'min_slice': 720,\n 'win_length': 400,\n 'hop_length': 160,\n 'n_classes': 5994,\n 'sampling_rate': 16000,\n 'normalize': True,\n }\n\n network_eval = model.vggvox_resnet2d_icassp(input_dim=params['dim'],\n num_class=params['n_classes'],\n mode='eval', args=args)\n\n # ==> load pre-trained model ???\n if args.resume:\n # ==> get real_model from arguments input,\n # load the model if the imag_model == real_model.\n if os.path.isfile(args.resume):\n network_eval.load_weights(os.path.join(args.resume), by_name=True)\n print('==> successfully loading model {}.'.format(args.resume))\n else:\n raise IOError(\n \"==> no checkpoint found at '{}'\".format(args.resume))\n else:\n raise IOError('==> please type in the model to load')\n\n # The feature extraction process has to be done sample-by-sample,\n # because each sample is of different lengths.\n\n SRC_PATH = r'~/Workspace/SOHO/speaker-diarization/dataset/ST-CMDS-20170001_1-OS'\n path_spk_tuples = prepare_data(SRC_PATH)\n train_sequence = []\n train_cluster_id = []\n\n for epoch in range(7000): # Random choice utterances from whole wavfiles\n # A merged utterance contains [10,20] utterances\n splits_count = np.random.randint(10, 20, 1)\n path_spks = random.sample(path_spk_tuples, splits_count[0])\n utterance_specs, utterance_speakers = load_data(\n path_spks, min_win_time=500, max_win_time=1600)\n feats = []\n for spec in utterance_specs:\n spec = np.expand_dims(np.expand_dims(spec, 0), -1)\n v = network_eval.predict(spec)\n feats += [v]\n\n feats = np.array(feats)[:, 0, :] # [splits, embedding dim]\n train_sequence.append(feats)\n train_cluster_id.append(utterance_speakers)\n print(\"epoch:{}, utterance length: {}, speakers: {}\".format(\n epoch, len(utterance_speakers), len(path_spks)))\n\n np.savez('training_data', train_sequence=train_sequence,\n train_cluster_id=train_cluster_id)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.savez",
"numpy.expand_dims",
"numpy.unique",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
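The generate_embeddings.py record above slices a concatenated spectrogram into overlapping windows, advances the speaker label once a window centre passes a change point, and normalizes each slice along the frequency axis. A minimal sketch of that slicing pattern, using a random array as a stand-in for the librosa magnitude spectrogram (window and hop sizes below are illustrative):

import numpy as np

def slice_spectrogram(mag_T, change_points, speakers, win_spec=40, hop_spec=20):
    slices, labels = [], []
    time = mag_T.shape[1]
    cur, spk_idx = 0, 0
    while cur + win_spec <= time:
        spec = mag_T[:, cur:cur + win_spec]
        # advance the speaker label once the window centre passes a change point
        if spk_idx < len(change_points) and cur + win_spec // 2 > change_points[spk_idx]:
            spk_idx = min(spk_idx + 1, len(speakers) - 1)
        mu = spec.mean(axis=0, keepdims=True)
        std = spec.std(axis=0, keepdims=True)
        slices.append((spec - mu) / (std + 1e-5))
        labels.append(speakers[spk_idx])
        cur += hop_spec
    return slices, labels

if __name__ == "__main__":
    mag_T = np.random.rand(257, 300)  # freq x time, stand-in for |STFT|
    slices, labels = slice_spectrogram(mag_T, change_points=[150], speakers=[0, 1])
    print(len(slices), slices[0].shape, labels[:3], labels[-3:])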
fzyzcjy/ncnn | [
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37",
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37"
] | [
"tools/pnnx/tests/test_nn_BatchNorm2d.py",
"tools/pnnx/tests/ncnn/test_nn_ConvTranspose2d.py",
"tools/pnnx/tests/ncnn/test_F_softmax.py",
"tools/pnnx/tests/test_F_relu.py",
"python/tests/test_net.py",
"tools/pnnx/tests/ncnn/test_nn_AlphaDropout.py",
"tools/pnnx/tests/ncnn/test_F_max_pool2d.py",
"tools/pnnx/tests/test_nn_Softsign.py",
"tools/pnnx/tests/ncnn/test_nn_ChannelShuffle.py",
"tools/pnnx/tests/test_nn_AdaptiveMaxPool2d.py"
] | [
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.bn_0 = nn.BatchNorm2d(num_features=32)\n self.bn_1 = nn.BatchNorm2d(num_features=32, eps=1e-1, affine=False)\n self.bn_2 = nn.BatchNorm2d(num_features=11, affine=True)\n\n def forward(self, x, y):\n x = self.bn_0(x)\n x = self.bn_1(x)\n\n y = self.bn_2(y)\n\n return x, y\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 32, 12, 64)\n y = torch.rand(1, 11, 1, 1)\n\n a0, a1 = net(x, y)\n\n # export torchscript\n mod = torch.jit.trace(net, (x, y))\n mod.save(\"test_nn_BatchNorm2d.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../src/pnnx test_nn_BatchNorm2d.pt inputshape=[1,32,12,64],[1,11,1,1]\")\n\n # pnnx inference\n import test_nn_BatchNorm2d_pnnx\n b0, b1 = test_nn_BatchNorm2d_pnnx.test_inference()\n\n return torch.equal(a0, b0) and torch.equal(a1, b1)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.deconv_0 = nn.ConvTranspose2d(in_channels=12, out_channels=16, kernel_size=3)\n self.deconv_1 = nn.ConvTranspose2d(in_channels=16, out_channels=20, kernel_size=(2,4), stride=(2,1), padding=2, output_padding=0)\n self.deconv_2 = nn.ConvTranspose2d(in_channels=20, out_channels=24, kernel_size=(1,3), stride=1, padding=(2,4), output_padding=(0,0), dilation=1, groups=1, bias=False)\n self.deconv_3 = nn.ConvTranspose2d(in_channels=24, out_channels=28, kernel_size=(5,4), stride=2, padding=0, output_padding=(0,1), dilation=1, groups=4, bias=True)\n self.deconv_4 = nn.ConvTranspose2d(in_channels=28, out_channels=32, kernel_size=3, stride=1, padding=1, output_padding=0, dilation=(1,2), groups=2, bias=False)\n self.deconv_5 = nn.ConvTranspose2d(in_channels=32, out_channels=32, kernel_size=2, stride=2, padding=3, output_padding=1, dilation=1, groups=32, bias=True)\n self.deconv_6 = nn.ConvTranspose2d(in_channels=32, out_channels=28, kernel_size=2, stride=1, padding=2, output_padding=0, dilation=1, groups=1, bias=False)\n self.deconv_7 = nn.ConvTranspose2d(in_channels=28, out_channels=24, kernel_size=3, stride=2, padding=(5,6), output_padding=(1,0), dilation=2, groups=1, bias=True)\n\n def forward(self, x):\n x = self.deconv_0(x)\n x = self.deconv_1(x)\n x = self.deconv_2(x)\n x = self.deconv_3(x)\n x = self.deconv_4(x)\n x = self.deconv_5(x)\n x = self.deconv_6(x)\n x = self.deconv_7(x)\n\n return x\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 12, 10, 10)\n\n a = net(x)\n\n # export torchscript\n mod = torch.jit.trace(net, x)\n mod.save(\"test_nn_ConvTranspose2d.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../../src/pnnx test_nn_ConvTranspose2d.pt inputshape=[1,12,10,10]\")\n\n # ncnn inference\n import test_nn_ConvTranspose2d_ncnn\n b = test_nn_ConvTranspose2d_ncnn.test_inference()\n\n return torch.allclose(a, b, 1e-4, 1e-4)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n def forward(self, x, y, z):\n x = F.softmax(x, 0)\n y = F.softmax(y, 1)\n z = F.softmax(z, 2)\n return x, y, z\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(16)\n y = torch.rand(2, 16)\n z = torch.rand(3, 12, 16)\n\n a = net(x, y, z)\n\n # export torchscript\n mod = torch.jit.trace(net, (x, y, z))\n mod.save(\"test_F_softmax.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../../src/pnnx test_F_softmax.pt inputshape=[16],[2,16],[3,12,16]\")\n\n # ncnn inference\n import test_F_softmax_ncnn\n b = test_F_softmax_ncnn.test_inference()\n\n for a0, b0 in zip(a, b):\n if not torch.allclose(a0, b0, 1e-4, 1e-4):\n return False\n return True\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n def forward(self, x, y, z, w):\n x = F.relu(x)\n y = F.relu(y)\n z = F.relu(z)\n w = F.relu(w)\n return x, y, z, w\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 16)\n y = torch.rand(12, 2, 16)\n z = torch.rand(1, 3, 12, 16)\n w = torch.rand(1, 5, 7, 9, 11)\n\n a0, a1, a2, a3 = net(x, y, z, w)\n\n # export torchscript\n mod = torch.jit.trace(net, (x, y, z, w))\n mod.save(\"test_F_relu.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../src/pnnx test_F_relu.pt inputshape=[1,16],[12,2,16],[1,3,12,16],[1,5,7,9,11]\")\n\n # pnnx inference\n import test_F_relu_pnnx\n b0, b1, b2, b3 = test_F_relu_pnnx.test_inference()\n\n return torch.equal(a0, b0) and torch.equal(a1, b1) and torch.equal(a2, b2) and torch.equal(a3, b3)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport numpy as np\nimport pytest\n\nimport ncnn\n\n\ndef test_net():\n dr = ncnn.DataReaderFromEmpty()\n\n with ncnn.Net() as net:\n ret = net.load_param(\"tests/test.param\")\n net.load_model(dr)\n assert ret == 0 and len(net.blobs()) == 3 and len(net.layers()) == 3\n\n input_names = net.input_names()\n output_names = net.output_names()\n assert len(input_names) > 0 and len(output_names) > 0\n\n in_mat = ncnn.Mat((227, 227, 3))\n\n with net.create_extractor() as ex:\n ex.input(\"data\", in_mat)\n ret, out_mat = ex.extract(\"output\")\n\n assert ret == 0 and out_mat.dims == 1 and out_mat.w == 1\n\n net.clear()\n assert len(net.blobs()) == 0 and len(net.layers()) == 0\n\n\ndef test_net_vulkan():\n if not hasattr(ncnn, \"get_gpu_count\"):\n return\n\n dr = ncnn.DataReaderFromEmpty()\n\n net = ncnn.Net()\n net.opt.use_vulkan_compute = True\n ret = net.load_param(\"tests/test.param\")\n net.load_model(dr)\n assert ret == 0 and len(net.blobs()) == 3 and len(net.layers()) == 3\n\n in_mat = ncnn.Mat((227, 227, 3))\n\n ex = net.create_extractor()\n ex.input(\"data\", in_mat)\n ret, out_mat = ex.extract(\"output\")\n\n assert ret == 0 and out_mat.dims == 1 and out_mat.w == 1\n\n ex.clear()\n\n net.clear()\n assert len(net.blobs()) == 0 and len(net.layers()) == 0\n\n\ndef test_custom_layer():\n class CustomLayer(ncnn.Layer):\n customLayers = []\n\n def __init__(self):\n ncnn.Layer.__init__(self)\n self.one_blob_only = True\n\n self.customLayers.append(self)\n\n def forward(self, bottom_blob, top_blob, opt):\n x = np.array(bottom_blob)\n x += 1\n\n top_blob.clone_from(ncnn.Mat(x), opt.blob_allocator)\n if top_blob.empty():\n return -100\n\n return 0\n\n def CustomLayer_layer_creator():\n return CustomLayer()\n\n def CustomLayer_layer_destroyer(layer):\n for i in range(len(CustomLayer.customLayers)):\n if CustomLayer.customLayers[i] == layer:\n del CustomLayer.customLayers[i]\n break\n\n dr = ncnn.DataReaderFromEmpty()\n\n net = ncnn.Net()\n net.register_custom_layer(\n \"CustomLayer\", CustomLayer_layer_creator, CustomLayer_layer_destroyer\n )\n ret = net.load_param(\"tests/custom_layer.param\")\n net.load_model(dr)\n assert ret == 0 and len(net.blobs()) == 2 and len(net.layers()) == 2\n\n in_mat = ncnn.Mat(1)\n in_mat.fill(1.0)\n\n ex = net.create_extractor()\n ex.input(\"data\", in_mat)\n ret, out_mat = ex.extract(\"output\")\n assert ret == 0 and out_mat.dims == 1 and out_mat.w == 1 and out_mat[0] == 2.0\n\n ex.clear()\n\n net.clear()\n assert len(net.blobs()) == 0 and len(net.layers()) == 0\n\n\ndef test_vulkan_device_index():\n if not hasattr(ncnn, \"get_gpu_count\"):\n return\n\n net = ncnn.Net()\n assert net.vulkan_device() is None\n\n net.set_vulkan_device(0)\n assert net.vulkan_device() is not None\n\n\ndef test_vulkan_device_vkdev():\n if not hasattr(ncnn, \"get_gpu_count\"):\n 
return\n\n net = ncnn.Net()\n assert net.vulkan_device() is None\n\n vkdev = ncnn.get_gpu_device(0)\n net.set_vulkan_device(vkdev)\n assert net.vulkan_device() is not None\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.dropout_0 = nn.AlphaDropout()\n self.dropout_1 = nn.AlphaDropout(p=0.7)\n\n def forward(self, x, y, z, w):\n x = F.relu(x)\n y = F.relu(y)\n z = F.relu(z)\n w = F.relu(w)\n x = self.dropout_0(x)\n y = self.dropout_0(y)\n z = self.dropout_1(z)\n w = self.dropout_1(w)\n return x, y, z, w\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(12)\n y = torch.rand(12, 64)\n z = torch.rand(12, 24, 64)\n w = torch.rand(12, 24, 32, 64)\n\n a = net(x, y, z, w)\n\n # export torchscript\n mod = torch.jit.trace(net, (x, y, z, w))\n mod.save(\"test_nn_AlphaDropout.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../../src/pnnx test_nn_AlphaDropout.pt inputshape=[12],[12,64],[12,24,64],[12,24,32,64]\")\n\n # ncnn inference\n import test_nn_AlphaDropout_ncnn\n b = test_nn_AlphaDropout_ncnn.test_inference()\n\n for a0, b0 in zip(a, b):\n if not torch.allclose(a0, b0, 1e-4, 1e-4):\n return False\n return True\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n def forward(self, x):\n x = F.max_pool2d(x, kernel_size=3)\n x = F.max_pool2d(x, kernel_size=4, stride=2, padding=2, dilation=1)\n x = F.max_pool2d(x, kernel_size=(1,3), stride=1, padding=(0,1), dilation=1, return_indices=False, ceil_mode=False)\n x = F.max_pool2d(x, kernel_size=(4,5), stride=(1,2), padding=(1,2), dilation=1, return_indices=False, ceil_mode=True)\n x = F.max_pool2d(x, kernel_size=(2,3), stride=1, padding=1, dilation=(1,1), return_indices=False, ceil_mode=False)\n x = F.max_pool2d(x, kernel_size=2, stride=1, padding=0, dilation=1, return_indices=False, ceil_mode=True)\n return x\n #x, indices1 = F.max_pool2d(x, kernel_size=2, padding=1, dilation=1, return_indices=True, ceil_mode=False)\n #x, indices2 = F.max_pool2d(x, kernel_size=(5,4), stride=1, padding=2, dilation=1, return_indices=True, ceil_mode=False)\n #return x, indices1, indices2\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 12, 128, 127)\n\n a = net(x)\n\n # export torchscript\n mod = torch.jit.trace(net, x)\n mod.save(\"test_F_max_pool2d.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../../src/pnnx test_F_max_pool2d.pt inputshape=[1,12,128,127]\")\n\n # ncnn inference\n import test_F_max_pool2d_ncnn\n b = test_F_max_pool2d_ncnn.test_inference()\n\n return torch.allclose(a, b, 1e-4, 1e-4)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.act_0 = nn.Softsign()\n\n def forward(self, x, y, z, w):\n x = self.act_0(x)\n y = self.act_0(y)\n z = self.act_0(z)\n w = self.act_0(w)\n return x, y, z, w\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 12)\n y = torch.rand(1, 12, 64)\n z = torch.rand(1, 12, 24, 64)\n w = torch.rand(1, 12, 24, 32, 64)\n\n a0, a1, a2, a3 = net(x, y, z, w)\n\n # export torchscript\n mod = torch.jit.trace(net, (x, y, z, w))\n mod.save(\"test_nn_Softsign.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../src/pnnx test_nn_Softsign.pt inputshape=[1,12],[1,12,64],[1,12,24,64],[1,12,24,32,64]\")\n\n # pnnx inference\n import test_nn_Softsign_pnnx\n b0, b1, b2, b3 = test_nn_Softsign_pnnx.test_inference()\n\n return torch.equal(a0, b0) and torch.equal(a1, b1) and torch.equal(a2, b2) and torch.equal(a3, b3)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.shuffle_0 = nn.ChannelShuffle(2)\n self.shuffle_1 = nn.ChannelShuffle(16)\n\n def forward(self, x, y):\n x = self.shuffle_0(x)\n x = self.shuffle_1(x)\n\n y = self.shuffle_0(y)\n y = self.shuffle_1(y)\n return x, y\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 64, 6, 8)\n y = torch.rand(1, 96, 7, 9)\n\n a0, a1 = net(x, y)\n\n # export torchscript\n mod = torch.jit.trace(net, (x, y))\n mod.save(\"test_nn_ChannelShuffle.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../../src/pnnx test_nn_ChannelShuffle.pt inputshape=[1,64,6,8],[1,96,7,9]\")\n\n # ncnn inference\n import test_nn_ChannelShuffle_ncnn\n b0, b1 = test_nn_ChannelShuffle_ncnn.test_inference()\n\n return torch.allclose(a0, b0, 1e-4, 1e-4) and torch.allclose(a1, b1, 1e-4, 1e-4)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n",
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.pool_0 = nn.AdaptiveMaxPool2d(output_size=(7,6), return_indices=True)\n self.pool_1 = nn.AdaptiveMaxPool2d(output_size=1)\n\n def forward(self, x):\n x, indices = self.pool_0(x)\n x = self.pool_1(x)\n return x, indices\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 128, 13, 13)\n\n a0, a1 = net(x)\n\n # export torchscript\n mod = torch.jit.trace(net, x)\n mod.save(\"test_nn_AdaptiveMaxPool2d.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../src/pnnx test_nn_AdaptiveMaxPool2d.pt inputshape=[1,128,13,13]\")\n\n # pnnx inference\n import test_nn_AdaptiveMaxPool2d_pnnx\n b0, b1 = test_nn_AdaptiveMaxPool2d_pnnx.test_inference()\n\n return torch.equal(a0, b0) and torch.equal(a1, b1)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n"
] | [
[
"torch.jit.trace",
"torch.manual_seed",
"torch.equal",
"torch.rand",
"torch.nn.BatchNorm2d"
],
[
"torch.jit.trace",
"torch.nn.ConvTranspose2d",
"torch.manual_seed",
"torch.rand",
"torch.allclose"
],
[
"torch.nn.functional.softmax",
"torch.jit.trace",
"torch.manual_seed",
"torch.rand",
"torch.allclose"
],
[
"torch.jit.trace",
"torch.manual_seed",
"torch.equal",
"torch.nn.functional.relu",
"torch.rand"
],
[
"numpy.array"
],
[
"torch.jit.trace",
"torch.nn.AlphaDropout",
"torch.manual_seed",
"torch.nn.functional.relu",
"torch.rand",
"torch.allclose"
],
[
"torch.jit.trace",
"torch.manual_seed",
"torch.rand",
"torch.allclose",
"torch.nn.functional.max_pool2d"
],
[
"torch.jit.trace",
"torch.manual_seed",
"torch.equal",
"torch.nn.Softsign",
"torch.rand"
],
[
"torch.jit.trace",
"torch.manual_seed",
"torch.rand",
"torch.allclose",
"torch.nn.ChannelShuffle"
],
[
"torch.nn.AdaptiveMaxPool2d",
"torch.jit.trace",
"torch.manual_seed",
"torch.equal",
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
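The pnnx/ncnn test scripts in the record above all end the same way: run the reference model and the converted model, then compare the outputs with torch.equal or torch.allclose at a 1e-4 tolerance, zipping over tuples when there are several outputs. The helper below is hypothetical (it is not part of the test suite) and only restates that comparison pattern:

import torch

def outputs_close(reference, candidate, rtol=1e-4, atol=1e-4):
    # Accept either a single tensor or a tuple of tensors per side.
    if torch.is_tensor(reference):
        reference, candidate = (reference,), (candidate,)
    return all(torch.allclose(a, b, rtol, atol) for a, b in zip(reference, candidate))

if __name__ == "__main__":
    a = (torch.rand(2, 3), torch.rand(4))
    b = tuple(t + 1e-6 for t in a)                       # within tolerance
    print(outputs_close(a, b))                           # True
    print(outputs_close(a, tuple(t + 1.0 for t in a)))   # False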
ofgulban/iphigen | [
"47c972a5852677e01ab0b120f69d004abc57e478"
] | [
"iphigen/iphigen_nifti.py"
] | [
"\"\"\"MRI data processing with retinex and balance methods.\"\"\"\n\nfrom __future__ import division\nimport os\nimport numpy as np\nimport nibabel as nb\nfrom iphigen import core, utils\nfrom iphigen.ui import user_interface, display_welcome_message\nimport iphigen.config as cfg\n\n\ndef main():\n \"\"\"Iphigen processes for nifti images.\"\"\"\n user_interface()\n display_welcome_message()\n\n # Load data\n data, affine, dirname, basename, ext = [], [], [], [], []\n nr_fileinputs = len(cfg.filename)\n print('Selected file(s):')\n for i in range(nr_fileinputs):\n nii = nb.load(cfg.filename[i])\n affine.append(nii.affine)\n parses = utils.parse_filepath(cfg.filename[i])\n data.append(np.squeeze(nii.get_data()))\n print(' Name: {}'.format(cfg.filename[i]))\n print(' Dimensions: {}'.format(data[i].shape))\n if cfg.out_dir:\n dirname.append(cfg.out_dir)\n else:\n dirname.append(parses[0])\n basename.append(parses[1])\n ext.append(parses[2])\n\n # Reorganize data\n data = np.asarray(data)\n data = data.transpose([1, 2, 3, 0])\n # Compute intensity\n inten = np.sum(data, axis=-1)\n # Compute barycentic coordinates (equivalent to intensity for 0-simplex)\n bary = data / inten[..., None]\n\n suf = '' # suffix\n # TODO: consider zero_to option for MRI data\n if cfg.intensity_balance:\n raise ValueError('Intensity balance not implemented.')\n # print('Applying intensity balance...')\n # print(' Percentiles: {}'.format(cfg.int_bal_perc))\n # suf = suf + '_IB'\n # inten = utils.truncate_and_scale(\n # inten, pmin=cfg.int_bal_perc[0], pmax=cfg.int_bal_perc[1],\n # zero_to=255*data.shape[-1])\n # data = bary * inten[..., None]\n # # Update barycentic coordinates\n # bary = data / inten[..., None]\n\n if cfg.retinex:\n print('Applying multi-scale retinex with barycenter preservation (MSRBP)...')\n print(' Selected scales: {}'.format(cfg.scales_nifti))\n suf = suf + '_MSRBP' + utils.prepare_scale_suffix(cfg.scales_nifti)\n new_inten = core.multi_scale_retinex(inten, scales=cfg.scales_nifti)\n # Scale back to the approximage original intensity range\n inten = core.scale_approx(new_inten, inten)\n\n if cfg.simplex_color_balance:\n print('Applying simplex color balance...')\n print(' Centering: {}'.format(cfg.simplex_center))\n print(' Standardize: {}'.format(cfg.simplex_standardize))\n suf = suf + '_SimplexCB'\n bary = core.simplex_color_balance(bary)\n\n # Insert back the processed intensity image\n data = bary * inten[..., None]\n\n if cfg.simplest_color_balance:\n print('Applying simplest color balance...')\n print(' Percentiles: {}'.format(cfg.int_bal_perc))\n suf = suf + '_SimplestCB'\n data = core.simplest_color_balance(\n data, pmin=cfg.simplest_perc[0], pmax=cfg.simplest_perc[1])\n\n # Check at least one operation is selected before saving anything\n if sum([cfg.retinex, cfg.intensity_balance, cfg.simplex_color_balance,\n cfg.simplest_color_balance]) > 0:\n print('Saving output(s)...')\n for i in range(nr_fileinputs):\n # Generate output path\n out_basepath = os.path.join(dirname[i],\n '{}{}'.format(basename[i], suf))\n out_path = out_basepath + os.extsep + ext[i]\n # Create nifti image and save\n img = nb.Nifti1Image(data[..., i], affine=affine[i])\n nb.save(img, out_path)\n print(' {} is saved.\\n'.format(out_path))\n else:\n print('No operation selected, not saving anything.')\n print('Finished.')\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.asarray",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
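The iphigen_nifti.py record above splits the stacked volumes into an intensity image (the sum over the channel axis) and barycentric coordinates (each channel divided by that sum), processes the intensity, and multiplies the two back together. A tiny sketch with random stand-in data shows that the decomposition is lossless:

import numpy as np

data = np.random.rand(4, 4, 4, 3) + 0.1   # stand-in for stacked 3D volumes
inten = np.sum(data, axis=-1)             # intensity: sum over the channel axis
bary = data / inten[..., None]            # barycentric coordinates, channels sum to 1

# Any processing applied to `inten` (e.g. retinex) is folded back in the same way:
recombined = bary * inten[..., None]
print(np.allclose(recombined, data), np.allclose(bary.sum(axis=-1), 1.0))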
johnruth96/privacy-justifiable-fairness | [
"3f5ae92d791df1827cbc8720cf5e7aa33ceed7aa"
] | [
"experiments/evaluate.py"
] | [
"import os\n\nimport pandas as pd\n\nfrom experiments.conf import Config\nfrom fairness import measure_fairness\nfrom privacy.models import get_l_distinct, get_k\n\n\ndef evaluate_experiment(conf: Config):\n # Load setup\n setup = conf.get_setup()\n A = setup[\"A\"]\n I = setup[\"I\"]\n O = setup[\"O\"]\n S = setup[\"S\"]\n QI = setup[\"QI\"]\n\n # Evaluation\n for table_dir, result_file in zip(conf.table_dirs_resampling, conf.result_files_resampling):\n if not os.path.exists(table_dir):\n continue\n\n print(f\"INFO: Evaluating {table_dir}\")\n\n df_exp = pd.read_csv(conf.exp_file, header=0, index_col=[0, 1])\n indexes = []\n col_names = []\n rows = []\n\n # Read tables\n for k, l in df_exp.index:\n print(\"Evaluating ({}, {}) ...\".format(k, l))\n table_file = os.path.join(table_dir, \"K{}L{}.csv\".format(k, l))\n df = pd.read_csv(table_file, header=0, index_col=0)\n\n k_df, n_df = get_k(df, QI)\n l_df = get_l_distinct(df, S, QI)\n idx = (k_df, l_df)\n\n if idx in indexes:\n print(f\"WARNING: index ({k_df}, {l_df}) already in {table_file}\")\n\n measurements = measure_fairness(df, A, I, O, S)\n measurements.update(\n n_groups=n_df,\n idx_original=(k, l),\n )\n\n if not col_names:\n col_names = sorted(measurements.keys())\n\n indexes.append(idx)\n rows.append([measurements[measure] for measure in col_names])\n\n results = pd.DataFrame(rows, columns=col_names,\n index=pd.MultiIndex.from_tuples(indexes, names=[\"k\", \"l\"]))\n print(f\"Writing results to {result_file} ...\", flush=True, end=\"\")\n results.to_csv(result_file, index_label=[\"k\", \"l\"], index=True)\n print(\" done\")\n"
] | [
[
"pandas.read_csv",
"pandas.MultiIndex.from_tuples"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
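evaluate_experiment() in the record above accumulates one row of measurements per (k, l) pair and indexes the result frame with a pandas MultiIndex built from those tuples. A minimal sketch of that assembly step, with made-up measurement values and column names:

import pandas as pd

indexes = [(2, 2), (5, 3)]                 # (k, l) pairs collected during evaluation
col_names = ["fairness", "n_groups"]       # placeholder column names
rows = [[0.91, 12], [0.84, 7]]

results = pd.DataFrame(rows, columns=col_names,
                       index=pd.MultiIndex.from_tuples(indexes, names=["k", "l"]))
print(results)
# results.to_csv("results.csv", index_label=["k", "l"], index=True)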
Yidansong/SchNet | [
"49a1e6031f50d79a83ea21148b8e8cbcabdaabb7",
"49a1e6031f50d79a83ea21148b8e8cbcabdaabb7",
"49a1e6031f50d79a83ea21148b8e8cbcabdaabb7"
] | [
"src/schnet/nn/utils.py",
"src/schnet/nn/layers/embedding.py",
"src/schnet/nn/layers/rbf.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops.array_grad import _TileGrad\nfrom tensorflow.python.framework import ops\n\n\ndef shape(x):\n if isinstance(x, tf.Tensor):\n return x.get_shape().as_list()\n return np.shape(x)\n\n\[email protected](\"TileDense\")\ndef tile_grad_dense(op, grad):\n grad = tf.convert_to_tensor(grad)\n return _TileGrad(op, grad)\n",
"import numpy as np\nimport tensorflow as tf\n\nfrom .module import Module\nfrom .dense import Dense\n\n\nclass Embedding(Module):\n def __init__(self, n_embeddings, dim,\n embedding_init=None,\n trainable=True,\n name=None):\n self._n_embeddings = n_embeddings\n self._dim = dim\n self._embedding_init = embedding_init\n self._trainable = trainable\n super(Embedding, self).__init__(name)\n\n def _initialize(self):\n if self._embedding_init is None:\n r = tf.sqrt(1. / tf.sqrt(float(self._dim)))\n self._embedding_init = tf.random_normal_initializer(stddev=r)\n\n self.embeddings = Dense(self._n_embeddings, self._dim, use_bias=False,\n w_init=self._embedding_init,\n trainable=self._trainable,\n name='embeddings')\n\n def _forward(self, indices):\n I = np.eye(self._n_embeddings).astype(np.float32)\n ind = tf.nn.embedding_lookup(I, indices)\n y = self.embeddings(ind)\n return y\n",
"import numpy as np\nimport tensorflow as tf\n\nfrom .module import Module\n\n\nclass RBFExpansion(Module):\n def __init__(self, low, high, gap, dim=1, name=None):\n self.low = low\n self.high = high\n self.gap = gap\n self.dim = dim\n\n xrange = high - low\n self.centers = np.linspace(low, high, int(np.ceil(xrange / gap)))\n self.centers = self.centers[:, np.newaxis]\n self.n_centers = len(self.centers)\n self.fan_out = self.dim * self.n_centers\n super(RBFExpansion, self).__init__(name)\n\n def _forward(self, d):\n cshape = tf.shape(d)\n CS = d.get_shape()\n centers = self.centers.reshape((1, -1)).astype(np.float32)\n d -= tf.constant(centers)\n rbf = tf.exp(-(d ** 2) / self.gap)\n # rbf = tf.reshape(rbf, (\n # cshape[0], cshape[1], cshape[2],\n # self.dim * centers.shape[-1]))\n rbf.set_shape([CS[0], self.fan_out])\n return rbf\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.python.ops.array_grad._TileGrad",
"numpy.shape",
"tensorflow.python.framework.ops.RegisterGradient"
],
[
"numpy.eye",
"tensorflow.random_normal_initializer",
"tensorflow.nn.embedding_lookup"
],
[
"numpy.ceil",
"tensorflow.constant",
"tensorflow.exp",
"tensorflow.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
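The RBFExpansion layer in the SchNet record above expands each scalar distance into a vector of Gaussians centred on an evenly spaced grid between low and high with spacing gap. A NumPy sketch of the same expansion (parameter values below are illustrative):

import numpy as np

def rbf_expand(d, low=0.0, high=5.0, gap=0.1):
    # Centres on an even grid; each distance becomes exp(-(d - c)^2 / gap) per centre.
    centers = np.linspace(low, high, int(np.ceil((high - low) / gap)))   # (C,)
    d = np.asarray(d, dtype=np.float32).reshape(-1, 1)                   # (N, 1)
    return np.exp(-(d - centers) ** 2 / gap)                             # (N, C)

if __name__ == "__main__":
    feats = rbf_expand([0.7, 1.5, 3.2])
    print(feats.shape)           # (3, 50)
    print(feats.argmax(axis=1))  # peak sits near the closest centre for each distance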
Banconxuan/RTS3D | [
"6d2738501eaf90f019eeaa22254cd9756f8d3364"
] | [
"src/lib/models/embedding_space_generater.py"
] | [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Xingyi Zhou\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport torch\ndef generate_gridpoint(dim, pos, ori, calib_l, calib_r, trans_output_l, trans_output_r, opt=None): # dim B,K,3\n '''\n generate grid point coordinates, the image featuremap coordinates corresponding the grid point.\n return:\n image_xy_l: left image featuremap coordinates corresponding the grid point.\n image_xy_r: right image featuremap coordinates corresponding the grid point.\n xyz_norm: the grid point coordinates in the object coordinate system\n xyz: the grid point coordinates in the camera coordinate system\n '''\n\n h = dim[0]\n w = dim[1]\n l = dim[2]\n x_axi = -torch.linspace(-l / 2., l / 2., opt.R_l).cuda()\n y_axi = torch.linspace(0, -h, opt.R_h).cuda()\n z_axi = -torch.linspace(-w / 2., w / 2., opt.R_w).cuda()\n xx, yy, zz = torch.meshgrid(x_axi, y_axi, z_axi)\n xyz = torch.stack([xx, yy, zz], 0).view((3, -1)) # 3,resl***2\n R = ori\n xyz = R.mm(xyz)\n xyz_norm = xyz.clone()\n xyz[0, :] += pos[0]\n xyz[1, :] += pos[1]\n xyz[2, :] += pos[2]\n ones = torch.ones((1, xyz.size(1))).cuda()\n xyz_hom = torch.cat((xyz, ones), dim=0)\n image_xy_hom_l = calib_l.mm(xyz_hom)\n image_xy_hom_l = image_xy_hom_l / image_xy_hom_l[2, :]\n\n image_xy_hom_r = calib_r.mm(xyz_hom)\n image_xy_hom_r = image_xy_hom_r / image_xy_hom_r[2, :]\n image_xy_l = []\n image_xy_r = []\n for py in range(opt.pynum):\n image_xy_l.append(trans_output_l[py].mm(image_xy_hom_l))\n image_xy_r.append(trans_output_r[py].mm(image_xy_hom_r))\n\n image_xy_l = torch.stack(image_xy_l,dim=0)\n image_xy_r = torch.stack(image_xy_r, dim=0)\n return image_xy_l, image_xy_r, xyz_norm, xyz\n\ndef featuremap2gridpoint(batch, phase='train', opt = None):\n '''\n image featuremap to gridpoint\n '''\n outputs_l, outputs_r = batch['left_image_feature'], batch['right_image_feature']\n batch_for_point = {}\n batch_for_point['dim'] = []\n batch_for_point['pos'] = []\n batch_for_point['ori'] = []\n batch_for_point['dim_real'] = []\n batch_for_point['pos_real'] = []\n batch_for_point['ori_real'] = []\n batch_for_point['dim_est'] = []\n batch_for_point['pos_est'] = []\n batch_for_point['ori_est_scalar'] = []\n batch_for_point['reg_mask'] = []\n\n\n B = outputs_l[0].size(0)\n ## *_est represent monocular 3D detector results.\n dim = batch['dim_est']\n pos = batch['pos_est']\n ori = batch['ori_est']\n calib_l = batch['calib_l']\n calib_r = batch['calib_r']\n ## trans_output_* represent the transformation from 3D grid point to image featuremap.\n trans_output_l = batch['trans_output_l']\n trans_output_r = batch['trans_output_r']\n\n pointNet_input_list_r = []\n pointNet_input_list_l = []\n pointNet_input_list_xyz_abs = []\n pointNet_input_consis = []\n reg_mask = batch['reg_mask']\n obj_num=[]\n for b in range(B):\n index_box_l = []\n index_box_r = []\n volume_xyz_list = []\n volume_xyz_abs_list = []\n mask = torch.nonzero(reg_mask[b])\n K = mask.size(0)\n obj_num.append(K)\n for k in range(K):#range(self.opt.max_objs):\n #k_index = mask[k, 0]\n index_l, index_r, xyz, xyz_abs = generate_gridpoint(dim[b, k], pos[b, k],\n ori[b, k], calib_l[b],\n calib_r[b], trans_output_l[b],\n trans_output_r[b], opt)\n 
index_box_l.append(index_l)\n index_box_r.append(index_r)\n volume_xyz_list.append(xyz)\n volume_xyz_abs_list.append(xyz_abs)\n index_box_l = torch.stack(index_box_l, 0).transpose(3,2).unsqueeze(0) # 1,K,3,2,resl***2\n index_box_r = torch.stack(index_box_r, 0).transpose(3,2).unsqueeze(0)\n\n volume_xyz_list = torch.stack(volume_xyz_list, 0) # m(<=K),3,resl***2\n volume_xyz_abs_list = torch.stack(volume_xyz_abs_list, 0)\n volume_from_heatmap_l = []\n volume_from_heatmap_r = []\n for py in range(opt.pynum):\n grid_l = index_box_l[:,:,py,:,:] #1, K,resl***2,2\n grid_r = index_box_r[:,:,py,:,:] #1, K,resl***2,2\n featuremap_l = outputs_l[py]\n featuremap_r = outputs_r[py]\n lx = 2 * (grid_l[:, :, :, 0] / featuremap_l.size(3) - 0.5)\n ly = 2 * (grid_l[:, :, :, 1] / featuremap_l.size(2) - 0.5)\n rx = 2 * (grid_r[:, :, :, 0] / featuremap_r.size(3) - 0.5)\n ry = 2 * (grid_r[:, :, :, 1] / featuremap_r.size(2) - 0.5)\n\n grid_l = torch.stack((lx, ly),dim=3)\n grid_r = torch.stack((rx, ry), dim=3)\n\n volume_from_heatmap_l.append(torch.nn.functional.grid_sample(featuremap_l[b:b + 1], grid_l)) # 1,64,16K,resl***2\n volume_from_heatmap_r.append(torch.nn.functional.grid_sample(featuremap_r[b:b + 1], grid_r)) # 1,64,16K,resl***2\n\n volume_from_heatmap_l = torch.cat(volume_from_heatmap_l,dim=1) # 1,mm,K,resl***2\n volume_from_heatmap_r = torch.cat(volume_from_heatmap_r, dim=1) # 1,mm,K,resl***2\n\n volume_from_heatmap_l = volume_from_heatmap_l[0].transpose(1, 0)\n volume_from_heatmap_r = volume_from_heatmap_r[0].transpose(1, 0)\n\n\n volume_from_heatmap = volume_from_heatmap_l[:,:128,:] - volume_from_heatmap_r[:,:128,:]\n\n BRF=(volume_from_heatmap_l[:,128:256,:] +volume_from_heatmap_r[:,128:256,:])/2\n semantic = (volume_from_heatmap_l[:, 256:, :] + volume_from_heatmap_r[:, 256:, :]) / 2\n volume_from_heatmap=torch.exp(-(volume_from_heatmap**2)*(BRF**2))\n\n volume_depth=torch.norm(volume_xyz_abs_list,p=2,dim=1,keepdim=True)\n volume_from_heatmap = torch.cat([volume_from_heatmap,volume_xyz_list,volume_depth,semantic], dim=1)\n\n if phase == 'train' or phase == 'val':\n batch_for_point['dim'].append(batch['dim'][b])\n batch_for_point['pos'].append(batch['pos'][b])\n batch_for_point['ori'].append(batch['ori'][b])\n batch_for_point['dim_real'].append(batch['dim_real'][b])\n batch_for_point['pos_real'].append(batch['pos_real'][b])\n batch_for_point['ori_real'].append(batch['ori_real'][b])\n batch_for_point['reg_mask'].append(batch['reg_mask'][b])\n batch_for_point['dim_est'].append(batch['dim_est'][b])\n batch_for_point['pos_est'].append(batch['pos_est'][b])\n batch_for_point['ori_est_scalar'].append(batch['ori_est_scalar'][b])\n pointNet_input_list_l.append(volume_from_heatmap_l)\n pointNet_input_list_r.append(volume_from_heatmap_r)\n pointNet_input_list_xyz_abs.append(volume_xyz_abs_list)\n pointNet_input_consis.append(volume_from_heatmap)\n\n pointNet_input_tensor_l = torch.cat(pointNet_input_list_l, dim=0)\n pointNet_input_tensor_r = torch.cat(pointNet_input_list_r, dim=0)\n pointNet_input_tensor_consis = torch.cat(pointNet_input_consis, dim=0)\n pointNet_input_tensor_xyz_abs = torch.cat(pointNet_input_list_xyz_abs, dim=0)\n\n input_model = {}\n input_model['input_feat_l'] = pointNet_input_tensor_l\n input_model['input_feat_r'] = pointNet_input_tensor_r\n input_model['input_feat_xyz_abs'] = pointNet_input_tensor_xyz_abs\n input_model['input_feat_consis'] = pointNet_input_tensor_consis\n if phase == 'train' or phase =='val':\n batch_for_point['dim'] = torch.cat(batch_for_point['dim'], dim=0)\n 
batch_for_point['pos'] = torch.cat(batch_for_point['pos'], dim=0)\n batch_for_point['ori'] = torch.cat(batch_for_point['ori'], dim=0)\n batch_for_point['dim_real'] = torch.cat(batch_for_point['dim_real'], dim=0)\n batch_for_point['pos_real'] = torch.cat(batch_for_point['pos_real'], dim=0)\n batch_for_point['ori_real'] = torch.cat(batch_for_point['ori_real'], dim=0)\n\n batch_for_point['dim_est'] = torch.cat(batch_for_point['dim_est'], dim=0)\n batch_for_point['pos_est'] = torch.cat(batch_for_point['pos_est'], dim=0)\n batch_for_point['ori_est_scalar'] = torch.cat(batch_for_point['ori_est_scalar'], dim=0)\n batch_for_point['reg_mask'] = torch.cat(batch_for_point['reg_mask'], dim=0)\n input_model['input_batch'] = batch_for_point\n #input_model['obj_num']=obj_num\n return input_model\n\n\n"
] | [
[
"torch.linspace",
"torch.norm",
"torch.cat",
"torch.exp",
"torch.nn.functional.grid_sample",
"torch.nonzero",
"torch.stack",
"torch.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
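The record above projects 3D grid points through a calibration matrix and then samples the left/right feature maps with torch.nn.functional.grid_sample. As a standalone illustration of that sampling step only (not taken from the record; the tensor names, shapes and calibration values below are made-up assumptions), a minimal sketch:

import torch
import torch.nn.functional as F

def sample_features_at_points(feat, pts_xyz, calib):
    """Project camera-frame points with a 3x4 calibration matrix and
    bilinearly sample a (1, C, H, W) feature map at the projections."""
    n = pts_xyz.shape[0]
    pts_hom = torch.cat([pts_xyz, torch.ones(n, 1)], dim=1)        # (N, 4) homogeneous points
    uvw = pts_hom @ calib.t()                                      # (N, 3) projected coordinates
    uv = uvw[:, :2] / uvw[:, 2:3]                                  # pixel coordinates
    h, w = feat.shape[2], feat.shape[3]
    # grid_sample expects coordinates normalised to [-1, 1], x first, then y
    grid = torch.stack([2 * uv[:, 0] / (w - 1) - 1,
                        2 * uv[:, 1] / (h - 1) - 1], dim=1).view(1, 1, n, 2)
    sampled = F.grid_sample(feat, grid, align_corners=True)        # (1, C, 1, N)
    return sampled[0, :, 0, :].t()                                 # (N, C)

feat = torch.randn(1, 64, 96, 320)                    # hypothetical feature map
pts = torch.rand(50, 3) * torch.tensor([10.0, 2.0, 40.0])
pts[:, 2] += 5.0                                      # keep points in front of the camera
calib = torch.tensor([[700.0, 0.0, 160.0, 0.0],
                      [0.0, 700.0, 48.0, 0.0],
                      [0.0, 0.0, 1.0, 0.0]])
point_features = sample_features_at_points(feat, pts, calib)      # (50, 64)

Note that the original code normalises by the feature map size without the align_corners offset; the sketch simply shows the general pattern, not the exact convention used in the record.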
FelixNeutatz/auto-sklearn | [
"b5d141603332041475ed746aa1640334f5561aea",
"b5d141603332041475ed746aa1640334f5561aea",
"b5d141603332041475ed746aa1640334f5561aea",
"b5d141603332041475ed746aa1640334f5561aea"
] | [
"autosklearn/pipeline/components/data_preprocessing/imputation/categorical_imputation.py",
"autosklearn/pipeline/components/feature_preprocessing/liblinear_svc_preprocessor.py",
"test/test_pipeline/components/feature_preprocessing/test_kernel_pca.py",
"autosklearn/pipeline/components/classification/random_forest.py"
] | [
"from ConfigSpace.configuration_space import ConfigurationSpace\nfrom autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm\nfrom autosklearn.pipeline.constants import DENSE, SPARSE, UNSIGNED_DATA, INPUT\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformIntegerHyperparameter, UniformFloatHyperparameter\nfrom ConfigSpace.conditions import EqualsCondition\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\n\nclass CategoricalImputation(AutoSklearnPreprocessingAlgorithm):\n \"\"\"\n Substitute missing values by 2\n \"\"\"\n\n def __init__(self, strategy=\"constant\", n_neighbors=5,\n weights='uniform', training_fraction=0.5, random_state=None):\n self.strategy = strategy\n self.n_neighbors = n_neighbors\n self.weights = weights\n self.random_stated = random_state\n self.training_fraction = training_fraction\n\n def fit(self, X, y=None):\n import sklearn.impute\n\n if self.strategy == 'constant':\n self.preprocessor = sklearn.impute.SimpleImputer(strategy='constant', fill_value=2, copy=False)\n elif self.strategy == 'most-frequent':\n self.preprocessor = sklearn.impute.SimpleImputer(strategy='most_frequent', copy=False)\n elif self.strategy == 'knn':\n self.preprocessor = sklearn.impute.KNNImputer(n_neighbors=self.n_neighbors, weights=self.weights, copy=False)\n\n X_new = None\n try:\n min_training_instances = max(\n [self.training_fraction * len(X), 10 * len(np.unique(y)), self.n_neighbors + 1])\n X_new, _, _, _ = train_test_split(X, y, train_size=min_training_instances, random_state=42)\n except:\n X_new = X\n\n self.preprocessor.fit(X_new)\n return self\n\n def transform(self, X):\n if self.preprocessor is None:\n raise NotImplementedError()\n X = self.preprocessor.transform(X).astype(int)\n return X\n\n def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X)\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'CategoricalImputation',\n 'name': 'Categorical Imputation',\n 'handles_missing_values': True,\n 'handles_nominal_values': True,\n 'handles_numerical_features': True,\n 'prefers_data_scaled': False,\n 'prefers_data_normalized': False,\n 'handles_regression': True,\n 'handles_classification': True,\n 'handles_multiclass': True,\n 'handles_multilabel': True,\n 'is_deterministic': True,\n # TODO find out of this is right!\n 'handles_sparse': True,\n 'handles_dense': True,\n 'input': (DENSE, SPARSE, UNSIGNED_DATA),\n 'output': (INPUT,),\n 'preferred_dtype': None}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None):\n cs = ConfigurationSpace()\n strategy = CategoricalHyperparameter(\"strategy\", [\"constant\", \"most-frequent\", \"knn\"],\n default_value=\"constant\")\n cs.add_hyperparameter(strategy)\n\n # knn hyperparameters\n n_neighbors = UniformIntegerHyperparameter(name=\"n_neighbors\", lower=2, upper=100, log=True, default_value=5)\n weights = CategoricalHyperparameter(name=\"weights\", choices=[\"uniform\", \"distance\"], default_value=\"uniform\")\n cs.add_hyperparameters([n_neighbors, weights])\n\n n_neighbors_depends_on_knn = EqualsCondition(n_neighbors, strategy, \"knn\")\n weights_depends_on_knn = EqualsCondition(weights, strategy, \"knn\")\n cs.add_conditions([n_neighbors_depends_on_knn, weights_depends_on_knn])\n\n training_fraction = UniformFloatHyperparameter(\"training_fraction\", 0.0001, 1.0, log=True, default_value=0.5)\n cs.add_hyperparameter(training_fraction)\n\n return cs\n",
"from ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n CategoricalHyperparameter, Constant\nfrom ConfigSpace.forbidden import ForbiddenEqualsClause, \\\n ForbiddenAndConjunction\n\nfrom autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm\nfrom autosklearn.pipeline.constants import SPARSE, DENSE, UNSIGNED_DATA, INPUT\nfrom autosklearn.util.common import check_for_bool, check_none\n\n\nclass LibLinear_Preprocessor(AutoSklearnPreprocessingAlgorithm):\n # Liblinear is not deterministic as it uses a RNG inside\n def __init__(self, penalty, loss, dual, tol, C, multi_class,\n fit_intercept, intercept_scaling, class_weight=None,\n random_state=None):\n self.penalty = penalty\n self.loss = loss\n self.dual = dual\n self.tol = tol\n self.C = C\n self.multi_class = multi_class\n self.fit_intercept = fit_intercept\n self.intercept_scaling = intercept_scaling\n self.class_weight = class_weight\n self.random_state = random_state\n self.preprocessor = None\n\n def fit(self, X, Y):\n import sklearn.svm\n from sklearn.feature_selection import SelectFromModel\n\n self.C = float(self.C)\n self.tol = float(self.tol)\n self.dual = check_for_bool(self.dual)\n self.fit_intercept = check_for_bool(self.fit_intercept)\n self.intercept_scaling = float(self.intercept_scaling)\n\n if check_none(self.class_weight):\n self.class_weight = None\n\n estimator = sklearn.svm.LinearSVC(penalty=self.penalty,\n loss=self.loss,\n dual=self.dual,\n tol=self.tol,\n C=self.C,\n class_weight=self.class_weight,\n fit_intercept=self.fit_intercept,\n intercept_scaling=self.intercept_scaling,\n multi_class=self.multi_class,\n random_state=self.random_state)\n\n estimator.fit(X, Y)\n self.preprocessor = SelectFromModel(estimator=estimator,\n threshold='mean',\n prefit=True)\n\n return self\n\n def transform(self, X):\n if self.preprocessor is None:\n raise NotImplementedError()\n return self.preprocessor.transform(X)\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'LinearSVC Preprocessor',\n 'name': 'Liblinear Support Vector Classification Preprocessing',\n 'handles_regression': False,\n 'handles_classification': True,\n 'handles_multiclass': True,\n 'handles_multilabel': False,\n 'input': (SPARSE, DENSE, UNSIGNED_DATA),\n 'output': (INPUT,)}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None):\n cs = ConfigurationSpace()\n\n penalty = Constant(\"penalty\", \"l1\")\n loss = CategoricalHyperparameter(\n \"loss\", [\"hinge\", \"squared_hinge\"], default_value=\"squared_hinge\")\n dual = Constant(\"dual\", \"False\")\n # This is set ad-hoc\n tol = UniformFloatHyperparameter(\"tol\", 1e-5, 1e-1, default_value=1e-4, log=True)\n C = UniformFloatHyperparameter(\"C\", 0.03125, 32768, log=True, default_value=1.0)\n multi_class = Constant(\"multi_class\", \"ovr\")\n # These are set ad-hoc\n fit_intercept = Constant(\"fit_intercept\", \"True\")\n intercept_scaling = Constant(\"intercept_scaling\", 1)\n\n cs.add_hyperparameters([penalty, loss, dual, tol, C, multi_class,\n fit_intercept, intercept_scaling])\n\n penalty_and_loss = ForbiddenAndConjunction(\n ForbiddenEqualsClause(penalty, \"l1\"),\n ForbiddenEqualsClause(loss, \"hinge\")\n )\n cs.add_forbidden_clause(penalty_and_loss)\n return cs\n",
"import sys\nimport unittest\n\nfrom sklearn.linear_model import RidgeClassifier\nfrom autosklearn.pipeline.components.feature_preprocessing.kernel_pca import \\\n KernelPCA\nfrom autosklearn.pipeline.util import _test_preprocessing, PreprocessingTestCase, \\\n get_dataset\nimport sklearn.metrics\n\n\nclass KernelPCAComponentTest(PreprocessingTestCase):\n @unittest.skipIf(sys.version_info < (3, 7), 'Random failures for Python < 3.7')\n def test_default_configuration(self):\n transformation, original = _test_preprocessing(KernelPCA,\n dataset='digits',\n train_size_maximum=2000)\n self.assertEqual(transformation.shape[0], original.shape[0])\n self.assertFalse((transformation == 0).all())\n\n def test_default_configuration_sparse(self):\n transformation, original = _test_preprocessing(KernelPCA,\n make_sparse=True,\n dataset='digits')\n self.assertEqual(transformation.shape[0], original.shape[0])\n self.assertFalse((transformation == 0).all())\n\n def test_default_configuration_classify(self):\n for i in range(5):\n X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',\n make_sparse=False,\n train_size_maximum=1000)\n configuration_space = KernelPCA.get_hyperparameter_search_space()\n default = configuration_space.get_default_configuration()\n preprocessor = KernelPCA(random_state=1,\n **{hp_name: default[hp_name] for hp_name in\n default if default[hp_name] is not None})\n preprocessor.fit(X_train, Y_train)\n X_train_trans = preprocessor.transform(X_train)\n X_test_trans = preprocessor.transform(X_test)\n\n # fit a classifier on top\n classifier = RidgeClassifier(random_state=1)\n predictor = classifier.fit(X_train_trans, Y_train)\n predictions = predictor.predict(X_test_trans)\n accuracy = sklearn.metrics.accuracy_score(predictions, Y_test)\n self.assertAlmostEqual(accuracy, 0.0903387703889586)\n\n @unittest.skip(\"Always returns float64\")\n def test_preprocessing_dtype(self):\n super(KernelPCAComponentTest,\n self)._test_preprocessing_dtype(KernelPCA)\n",
"from ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n UniformIntegerHyperparameter, CategoricalHyperparameter, UnParametrizedHyperparameter\n\nfrom autosklearn.pipeline.components.base import (\n AutoSklearnClassificationAlgorithm,\n IterativeComponentWithSampleWeight,\n)\nfrom autosklearn.pipeline.constants import DENSE, UNSIGNED_DATA, PREDICTIONS, SPARSE\nfrom autosklearn.pipeline.implementations.util import convert_multioutput_multiclass_to_multilabel\nfrom autosklearn.util.common import check_for_bool, check_none\n\n\nclass RandomForest(\n IterativeComponentWithSampleWeight,\n AutoSklearnClassificationAlgorithm,\n):\n def __init__(self, criterion, max_features,\n max_depth, min_samples_split, min_samples_leaf,\n min_weight_fraction_leaf, bootstrap, max_leaf_nodes,\n min_impurity_decrease, random_state=None, n_jobs=1,\n class_weight=None):\n self.n_estimators = self.get_max_iter()\n self.criterion = criterion\n self.max_features = max_features\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.bootstrap = bootstrap\n self.max_leaf_nodes = max_leaf_nodes\n self.min_impurity_decrease = min_impurity_decrease\n self.random_state = random_state\n self.n_jobs = n_jobs\n self.class_weight = class_weight\n self.estimator = None\n\n @staticmethod\n def get_max_iter():\n return 512\n\n def get_current_iter(self):\n return self.estimator.n_estimators\n\n def iterative_fit(self, X, y, sample_weight=None, n_iter=1, refit=False):\n from sklearn.ensemble import RandomForestClassifier\n\n if refit:\n self.estimator = None\n\n if self.estimator is None:\n self.n_estimators = int(self.n_estimators)\n if check_none(self.max_depth):\n self.max_depth = None\n else:\n self.max_depth = int(self.max_depth)\n\n self.min_samples_split = int(self.min_samples_split)\n self.min_samples_leaf = int(self.min_samples_leaf)\n self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)\n\n if self.max_features not in (\"sqrt\", \"log2\", \"auto\"):\n max_features = int(X.shape[1] ** float(self.max_features))\n else:\n max_features = self.max_features\n\n self.bootstrap = check_for_bool(self.bootstrap)\n\n if check_none(self.max_leaf_nodes):\n self.max_leaf_nodes = None\n else:\n self.max_leaf_nodes = int(self.max_leaf_nodes)\n\n self.min_impurity_decrease = float(self.min_impurity_decrease)\n\n # initial fit of only increment trees\n self.estimator = RandomForestClassifier(\n n_estimators=n_iter,\n criterion=self.criterion,\n max_features=max_features,\n max_depth=self.max_depth,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n min_weight_fraction_leaf=self.min_weight_fraction_leaf,\n bootstrap=self.bootstrap,\n max_leaf_nodes=self.max_leaf_nodes,\n min_impurity_decrease=self.min_impurity_decrease,\n random_state=self.random_state,\n n_jobs=self.n_jobs,\n class_weight=self.class_weight,\n warm_start=True)\n else:\n self.estimator.n_estimators += n_iter\n self.estimator.n_estimators = min(self.estimator.n_estimators,\n self.n_estimators)\n\n self.estimator.fit(X, y, sample_weight=sample_weight)\n return self\n\n def configuration_fully_fitted(self):\n if self.estimator is None:\n return False\n\n return not len(self.estimator.estimators_) < self.n_estimators\n\n def predict(self, X):\n if self.estimator is None:\n raise NotImplementedError\n return 
self.estimator.predict(X)\n\n def predict_proba(self, X):\n if self.estimator is None:\n raise NotImplementedError()\n probas = self.estimator.predict_proba(X)\n probas = convert_multioutput_multiclass_to_multilabel(probas)\n return probas\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'RF',\n 'name': 'Random Forest Classifier',\n 'handles_regression': False,\n 'handles_classification': True,\n 'handles_multiclass': True,\n 'handles_multilabel': True,\n 'is_deterministic': True,\n 'input': (DENSE, SPARSE, UNSIGNED_DATA),\n 'output': (PREDICTIONS,)}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None):\n cs = ConfigurationSpace()\n criterion = CategoricalHyperparameter(\n \"criterion\", [\"gini\", \"entropy\"], default_value=\"gini\")\n\n # The maximum number of features used in the forest is calculated as m^max_features, where\n # m is the total number of features, and max_features is the hyperparameter specified below.\n # The default is 0.5, which yields sqrt(m) features as max_features in the estimator. This\n # corresponds with Geurts' heuristic.\n max_features = UniformFloatHyperparameter(\n \"max_features\", 0., 1., default_value=0.5)\n\n max_depth = UnParametrizedHyperparameter(\"max_depth\", \"None\")\n min_samples_split = UniformIntegerHyperparameter(\n \"min_samples_split\", 2, 20, default_value=2)\n min_samples_leaf = UniformIntegerHyperparameter(\n \"min_samples_leaf\", 1, 20, default_value=1)\n min_weight_fraction_leaf = UnParametrizedHyperparameter(\"min_weight_fraction_leaf\", 0.)\n max_leaf_nodes = UnParametrizedHyperparameter(\"max_leaf_nodes\", \"None\")\n min_impurity_decrease = UnParametrizedHyperparameter('min_impurity_decrease', 0.0)\n bootstrap = CategoricalHyperparameter(\n \"bootstrap\", [\"True\", \"False\"], default_value=\"True\")\n cs.add_hyperparameters([criterion, max_features,\n max_depth, min_samples_split, min_samples_leaf,\n min_weight_fraction_leaf, max_leaf_nodes,\n bootstrap, min_impurity_decrease])\n return cs\n"
] | [
[
"sklearn.model_selection.train_test_split",
"numpy.unique"
],
[
"sklearn.feature_selection.SelectFromModel"
],
[
"sklearn.linear_model.RidgeClassifier"
],
[
"sklearn.ensemble.RandomForestClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
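The categorical imputation component in the record above attaches knn-only hyperparameters to its search space via EqualsCondition. A minimal standalone sketch of that pattern (assuming a ConfigSpace version compatible with the imports used in the record; this snippet is illustrative and not part of the repository):

from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformIntegerHyperparameter
from ConfigSpace.conditions import EqualsCondition

cs = ConfigurationSpace()
strategy = CategoricalHyperparameter("strategy", ["constant", "most-frequent", "knn"],
                                     default_value="constant")
n_neighbors = UniformIntegerHyperparameter("n_neighbors", 2, 100, log=True, default_value=5)
cs.add_hyperparameters([strategy, n_neighbors])
# n_neighbors is only active (and only sampled) when strategy == "knn"
cs.add_condition(EqualsCondition(n_neighbors, strategy, "knn"))

for cfg in cs.sample_configuration(5):
    print(cfg.get_dictionary())   # the "n_neighbors" key appears only for knn configurations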
helloworldpark/PyEMD | [
"d28481b3244f317c196dbfe92af7e2d776b64382"
] | [
"PyEMD/EMD_matlab.py"
] | [
"#!/usr/bin/python\r\n# coding: UTF-8\r\n#\r\n# Author: Dawid Laszuk\r\n# Contact: [email protected]\r\n#\r\n# Edited: 11/05/2017\r\n#\r\n# Feel free to contact for any information.\r\n\r\nfrom __future__ import division, print_function\r\n\r\nimport logging\r\nimport numpy as np\r\nimport time\r\n\r\nfrom scipy.interpolate import interp1d\r\nfrom PyEMD.PyEMD.splines import *\r\n\r\nclass EMD:\r\n \"\"\"\r\n Empirical Mode Decomposition\r\n\r\n *Note:*\r\n Default and recommended package for EMD is EMD.py.\r\n This is meant to provide with the same results as MATLAB version of EMD,\r\n which is not necessarily the most efficient or numerically accurate.\r\n\r\n Method of decomposing signal into Intrinsic Mode Functions (IMFs)\r\n based on algorithm presented in Huang et al. [1].\r\n\r\n Algorithm was validated with Rilling et al. [2] Matlab's version from 3.2007.\r\n\r\n [1] N. E. Huang et al., \"The empirical mode decomposition and the\r\n Hilbert spectrum for non-linear and non stationary time series\r\n analysis\", Proc. Royal Soc. London A, Vol. 454, pp. 903-995, 1998\r\n [2] G. Rilling, P. Flandrin and P. Goncalves, \"On Empirical Mode\r\n Decomposition and its algorithms\", IEEE-EURASIP Workshop on\r\n Nonlinear Signal and Image Processing NSIP-03, Grado (I), June 2003\r\n \"\"\"\r\n\r\n logger = logging.getLogger(__name__)\r\n\r\n def __init__(self):\r\n\r\n self.splineKind = 'cubic'\r\n\r\n self.nbsym = 2\r\n self.reduceScale = 1.\r\n self.maxIteration = 500\r\n self.scaleFactor = 100\r\n\r\n self.FIXE = 0\r\n self.FIXE_H = 0\r\n\r\n self.stop1 = 0.05\r\n self.stop2 = 0.5\r\n self.stop3 = 0.05\r\n\r\n self.DTYPE = np.float64\r\n self.MAX_ITERATION = 1000\r\n\r\n self.TIME = False\r\n\r\n def extractMaxMinSpline(self, T, S):\r\n \"\"\"\r\n Input:\r\n -----------------\r\n S - Input signal array. Should be 1D.\r\n T - Time array. 
If none passed numpy arange is created.\r\n\r\n Output:\r\n -----------------\r\n maxSpline - Upper envelope of signal S.\r\n minSpline - Bottom envelope of signal S.\r\n maxExtrema - Position (1st row) and values (2nd row) of maxima.\r\n minExtrema - Position (1st row) and values (2nd row) of minma.\r\n \"\"\"\r\n\r\n # Get indexes of extrema\r\n maxPos, maxVal, minPos, minVal, _ = self.findExtrema(T, S)\r\n\r\n if len(maxPos) + len(minPos) < 3: return [-1]*4\r\n\r\n # Extrapolation of signal (ober boundaries)\r\n maxExtrema, minExtrema = self.preparePoints(S, T, maxPos, maxVal, minPos, minVal)\r\n\r\n _, maxSpline = self.splinePoints(T, maxExtrema, self.splineKind)\r\n _, minSpline = self.splinePoints(T, minExtrema, self.splineKind)\r\n\r\n return maxSpline, minSpline, maxExtrema, minExtrema\r\n\r\n def preparePoints(self, S, T, maxPos, maxVal, minPos, minVal):\r\n \"\"\"\r\n Adds to signal extrema according to mirror technique.\r\n Number of added points depends on nbsym variable.\r\n\r\n Input:\r\n ---------\r\n S: Signal (1D numpy array).\r\n T: Timeline (1D numpy array).\r\n maxPos: sorted time positions of maxima.\r\n maxVal: signal values at maxPos positions.\r\n minPos: sorted time positions of minima.\r\n minVal: signal values at minPos positions.\r\n\r\n Output:\r\n ---------\r\n minExtrema: Position (1st row) and values (2nd row) of minima.\r\n minExtrema: Position (1st row) and values (2nd row) of maxima.\r\n \"\"\"\r\n\r\n # Find indices for time array of extrema\r\n indmin = np.array([np.nonzero(T==t)[0] for t in minPos]).flatten()\r\n indmax = np.array([np.nonzero(T==t)[0] for t in maxPos]).flatten()\r\n\r\n # Local variables\r\n nbsym = self.nbsym\r\n endMin, endMax = len(minPos), len(maxPos)\r\n\r\n ####################################\r\n # Left bound - mirror nbsym points to the left\r\n if indmax[0] < indmin[0]:\r\n if S[0] > S[indmin[0]]:\r\n lmax = indmax[1:min(endMax,nbsym+1)][::-1]\r\n lmin = indmin[0:min(endMin,nbsym+0)][::-1]\r\n lsym = indmax[0]\r\n else:\r\n lmax = indmax[0:min(endMax,nbsym)][::-1]\r\n lmin = np.append(indmin[0:min(endMin,nbsym-1)][::-1],0)\r\n lsym = 0\r\n else:\r\n if S[0] < S[indmax[0]]:\r\n lmax = indmax[0:min(endMax,nbsym+0)][::-1]\r\n lmin = indmin[1:min(endMin,nbsym+1)][::-1]\r\n lsym = indmin[0]\r\n else:\r\n lmax = np.append(indmax[0:min(endMax,nbsym-1)][::-1],0)\r\n lmin = indmin[0:min(endMin,nbsym)][::-1]\r\n lsym = 0\r\n\r\n ####################################\r\n # Right bound - mirror nbsym points to the right\r\n if indmax[-1] < indmin[-1]:\r\n if S[-1] < S[indmax[-1]]:\r\n rmax = indmax[max(endMax-nbsym,0):][::-1]\r\n rmin = indmin[max(endMin-nbsym-1,0):-1][::-1]\r\n rsym = indmin[-1]\r\n else:\r\n rmax = np.append(indmax[max(endMax-nbsym+1,0):], len(S)-1)[::-1]\r\n rmin = indmin[max(endMin-nbsym,0):][::-1]\r\n rsym = len(S)-1\r\n else:\r\n if S[-1] > S[indmin[-1]]:\r\n rmax = indmax[max(endMax-nbsym-1,0):-1][::-1]\r\n rmin = indmin[max(endMin-nbsym,0):][::-1]\r\n rsym = indmax[-1]\r\n else:\r\n rmax = indmax[max(endMax-nbsym,0):][::-1]\r\n rmin = np.append(indmin[max(endMin-nbsym+1,0):], len(S)-1)[::-1]\r\n rsym = len(S)-1\r\n\r\n # In case any array missing\r\n if not lmin.size: lmin = indmin\r\n if not rmin.size: rmin = indmin\r\n if not lmax.size: lmax = indmax\r\n if not rmax.size: rmax = indmax\r\n\r\n # Mirror points\r\n tlmin = 2*T[lsym]-T[lmin]\r\n tlmax = 2*T[lsym]-T[lmax]\r\n trmin = 2*T[rsym]-T[rmin]\r\n trmax = 2*T[rsym]-T[rmax]\r\n\r\n # If mirrored points are not outside passed time range.\r\n if tlmin[0] > T[0] 
or tlmax[0] > T[0]:\r\n if lsym == indmax[0]:\r\n lmax = indmax[0:min(endMax,nbsym)][::-1]\r\n else:\r\n lmin = indmin[0:min(endMin,nbsym)][::-1]\r\n\r\n if lsym == 0:\r\n raise Exception('bug')\r\n\r\n lsym = 0\r\n tlmin = 2*T[lsym]-T[lmin]\r\n tlmax = 2*T[lsym]-T[lmax]\r\n\r\n if trmin[-1] < T[-1] or trmax[-1] < T[-1]:\r\n if rsym == indmax[-1]:\r\n rmax = indmax[max(endMax-nbsym,0):][::-1]\r\n else:\r\n rmin = indmin[max(endMin-nbsym,0):][::-1]\r\n\r\n if rsym == len(S)-1:\r\n raise Exception('bug')\r\n\r\n rsym = len(S)-1\r\n trmin = 2*T[rsym]-T[rmin]\r\n trmax = 2*T[rsym]-T[rmax]\r\n\r\n zlmax = S[lmax]\r\n zlmin = S[lmin]\r\n zrmax = S[rmax]\r\n zrmin = S[rmin]\r\n\r\n tmin = np.append(tlmin, np.append(T[indmin], trmin))\r\n tmax = np.append(tlmax, np.append(T[indmax], trmax))\r\n zmin = np.append(zlmin, np.append(S[indmin], zrmin))\r\n zmax = np.append(zlmax, np.append(S[indmax], zrmax))\r\n\r\n maxExtrema = np.array([tmax, zmax], dtype=self.DTYPE)\r\n minExtrema = np.array([tmin, zmin], dtype=self.DTYPE)\r\n\r\n # Make double sure, that each extremum is significant\r\n maxExtrema = np.delete(maxExtrema, np.where(maxExtrema[0,1:]==maxExtrema[0,:-1]),axis=1)\r\n minExtrema = np.delete(minExtrema, np.where(minExtrema[0,1:]==minExtrema[0,:-1]),axis=1)\r\n\r\n return maxExtrema, minExtrema\r\n\r\n def splinePoints(self, T, extrema, splineKind):\r\n \"\"\"\r\n Constructs spline over given points.\r\n\r\n Input:\r\n ---------\r\n T: Time array.\r\n extrema: Poistion (1st row) and values (2nd row) of points.\r\n splineKind: Type of spline.\r\n\r\n Output:\r\n ---------\r\n T: Poistion array.\r\n spline: Spline over the given points.\r\n \"\"\"\r\n\r\n kind = splineKind.lower()\r\n t = T[np.r_[T>=extrema[0,0]] & np.r_[T<=extrema[0,-1]]]\r\n if t.dtype != self.DTYPE: self.logger.error('t.dtype: '+str(t.dtype))\r\n if extrema.dtype != self.DTYPE: self.logger.error('extrema.dtype: '+str(xtrema.dtype))\r\n\r\n if kind == \"akima\":\r\n return t, akima(extrema[0], extrema[1], t)\r\n\r\n elif kind == 'cubic':\r\n if extrema.shape[1]>3:\r\n return t, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)\r\n else:\r\n return self.cubicSpline_3points(T, extrema)\r\n\r\n elif kind in ['slinear', 'quadratic', 'linear']:\r\n return T, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)\r\n\r\n else:\r\n raise ValueError(\"No such interpolation method!\")\r\n\r\n def cubicSpline_3points(self, T, extrema):\r\n \"\"\"\r\n Apperently scipy.interpolate.interp1d does not support\r\n cubic spline for less than 4 points.\r\n \"\"\"\r\n\r\n x0, x1, x2 = extrema[0]\r\n y0, y1, y2 = extrema[1]\r\n\r\n x1x0, x2x1 = x1-x0, x2-x1\r\n y1y0, y2y1 = y1-y0, y2-y1\r\n _x1x0, _x2x1 = 1./x1x0, 1./x2x1\r\n\r\n m11, m12, m13= 2*_x1x0, _x1x0, 0\r\n m21, m22, m23 = _x1x0, 2.*(_x1x0+_x2x1), _x2x1\r\n m31, m32, m33 = 0, _x2x1, 2.*_x2x1\r\n\r\n v1 = 3*y1y0*_x1x0*_x1x0\r\n v3 = 3*y2y1*_x2x1*_x2x1\r\n v2 = v1+v3\r\n\r\n M = np.matrix([[m11,m12,m13],[m21,m22,m23],[m31,m32,m33]])\r\n v = np.matrix([v1,v2,v3]).T\r\n k = np.array(np.linalg.inv(M)*v)\r\n\r\n a1 = k[0]*x1x0 - y1y0\r\n b1 =-k[1]*x1x0 + y1y0\r\n a2 = k[1]*x2x1 - y2y1\r\n b2 =-k[2]*x2x1 + y2y1\r\n\r\n t = T[np.r_[T>=x0] & np.r_[T<=x2]]\r\n t1 = (T[np.r_[T>=x0]&np.r_[T< x1]] - x0)/x1x0\r\n t2 = (T[np.r_[T>=x1]&np.r_[T<=x2]] - x1)/x2x1\r\n t11, t22 = 1.-t1, 1.-t2\r\n\r\n q1 = t11*y0 + t1*y1 + t1*t11*(a1*t11 + b1*t1)\r\n q2 = t22*y1 + t2*y2 + t2*t22*(a2*t22 + b2*t2)\r\n q = np.append(q1,q2)\r\n\r\n return t, q.astype(self.DTYPE)\r\n\r\n 
@classmethod\r\n def findExtrema(cls, t, s):\r\n \"\"\"\r\n Finds extrema and zero-crossings.\r\n\r\n Input:\r\n ---------\r\n S: Signal.\r\n T: Time array.\r\n\r\n Output:\r\n ---------\r\n localMaxPos: Time positions of maxima.\r\n localMaxVal: Values of signal at localMaxPos positions.\r\n localMinPos: Time positions of minima.\r\n localMinVal: Values of signal at localMinPos positions.\r\n indzer: Indexes of zero crossings.\r\n \"\"\"\r\n\r\n # Finds indexes of zero-crossings\r\n s1, s2 = s[:-1], s[1:]\r\n indzer = np.nonzero(s1*s2<0)[0]\r\n if np.any(s==0):\r\n iz = np.nonzero(s==0)[0]\r\n indz = []\r\n if np.any(np.diff(iz)==1):\r\n zer = (s==0)\r\n dz = np.diff(np.append(np.append(0, zer), 0))\r\n debz = np.nonzero(dz==1)[0]\r\n finz = np.nonzero(dz==-1)[0]-1\r\n indz = np.round((debz+finz)/2)\r\n else:\r\n indz = iz\r\n\r\n indzer = np.sort(np.append(indzer, indz))\r\n\r\n\r\n # Finds local extrema\r\n d = np.diff(s)\r\n d1, d2 = d[:-1], d[1:]\r\n indmin = np.nonzero(np.r_[d1*d2<0] & np.r_[d1<0])[0]+1\r\n indmax = np.nonzero(np.r_[d1*d2<0] & np.r_[d1>0])[0]+1\r\n\r\n # When two or more points have the same value\r\n if np.any(d==0):\r\n\r\n imax, imin = [], []\r\n\r\n bad = (d==0)\r\n dd = np.diff(np.append(np.append(0, bad), 0))\r\n debs = np.nonzero(dd==1)[0]\r\n fins = np.nonzero(dd==-1)[0]\r\n if debs[0]==1:\r\n if len(debs) > 1:\r\n debs, fins = debs[1:], fins[1:]\r\n else:\r\n debs, fins = [], []\r\n\r\n if len(debs) > 0:\r\n if fins[-1] == len(s)-1:\r\n if len(debs) > 1:\r\n debs, fins = debs[:-1], fins[:-1]\r\n else:\r\n debs, fins = [], []\r\n\r\n lc = len(debs)\r\n if lc > 0:\r\n for k in range(lc):\r\n if d[debs[k]-1] > 0:\r\n if d[fins[k]] < 0:\r\n imax.append(round((fins[k]+debs[k])/2.))\r\n else:\r\n if d[fins[k]] > 0:\r\n imin.append(round((fins[k]+debs[k])/2.))\r\n\r\n if len(imax) > 0:\r\n indmax = indmax.tolist()\r\n for x in imax: indmax.append(int(x))\r\n indmax.sort()\r\n\r\n if len(imin) > 0:\r\n indmin = indmin.tolist()\r\n for x in imin: indmin.append(int(x))\r\n indmin.sort()\r\n\r\n localMaxPos = t[indmax]\r\n localMaxVal = s[indmax]\r\n localMinPos = t[indmin]\r\n localMinVal = s[indmin]\r\n\r\n return localMaxPos, localMaxVal, localMinPos, localMinVal, indzer\r\n\r\n def stop_sifting(self, imf, envMax, envMin, mean, extNo):\r\n \"\"\"\r\n Criterium for stopping sifting process.\r\n Based on conditions presented in [1].\r\n\r\n [1] G. Rilling, P. Flandrin and P. Goncalves\r\n \"On Empirical Mode Decomposition and its\r\n algorithms\", 2003\r\n\r\n Input:\r\n ---------\r\n imf: Current imf.\r\n envMax: Upper envelope of imf.\r\n envMin: Bottom envelope of imf.\r\n mean: Mean of envelopes.\r\n extNo: Number of extrema.\r\n\r\n Output:\r\n ---------\r\n boolean: True if stopping criteria are meet.\r\n \"\"\"\r\n\r\n amp = np.abs(envMax - envMin)/2.\r\n sx = np.abs(mean)/amp\r\n\r\n f1 = np.mean(sx > self.stop1) > self.stop3\r\n f2 = np.any(sx > self.stop2)\r\n f3 = extNo > 2\r\n\r\n if ( not (f1 or f2) ) and f3:\r\n return True\r\n else:\r\n return False\r\n\r\n @staticmethod\r\n def _common_dtype(x, y):\r\n\r\n dtype = np.find_common_type([x.dtype, y.dtype], [])\r\n if x.dtype != dtype: x = x.astype(dtype)\r\n if y.dtype != dtype: y = y.astype(dtype)\r\n\r\n return x, y\r\n\r\n def emd(self, S, T=None, maxImf=None):\r\n \"\"\"\r\n Performs Emerical Mode Decomposition on signal S.\r\n The decomposition is limited to maxImf imf. No limitation as default.\r\n Returns IMF functions in dic format. 
IMF = {0:imf0, 1:imf1...}.\r\n\r\n Input:\r\n ---------\r\n S: Signal.\r\n T: Positions of signal. If none passed numpy arange is created.\r\n maxImf: IMF number to which decomposition should be performed.\r\n As a default, all IMFs are returned.\r\n\r\n Output:\r\n ---------\r\n return IMF, EXT, TIME, ITER, imfNo\r\n IMF: Signal IMFs in dictionary type. IMF = {0:imf0, 1:imf1...}\r\n EXT: Number of extrema for each IMF. IMF = {0:ext0, 1:ext1...}\r\n ITER: Number of iteration for each IMF.\r\n imfNo: Number of IMFs.\r\n \"\"\"\r\n\r\n if T is None: T = np.arange(len(S), dtype=S.dtype)\r\n if maxImf is None: maxImf = -1\r\n\r\n # Make sure same types are dealt\r\n S, T = self._common_dtype(S, T)\r\n self.DTYPE = S.dtype\r\n\r\n Res = S.astype(self.DTYPE)\r\n scale = 1.\r\n Res, scaledS = Res/scale, S/scale\r\n imf = np.zeros(len(S), dtype=self.DTYPE)\r\n imfOld = Res.copy()\r\n\r\n if Res.dtype!=self.DTYPE: self.logger.error('Res.dtype: '+str(Res.dtype))\r\n if scaledS.dtype!=self.DTYPE: self.logger.error('scaledS.dtype: '+str(scaledS.dtype))\r\n if imf.dtype!=self.DTYPE: self.logger.error('imf.dtype: '+str(imf.dtype))\r\n if imfOld.dtype!=self.DTYPE: self.logger.error('imfOld.dtype: '+str(imfOld.dtype))\r\n if T.dtype!=self.DTYPE: self.logger.error('T.dtype: '+str(T.dtype))\r\n\r\n if S.shape != T.shape:\r\n info = \"Time array should be the same size as signal.\"\r\n raise Exception(info)\r\n\r\n # Create arrays\r\n IMF = {} # Dic for imfs signals\r\n EXT = {} # Dic for number of extrema\r\n ITER = {} # Dic for number of iterations\r\n TIME = {} # Dic for time of computation\r\n imfNo = 0\r\n notFinish = True\r\n\r\n while(notFinish):\r\n self.logger.debug('IMF -- '+str(imfNo))\r\n\r\n #~ Res = scaledS - np.sum([IMF[i] for i in range(imfNo)],axis=0)\r\n Res -= imf\r\n imf = Res.copy()\r\n mean = np.zeros(len(S), dtype=self.DTYPE)\r\n\r\n # Counters\r\n n = 0 # All iterations for current imf.\r\n n_h = 0 # counts when |#zero - #ext| <=1\r\n\r\n # Time counter\r\n timeInit = time.time()\r\n if self.TIME:\r\n singleTime = time.time()\r\n\r\n while(n<self.MAX_ITERATION):\r\n n += 1\r\n\r\n if self.TIME:\r\n self.logger.info(\"Execution time: \"+str(time.time() - singleTime))\r\n singleTime = time.time()\r\n ext_res = self.findExtrema(T, imf)\r\n MP, mP = ext_res[0], ext_res[2]\r\n indzer = ext_res[4]\r\n\r\n extNo = len(mP)+len(MP)\r\n nzm = len(indzer)\r\n\r\n if extNo > 2:\r\n\r\n # Plotting. Either into file, or on-screen display.\r\n imfOld = imf.copy()\r\n imf = imf - self.reduceScale*mean\r\n\r\n env_ext = self.extractMaxMinSpline(T, imf)\r\n maxEnv, minEnv = env_ext[0], env_ext[1]\r\n\r\n if isinstance(maxEnv, int):\r\n notFinish = True\r\n break\r\n\r\n mean = 0.5*(maxEnv+minEnv)\r\n\r\n if maxEnv.dtype!=self.DTYPE: self.logger.error('maxEnvimf.dtype: '+str(maxEnv.dtype))\r\n if minEnv.dtype!=self.DTYPE: self.logger.error('minEnvimf.dtype: '+str(minEnvimf.dtype))\r\n if imf.dtype!=self.DTYPE: self.logger.error('imf.dtype: '+str(imf.dtype))\r\n if mean.dtype!=self.DTYPE: self.logger.error('mean.dtype: '+str(mean.dtype))\r\n\r\n # Stop, because of too many iterations\r\n if n > self.maxIteration:\r\n self.logger.info('TOO MANY ITERATIONS! 
BREAK!')\r\n break\r\n\r\n # Fix number of iterations\r\n if self.FIXE:\r\n if n>=self.FIXE+1: break\r\n\r\n # Fix number of iterations after number of zero-crossings\r\n # and extrema differ at most by one.\r\n elif self.FIXE_H:\r\n\r\n ext_res = self.findExtrema(T, imf)\r\n mP, MP, indzer = ext_res[0], ext_res[2], ext_res[4]\r\n extNo = len(mP)+len(MP)\r\n nzm = len(indzer)\r\n\r\n if n == 1: continue\r\n if abs(extNo-nzm)>1: n_h = 0\r\n else: n_h += 1\r\n\r\n # STOP\r\n if n_h >= self.FIXE_H: break\r\n\r\n # Stops after default stopping criteria are meet.\r\n else:\r\n\r\n mP,mV,MP,MV, indzer = self.findExtrema(T, imf)\r\n extNo = len(mP)+len(MP)\r\n nzm = len(indzer)\r\n\r\n f1 = self.stop_sifting(imf, maxEnv, minEnv, mean, extNo)\r\n f2 = abs(extNo - nzm)<2\r\n\r\n # STOP\r\n if f1 and f2: break\r\n\r\n else:\r\n notFinish = False\r\n break\r\n\r\n IMF[imfNo] = imf.copy()\r\n ITER[imfNo] = n\r\n EXT[imfNo] = extNo\r\n TIME[imfNo] = time.time() - timeInit\r\n imfNo += 1\r\n\r\n if imfNo==maxImf-1:\r\n notFinish = False\r\n break\r\n\r\n #~ Saving residuum if meaningful\r\n Res = scaledS - np.sum([IMF[i] for i in range(imfNo)],axis=0)\r\n if np.sum(np.abs(Res)) > 1e-10:\r\n IMF[imfNo] = Res\r\n ITER[imfNo] = 0\r\n EXT[imfNo] = extNo\r\n TIME[imfNo] = 0\r\n imfNo += 1\r\n\r\n for key in list(IMF.keys()):\r\n IMF[key] *= scale\r\n return IMF, EXT, ITER, imfNo\r\n\r\n###################################################\r\n## Beggining of program\r\n\r\nif __name__ == \"__main__\":\r\n\r\n import pylab as plt\r\n\r\n # Logging options\r\n logging.basicConfig(level=logging.DEBUG)\r\n\r\n # EMD options\r\n maxImf = -1\r\n DTYPE = np.float64\r\n\r\n # Signal options\r\n N = 1000\r\n tMin, tMax = 0, 1\r\n T = np.linspace(tMin, tMax, N, dtype=DTYPE)\r\n\r\n S = 6*T +np.cos(8*np.pi**T)+0.5*np.cos(40*np.pi*T)\r\n S = S.astype(DTYPE)\r\n\r\n # Prepare and run EMD\r\n emd = EMD()\r\n emd.FIXE_H = 5\r\n #~ emd.FIXE = 10\r\n emd.nbsym = 2\r\n emd.splineKind = 'cubic'\r\n emd.DTYPE = DTYPE\r\n IMF, EXT, ITER, imfNo = emd.emd(S, T, maxImf)\r\n\r\n # Save results (IMFs) into file\r\n npIMF = np.zeros((imfNo, N), dtype=DTYPE)\r\n for i in range(imfNo): npIMF[i] = IMF[i]\r\n\r\n np.save('imfs', npIMF)\r\n\r\n # Plotting\r\n #~ c = np.floor(np.sqrt(imfNo+3))\r\n #~ r = np.ceil( (imfNo+3)/c)\r\n c = np.floor(np.sqrt(imfNo+1))\r\n r = np.ceil( (imfNo+1)/c)\r\n\r\n plt.ioff()\r\n plt.subplot(r,c,1)\r\n plt.plot(T, S, 'r')\r\n plt.title(\"Original signal\")\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Amplitude')\r\n\r\n #~ plt.subplot(r,c,2)\r\n #~ plt.plot([EXT[i] for i in range(imfNo)], 'o')\r\n #~ plt.ylim(0, max([EXT[i] for i in range(imfNo)])+1)\r\n #~ plt.title(\"Number of extrema\")\r\n #~\r\n #~ plt.subplot(r,c,3)\r\n #~ plt.plot([ITER[i] for i in range(imfNo)], 'o')\r\n #~ plt.ylim(0, max([ITER[i] for i in range(imfNo)])+1)\r\n #~ plt.title(\"Number of iterations\")\r\n\r\n for num in range(imfNo):\r\n #~ plt.subplot(r,c,num+4)\r\n plt.subplot(r,c,num+2)\r\n plt.plot(T, IMF[num],'g')\r\n plt.xlabel('Time')\r\n plt.ylabel('Amplitude')\r\n\r\n if num == imfNo-1:\r\n plt.title('Residue')\r\n else:\r\n plt.title(\"Imf \"+str(num))\r\n\r\n plt.tight_layout()\r\n plt.show()\r\n"
] | [
[
"numpy.matrix",
"numpy.sqrt",
"numpy.linspace",
"numpy.round",
"numpy.mean",
"numpy.any",
"numpy.where",
"numpy.save",
"numpy.ceil",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.zeros",
"numpy.nonzero",
"numpy.linalg.inv",
"numpy.append",
"numpy.find_common_type",
"numpy.array",
"numpy.abs",
"numpy.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
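The EMD implementation above repeatedly builds upper and lower envelopes through the signal's extrema and subtracts their mean. A single, boundary-handling-free sifting step in the same spirit (illustrative only; the class in the record additionally mirrors extrema at the edges and applies stopping criteria):

import numpy as np
from scipy.interpolate import interp1d

t = np.linspace(0, 1, 500)
s = np.cos(8 * np.pi * t) + 0.5 * np.cos(40 * np.pi * t)

d = np.diff(s)
maxima = np.nonzero((d[:-1] > 0) & (d[1:] < 0))[0] + 1   # indices of local maxima
minima = np.nonzero((d[:-1] < 0) & (d[1:] > 0))[0] + 1   # indices of local minima

# cubic envelopes through the extrema, extrapolated over the full time base
upper = interp1d(t[maxima], s[maxima], kind="cubic", fill_value="extrapolate")(t)
lower = interp1d(t[minima], s[minima], kind="cubic", fill_value="extrapolate")(t)
proto_imf = s - 0.5 * (upper + lower)                    # one sifting step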
simpla-fusion/spdb | [
"be6667eb6c7d464f68b0fd51ca2a8f021581eb84"
] | [
"examples/obsolete/putslice_eq.py"
] | [
"# Definition of the class structures in file imas.py\nimport imas\nimport numpy\nimport sys\nimport os\n\n'''\nThis sample program will create a pulse file (shot 13, run 1) and will\nput an example of equilibirium IDS using put_slice methods.\n'''\n\n# This routine reads an array of pfsystems IDSs in the database, filling\n# some fields of the IDSS\n\nTEST_DATABASE_NAME = 'test'\n\n\ndef put_ids():\n \"\"\"Class Itm is the main class for the UAL.\n\n It contains a set of field classes, each corresponding to a IDS\n defined in the UAL The parameters passed to this creator define the\n shot and run number. The second pair of arguments defines the\n reference shot and run and is used when the a new database is\n created, as in this example.\n\n \"\"\"\n\n shot = 13\n time = 1\n interp = 1\n\n imas_obj = imas.ids(13, 1, 13, 1)\n # Create a new instance of database\n imas_obj.create_env(\"fydev\", \"test\", \"3\")\n\n if imas_obj.isConnected():\n print('Creation of data entry OK!')\n else:\n print('Creation of data entry FAILED!')\n sys.exit()\n\n number = 10\n\n # Allocate a first generic vector and its time base\n lentime_1 = 3\n vect1DDouble_1 = numpy.empty([lentime_1])\n time_1 = numpy.empty([lentime_1])\n\n for i in range(lentime_1):\n time_1[i] = i\n vect1DDouble_1[i] = i * 10\n\n print('========================================================')\n print(time_1)\n print(vect1DDouble_1)\n\n # Allocate a second generic vector and its time base\n lentime_2 = 4\n vect1DDouble_2 = numpy.empty([lentime_2])\n time_2 = numpy.empty([lentime_2])\n\n for i in range(lentime_2):\n time_2[i] = i\n vect1DDouble_2[i] = i * 11\n\n print('========================================================')\n print(time_2)\n print(vect1DDouble_2)\n\n vect2DDouble_1 = numpy.zeros([3, 3])\n for i in range(3):\n for j in range(3):\n vect2DDouble_1[i, j] = i * 100 + j\n\n print('========================================================')\n print(vect2DDouble_1)\n # Allocate a second generic vector and its time base\n lentime_2 = 4\n vect1DDouble_2 = numpy.empty([lentime_2])\n time_2 = numpy.empty([lentime_2])\n\n for i in range(lentime_2):\n time_2[i] = i\n vect1DDouble_2[i] = i * 11\n\n print('========================================================')\n print(time_2)\n print(vect1DDouble_2)\n\n vect2DDouble_1 = numpy.zeros([3, 3])\n for i in range(3):\n for j in range(3):\n vect2DDouble_1[i, j] = i * 100 + j\n\n print('========================================================')\n print(vect2DDouble_1)\n\n vect2DDouble_2 = vect2DDouble_1 + 10000\n '''\n print( '========================================================')\n print( vect3DDouble_2)\n '''\n imas_obj.equilibrium.ids_properties.comment = 'This is a test ids'\n\n # A sample int\n\n # Mandatory to define this property\n imas_obj.equilibrium.ids_properties.homogeneous_time = 1\n imas_obj.equilibrium.resize(1)\n imas_obj.equilibrium.time_slice[0].profiles_2d.resize(2)\n imas_obj.equilibrium.time_slice[0].profiles_2d[0].grid_type.name = 'GRID TYPE 1A'\n imas_obj.equilibrium.time_slice[0].profiles_2d[1].grid_type.name = 'GRID TYPE 2B'\n\n imas_obj.equilibrium.time.resize(1)\n imas_obj.equilibrium.time_slice[0].profiles_2d[0].r.resize(3, 3)\n imas_obj.equilibrium.time_slice[0].profiles_2d[1].r.resize(3, 3)\n\n print('Start Put, writing first slice')\n imas_obj.equilibrium.time_slice[0].profiles_2d[0].r[:, 0] = vect2DDouble_1[0, :]\n imas_obj.equilibrium.time_slice[0].profiles_2d[1].r[:, 0] = vect2DDouble_2[0, :]\n imas_obj.equilibrium.time[0] = time_1[0]\n 
imas_obj.equilibrium.put()\n print('Completed Put ')\n\n for i in range(lentime_1):\n print('========================================================')\n print('vect3DDouble_1[i,:,:]')\n print(vect2DDouble_1[i, :])\n print('========================================================')\n\n imas_obj.equilibrium.time_slice[0].profiles_2d[0].r[:, i] = vect2DDouble_1[i, :]\n print('========================================================')\n print('imas_obj.equilibrium.time_slice[0].profiles_2d[0].r')\n print(imas_obj.equilibrium.time_slice[0].profiles_2d[0].r[:, i])\n print('========================================================')\n imas_obj.equilibrium.time_slice[0].profiles_2d[1].r[:, i] = vect2DDouble_2[i, :]\n imas_obj.equilibrium.time[0] = time_1[i]\n print(('Writing slice={0}'.format(i)))\n imas_obj.equilibrium.putSlice()\n\n print('========================================================')\n print(imas_obj.equilibrium.time_slice[0].profiles_2d[0].r)\n\n '''\n print( '========================================================')\n print (imas_obj.equilibrium.time_slice[0].profiles_2d[1].r)\n '''\n imas_obj.close()\n\n\nput_ids()\n"
] | [
[
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ChuanTianML/learn_gnmt | [
"19e97e04feaecd7682abaf6247a0f9e3f37f9892"
] | [
"nmt/utils/common_test_utils.py"
] | [
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Common utility functions for tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import lookup_ops\n\nfrom ..utils import iterator_utils\nfrom ..utils import standard_hparams_utils\n\n\ndef create_test_hparams(unit_type=\"lstm\",\n encoder_type=\"uni\",\n num_layers=4,\n attention=\"\",\n attention_architecture=None,\n use_residual=False,\n inference_indices=None,\n num_translations_per_input=1,\n beam_width=0,\n init_op=\"uniform\"):\n \"\"\"Create training and inference test hparams.\"\"\"\n num_residual_layers = 0\n if use_residual:\n # TODO(rzhao): Put num_residual_layers computation logic into\n # `model_utils.py`, so we can also test it here.\n num_residual_layers = 2\n\n standard_hparams = standard_hparams_utils.create_standard_hparams()\n\n # Networks\n standard_hparams.num_units = 5\n standard_hparams.num_encoder_layers = num_layers\n standard_hparams.num_decoder_layers = num_layers\n standard_hparams.dropout = 0.5\n standard_hparams.unit_type = unit_type\n standard_hparams.encoder_type = encoder_type\n standard_hparams.residual = use_residual\n standard_hparams.num_residual_layers = num_residual_layers\n\n # Attention mechanisms\n standard_hparams.attention = attention\n standard_hparams.attention_architecture = attention_architecture\n\n # Train\n standard_hparams.init_op = init_op\n standard_hparams.num_train_steps = 1\n standard_hparams.decay_scheme = \"\"\n\n # Infer\n standard_hparams.tgt_max_len_infer = 100\n standard_hparams.beam_width = beam_width\n standard_hparams.num_translations_per_input = num_translations_per_input\n\n # Misc\n standard_hparams.forget_bias = 0.0\n standard_hparams.random_seed = 3\n\n # Vocab\n standard_hparams.src_vocab_size = 5\n standard_hparams.tgt_vocab_size = 5\n standard_hparams.eos = \"eos\"\n standard_hparams.sos = \"sos\"\n standard_hparams.src_vocab_file = \"\"\n standard_hparams.tgt_vocab_file = \"\"\n standard_hparams.src_embed_file = \"\"\n standard_hparams.tgt_embed_file = \"\"\n\n # For inference.py test\n standard_hparams.subword_option = \"bpe\"\n standard_hparams.src = \"src\"\n standard_hparams.tgt = \"tgt\"\n standard_hparams.src_max_len = 400\n standard_hparams.tgt_eos_id = 0\n standard_hparams.inference_indices = inference_indices\n return standard_hparams\n\n\ndef create_test_iterator(hparams, mode):\n \"\"\"Create test iterator.\"\"\"\n src_vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([hparams.eos, \"a\", \"b\", \"c\", \"d\"]))\n tgt_vocab_mapping = tf.constant([hparams.sos, hparams.eos, \"a\", \"b\", \"c\"])\n tgt_vocab_table = lookup_ops.index_table_from_tensor(tgt_vocab_mapping)\n if mode == tf.contrib.learn.ModeKeys.INFER:\n reverse_tgt_vocab_table = 
lookup_ops.index_to_string_table_from_tensor(\n tgt_vocab_mapping)\n\n src_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"a a b b c\", \"a b b\"]))\n\n if mode != tf.contrib.learn.ModeKeys.INFER:\n tgt_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"a b c b c\", \"a b c b\"]))\n return (\n iterator_utils.get_iterator(\n src_dataset=src_dataset,\n tgt_dataset=tgt_dataset,\n src_vocab_table=src_vocab_table,\n tgt_vocab_table=tgt_vocab_table,\n batch_size=hparams.batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets),\n src_vocab_table,\n tgt_vocab_table)\n else:\n return (\n iterator_utils.get_infer_iterator(\n src_dataset=src_dataset,\n src_vocab_table=src_vocab_table,\n eos=hparams.eos,\n batch_size=hparams.batch_size),\n src_vocab_table,\n tgt_vocab_table,\n reverse_tgt_vocab_table)\n"
] | [
[
"tensorflow.python.ops.lookup_ops.index_to_string_table_from_tensor",
"tensorflow.python.ops.lookup_ops.index_table_from_tensor",
"tensorflow.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"1.4",
"2.2",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"1.2",
"2.10"
]
}
] |
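The test utilities above build vocabulary lookup tables with lookup_ops.index_table_from_tensor. A standalone graph-mode sketch of that lookup (assuming a TensorFlow 1.x runtime, which matches some but not all of the versions listed for this record; the vocabulary and tokens below are made up):

import tensorflow as tf
from tensorflow.python.ops import lookup_ops

vocab = tf.constant(["eos", "a", "b", "c", "d"])
table = lookup_ops.index_table_from_tensor(vocab, default_value=0)  # unknown tokens map to the "eos" id
ids = table.lookup(tf.constant(["a", "b", "zzz"]))

with tf.Session() as sess:
    sess.run(tf.tables_initializer())   # lookup tables must be initialised before use
    print(sess.run(ids))                # expected: [1 2 0]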
floyebolu/GPy | [
"d493b200642196c6d211ea1bcb052f3fbf396f24",
"d493b200642196c6d211ea1bcb052f3fbf396f24"
] | [
"GPy/plotting/gpy_plot/latent_plots.py",
"GPy/models/gradient_checker.py"
] | [
"#===============================================================================\n# Copyright (c) 2015, Max Zwiessele\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of GPy.plotting.gpy_plot.latent_plots nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#===============================================================================\nimport numpy as np\nfrom . import plotting_library as pl\nfrom .plot_util import get_x_y_var,\\\n update_not_existing_kwargs, \\\n helper_for_plot_data, scatter_label_generator, subsample_X,\\\n find_best_layout_for_subplots\n\ndef _wait_for_updates(view, updates):\n if view is not None:\n try:\n if updates:\n clear = raw_input('yes or enter to deactivate updates - otherwise still do updates - use plots[imshow].deactivate() to clear')\n if clear.lower() in 'yes' or clear == '':\n view.deactivate()\n else:\n view.deactivate()\n except AttributeError:\n # No updateable view:\n pass\n except TypeError:\n # No updateable view:\n pass\n\ndef _new_canvas(self, projection, kwargs, which_indices):\n input_1, input_2, input_3 = sig_dims = self.get_most_significant_input_dimensions(which_indices)\n\n if input_3 is None:\n zlabel = None\n else:\n zlabel = 'latent dimension %i' % input_3\n canvas, kwargs = pl().new_canvas(projection=projection, xlabel='latent dimension %i' % input_1,\n ylabel='latent dimension %i' % input_2,\n zlabel=zlabel, **kwargs)\n return canvas, projection, kwargs, sig_dims\n\ndef _plot_latent_scatter(canvas, X, visible_dims, labels, marker, num_samples, projection='2d', **kwargs):\n from .. 
import Tango\n Tango.reset()\n X, labels = subsample_X(X, labels, num_samples)\n scatters = []\n generate_colors = 'color' not in kwargs\n for x, y, z, this_label, _, m in scatter_label_generator(labels, X, visible_dims, marker):\n update_not_existing_kwargs(kwargs, pl().defaults.latent_scatter)\n if generate_colors:\n kwargs['color'] = Tango.nextMedium()\n if projection == '3d':\n scatters.append(pl().scatter(canvas, x, y, Z=z, marker=m, label=this_label, **kwargs))\n else: scatters.append(pl().scatter(canvas, x, y, marker=m, label=this_label, **kwargs))\n return scatters\n\ndef plot_latent_scatter(self, labels=None,\n which_indices=None,\n legend=True,\n plot_limits=None,\n marker='<>^vsd',\n num_samples=1000,\n projection='2d',\n **kwargs):\n \"\"\"\n Plot a scatter plot of the latent space.\n\n :param array-like labels: a label for each data point (row) of the inputs\n :param (int, int) which_indices: which input dimensions to plot against each other\n :param bool legend: whether to plot the legend on the figure\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param str marker: markers to use - cycle if more labels then markers are given\n :param kwargs: the kwargs for the scatter plots\n \"\"\"\n canvas, projection, kwargs, sig_dims = _new_canvas(self, projection, kwargs, which_indices)\n\n X, _, _ = get_x_y_var(self)\n if labels is None:\n labels = np.ones(self.num_data)\n legend = False\n else:\n legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]\n scatters = _plot_latent_scatter(canvas, X, sig_dims, labels, marker, num_samples, projection=projection, **kwargs)\n return pl().add_to_canvas(canvas, dict(scatter=scatters), legend=legend)\n\n\ndef plot_latent_inducing(self,\n which_indices=None,\n legend=False,\n plot_limits=None,\n marker=None,\n projection='2d',\n **kwargs):\n \"\"\"\n Plot a scatter plot of the inducing inputs.\n\n :param [int] which_indices: which input dimensions to plot against each other\n :param bool legend: whether to plot the legend on the figure\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param str marker: marker to use [default is custom arrow like]\n :param kwargs: the kwargs for the scatter plots\n :param str projection: for now 2d or 3d projection (other projections can be implemented, see developer documentation)\n \"\"\"\n canvas, projection, kwargs, sig_dims = _new_canvas(self, projection, kwargs, which_indices)\n\n if legend: label = 'inducing'\n else: label = None\n if marker is not None:\n kwargs['marker'] = marker\n update_not_existing_kwargs(kwargs, pl().defaults.inducing_2d) # @UndefinedVariable\n from .data_plots import _plot_inducing\n scatters = _plot_inducing(self, canvas, sig_dims[:2], projection, label, **kwargs)\n return pl().add_to_canvas(canvas, dict(scatter=scatters), legend=legend)\n\n\n\n\n\n\ndef _plot_magnification(self, canvas, which_indices, Xgrid,\n xmin, xmax, resolution, updates,\n mean=True, covariance=True,\n kern=None,\n **imshow_kwargs):\n def plot_function(x):\n Xtest_full = np.zeros((x.shape[0], Xgrid.shape[1]))\n Xtest_full[:, which_indices] = x\n\n mf = self.predict_magnification(Xtest_full, kern=kern, mean=mean, covariance=covariance)\n return mf.reshape(resolution, resolution).T\n imshow_kwargs = update_not_existing_kwargs(imshow_kwargs, pl().defaults.magnification)\n try:\n if updates:\n return pl().imshow_interact(canvas, 
plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, **imshow_kwargs)\n else: raise NotImplementedError\n except NotImplementedError:\n return pl().imshow(canvas, plot_function(Xgrid[:, which_indices]), (xmin[0], xmax[0], xmin[1], xmax[1]), **imshow_kwargs)\n\ndef plot_magnification(self, labels=None, which_indices=None,\n resolution=60, marker='<>^vsd', legend=True,\n plot_limits=None,\n updates=False,\n mean=True, covariance=True,\n kern=None, num_samples=1000,\n scatter_kwargs=None, plot_scatter=True,\n **imshow_kwargs):\n \"\"\"\n Plot the magnification factor of the GP on the inputs. This is the\n density of the GP as a gray scale.\n\n :param array-like labels: a label for each data point (row) of the inputs\n :param (int, int) which_indices: which input dimensions to plot against each other\n :param int resolution: the resolution at which we predict the magnification factor\n :param str marker: markers to use - cycle if more labels then markers are given\n :param bool legend: whether to plot the legend on the figure\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param bool updates: if possible, make interactive updates using the specific library you are using\n :param bool mean: use the mean of the Wishart embedding for the magnification factor\n :param bool covariance: use the covariance of the Wishart embedding for the magnification factor\n :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction\n :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.\n :param imshow_kwargs: the kwargs for the imshow (magnification factor)\n :param kwargs: the kwargs for the scatter plots\n \"\"\"\n input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]\n X = get_x_y_var(self)[0]\n _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution)\n canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),\n xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)\n plots = {}\n if legend and plot_scatter:\n if (labels is not None):\n legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]\n else:\n labels = np.ones(self.num_data)\n legend = False\n if plot_scatter:\n plots['scatters'] = _plot_latent_scatter(canvas, X, which_indices, labels, marker, num_samples, projection='2d', **scatter_kwargs or {})\n plots['view'] = _plot_magnification(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, updates, mean, covariance, kern, **imshow_kwargs)\n retval = pl().add_to_canvas(canvas, plots,\n legend=legend,\n )\n _wait_for_updates(plots['view'], updates)\n return retval\n\n\n\n\ndef _plot_latent(self, canvas, which_indices, Xgrid,\n xmin, xmax, resolution, updates,\n kern=None,\n **imshow_kwargs):\n def plot_function(x):\n Xtest_full = np.zeros((x.shape[0], Xgrid.shape[1]))\n Xtest_full[:, which_indices] = x\n mf = self.predict(Xtest_full, kern=kern)[1]\n if mf.shape[1]==self.output_dim:\n mf = mf.sum(-1)\n else:\n mf *= self.output_dim\n mf = np.log(mf)\n return mf.reshape(resolution, resolution).T\n\n imshow_kwargs = update_not_existing_kwargs(imshow_kwargs, pl().defaults.latent)\n try:\n if updates:\n return pl().imshow_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], 
xmax[1]), resolution=resolution, **imshow_kwargs)\n else: raise NotImplementedError\n except NotImplementedError:\n return pl().imshow(canvas, plot_function(Xgrid[:, which_indices]), (xmin[0], xmax[0], xmin[1], xmax[1]), **imshow_kwargs)\n\ndef plot_latent(self, labels=None, which_indices=None,\n resolution=60, legend=True,\n plot_limits=None,\n updates=False,\n kern=None, marker='<>^vsd',\n num_samples=1000, projection='2d',\n scatter_kwargs=None, **imshow_kwargs):\n \"\"\"\n Plot the latent space of the GP on the inputs. This is the\n density of the GP posterior as a grey scale and the\n scatter plot of the input dimemsions selected by which_indices.\n\n :param array-like labels: a label for each data point (row) of the inputs\n :param (int, int) which_indices: which input dimensions to plot against each other\n :param int resolution: the resolution at which we predict the magnification factor\n :param bool legend: whether to plot the legend on the figure\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param bool updates: if possible, make interactive updates using the specific library you are using\n :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction\n :param str marker: markers to use - cycle if more labels then markers are given\n :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.\n :param imshow_kwargs: the kwargs for the imshow (magnification factor)\n :param scatter_kwargs: the kwargs for the scatter plots\n \"\"\"\n if projection != '2d':\n raise ValueError('Cannot plot latent in other then 2 dimensions, consider plot_scatter')\n input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]\n X = get_x_y_var(self)[0]\n _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution)\n canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),\n xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)\n if legend:\n if (labels is not None):\n legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]\n else:\n labels = np.ones(self.num_data)\n legend = False\n scatters = _plot_latent_scatter(canvas, X, which_indices, labels, marker, num_samples, projection='2d', **scatter_kwargs or {})\n view = _plot_latent(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, updates, kern, **imshow_kwargs)\n retval = pl().add_to_canvas(canvas, dict(scatter=scatters, imshow=view), legend=legend)\n _wait_for_updates(view, updates)\n return retval\n\ndef _plot_steepest_gradient_map(self, canvas, which_indices, Xgrid,\n xmin, xmax, resolution, output_labels, updates,\n kern=None, annotation_kwargs=None,\n **imshow_kwargs):\n if output_labels is None:\n output_labels = range(self.output_dim)\n def plot_function(x):\n Xgrid[:, which_indices] = x\n dmu_dX = np.sqrt(((self.predictive_gradients(Xgrid, kern=kern)[0])**2).sum(1))\n #dmu_dX = self.predictive_gradients(Xgrid, kern=kern)[0].sum(1)\n argmax = np.argmax(dmu_dX, 1).astype(int)\n return dmu_dX.max(1).reshape(resolution, resolution).T, np.array(output_labels)[argmax].reshape(resolution, resolution).T\n annotation_kwargs = update_not_existing_kwargs(annotation_kwargs or {}, pl().defaults.annotation)\n imshow_kwargs = update_not_existing_kwargs(imshow_kwargs 
or {}, pl().defaults.gradient)\n try:\n if updates:\n return dict(annotation=pl().annotation_heatmap_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, imshow_kwargs=imshow_kwargs, **annotation_kwargs))\n else:\n raise NotImplementedError\n except NotImplementedError:\n imshow, annotation = pl().annotation_heatmap(canvas, *plot_function(Xgrid[:, which_indices]), extent=(xmin[0], xmax[0], xmin[1], xmax[1]), imshow_kwargs=imshow_kwargs, **annotation_kwargs)\n return dict(heatmap=imshow, annotation=annotation)\n\ndef plot_steepest_gradient_map(self, output_labels=None, data_labels=None, which_indices=None,\n resolution=15, legend=True,\n plot_limits=None,\n updates=False,\n kern=None, marker='<>^vsd',\n num_samples=1000,\n annotation_kwargs=None, scatter_kwargs=None, **imshow_kwargs):\n\n \"\"\"\n Plot the latent space of the GP on the inputs. This is the\n density of the GP posterior as a grey scale and the\n scatter plot of the input dimemsions selected by which_indices.\n\n :param array-like labels: a label for each data point (row) of the inputs\n :param (int, int) which_indices: which input dimensions to plot against each other\n :param int resolution: the resolution at which we predict the magnification factor\n :param bool legend: whether to plot the legend on the figure, if int plot legend columns on legend\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param bool updates: if possible, make interactive updates using the specific library you are using\n :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction\n :param str marker: markers to use - cycle if more labels then markers are given\n :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.\n :param imshow_kwargs: the kwargs for the imshow (magnification factor)\n :param annotation_kwargs: the kwargs for the annotation plot\n :param scatter_kwargs: the kwargs for the scatter plots\n \"\"\"\n input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]\n X = get_x_y_var(self)[0]\n _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution)\n canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),\n xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)\n if (data_labels is not None):\n legend = find_best_layout_for_subplots(len(np.unique(data_labels)))[1]\n else:\n data_labels = np.ones(self.num_data)\n legend = False\n plots = dict(scatter=_plot_latent_scatter(canvas, X, which_indices, data_labels, marker, num_samples, **scatter_kwargs or {}))\n plots.update(_plot_steepest_gradient_map(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, output_labels, updates, kern, annotation_kwargs=annotation_kwargs, **imshow_kwargs))\n retval = pl().add_to_canvas(canvas, plots, legend=legend)\n _wait_for_updates(plots['annotation'], updates)\n return retval\n\n\n\n\n",
"# ## Copyright (c) 2012, GPy authors (see AUTHORS.txt).\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nimport numpy\nnp = numpy\n\nfrom ..core.parameterization import Param\nfrom GPy.core.model import Model\nfrom ..util.block_matrices import get_blocks, get_block_shapes, unblock, get_blocks_3d, get_block_shapes_3d\n\ndef get_shape(x):\n if isinstance(x, numpy.ndarray):\n return x.shape\n return ()\n\ndef at_least_one_element(x):\n if isinstance(x, (list, tuple)):\n return x\n return [x]\n\ndef flatten_if_needed(x):\n return numpy.atleast_1d(x).flatten()\n\nclass GradientChecker(Model):\n\n def __init__(self, f, df, x0, names=None, *args, **kwargs):\n \"\"\"\n :param f: Function to check gradient for\n :param df: Gradient of function to check\n :param x0:\n Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names).\n Can be a list of arrays, if takes a list of arrays. This list will be passed\n to f and df in the same order as given here.\n If only one argument, make sure not to pass a list!!!\n\n :type x0: [array-like] | array-like | float | int\n :param names:\n Names to print, when performing gradcheck. If a list was passed to x0\n a list of names with the same length is expected.\n :param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs)\n\n Examples:\n ---------\n from GPy.models import GradientChecker\n N, M, Q = 10, 5, 3\n\n Sinusoid:\n\n X = numpy.random.rand(N, Q)\n grad = GradientChecker(numpy.sin,numpy.cos,X,'x')\n grad.checkgrad(verbose=1)\n\n Using GPy:\n\n X, Z = numpy.random.randn(N,Q), numpy.random.randn(M,Q)\n kern = GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True)\n grad = GradientChecker(kern.K,\n lambda x: 2*kern.dK_dX(numpy.ones((1,1)), x),\n x0 = X.copy(),\n names='X')\n grad.checkgrad(verbose=1)\n grad.randomize()\n grad.checkgrad(verbose=1)\n \"\"\"\n super(GradientChecker, self).__init__(name='GradientChecker')\n if isinstance(x0, (list, tuple)) and names is None:\n self.shapes = [get_shape(xi) for xi in x0]\n self.names = ['X{i}'.format(i=i) for i in range(len(x0))]\n elif isinstance(x0, (list, tuple)) and names is not None:\n self.shapes = [get_shape(xi) for xi in x0]\n self.names = names\n elif names is None:\n self.names = ['X']\n self.shapes = [get_shape(x0)]\n else:\n self.names = names\n self.shapes = [get_shape(x0)]\n\n for name, xi in zip(self.names, at_least_one_element(x0)):\n self.__setattr__(name, Param(name, xi))\n self.link_parameter(self.__getattribute__(name))\n# self._param_names = []\n# for name, shape in zip(self.names, self.shapes):\n# self._param_names.extend(map(lambda nameshape: ('_'.join(nameshape)).strip('_'), itertools.izip(itertools.repeat(name), itertools.imap(lambda t: '_'.join(map(str, t)), itertools.product(*map(lambda xi: range(xi), shape))))))\n self.args = args\n self.kwargs = kwargs\n self.f = f\n self.df = df\n\n def _get_x(self):\n if len(self.names) > 1:\n return [self.__getattribute__(name) for name in self.names] + list(self.args)\n return [self.__getattribute__(self.names[0])] + list(self.args)\n\n def log_likelihood(self):\n return float(numpy.sum(self.f(*self._get_x(), **self.kwargs)))\n\n def _log_likelihood_gradients(self):\n return numpy.atleast_1d(self.df(*self._get_x(), **self.kwargs)).flatten()\n\n #def _get_params(self):\n #return numpy.atleast_1d(numpy.hstack(map(lambda name: flatten_if_needed(self.__getattribute__(name)), self.names)))\n\n #def _set_params(self, x):\n #current_index = 0\n #for name, shape in zip(self.names, 
self.shapes):\n #current_size = numpy.prod(shape)\n #self.__setattr__(name, x[current_index:current_index + current_size].reshape(shape))\n #current_index += current_size\n\n #def _get_param_names(self):\n #_param_names = []\n #for name, shape in zip(self.names, self.shapes):\n #_param_names.extend(map(lambda nameshape: ('_'.join(nameshape)).strip('_'), itertools.izip(itertools.repeat(name), itertools.imap(lambda t: '_'.join(map(str, t)), itertools.product(*map(lambda xi: range(xi), shape))))))\n #return _param_names\n\n\nclass HessianChecker(GradientChecker):\n\n def __init__(self, f, df, ddf, x0, names=None, *args, **kwargs):\n \"\"\"\n :param f: Function (only used for numerical hessian gradient)\n :param df: Gradient of function to check\n :param ddf: Analytical gradient function\n :param x0:\n Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names).\n Can be a list of arrays, if takes a list of arrays. This list will be passed\n to f and df in the same order as given here.\n If only one argument, make sure not to pass a list!!!\n\n :type x0: [array-like] | array-like | float | int\n :param names:\n Names to print, when performing gradcheck. If a list was passed to x0\n a list of names with the same length is expected.\n :param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs)\n\n \"\"\"\n super(HessianChecker, self).__init__(df, ddf, x0, names=names, *args, **kwargs)\n self._f = f\n self._df = df\n self._ddf = ddf\n\n def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False):\n \"\"\"\n Overwrite checkgrad method to check whole block instead of looping through\n\n Shows diagnostics using matshow instead\n\n :param verbose: If True, print a \"full\" checking of each parameter\n :type verbose: bool\n :param step: The size of the step around which to linearise the objective\n :type step: float (default 1e-6)\n :param tolerance: the tolerance allowed (see note)\n :type tolerance: float (default 1e-3)\n\n Note:-\n The gradient is considered correct if the ratio of the analytical\n and numerical gradients is within <tolerance> of unity.\n \"\"\"\n try:\n import numdifftools as nd\n except:\n raise ImportError(\"Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests\")\n\n if target_param:\n raise NotImplementedError('Only basic functionality is provided with this gradchecker')\n\n #Repeat for each parameter, not the nicest but shouldn't be many cases where there are many\n #variables\n current_index = 0\n for name, shape in zip(self.names, self.shapes):\n current_size = numpy.prod(shape)\n x = self.optimizer_array.copy()\n #x = self._get_params_transformed().copy()\n x = x[current_index:current_index + current_size].reshape(shape)\n\n # Check gradients\n analytic_hess = self._ddf(x)\n if analytic_hess.shape[1] == 1:\n analytic_hess = numpy.diagflat(analytic_hess)\n\n #From the docs:\n #x0 : vector location\n #at which to differentiate fun\n #If x0 is an N x M array, then fun is assumed to be a function\n #of N*M variables., thus we must have it flat, not (N,1), but just (N,)\n #numeric_hess_partial = nd.Hessian(self._f, vectorized=False)\n numeric_hess_partial = nd.Jacobian(self._df, vectorized=False)\n #numeric_hess_partial = nd.Derivative(self._df, vectorized=True)\n numeric_hess = numeric_hess_partial(x)\n\n check_passed = self.checkgrad_block(analytic_hess, numeric_hess, verbose=verbose, step=step, 
tolerance=tolerance, block_indices=block_indices, plot=plot)\n current_index += current_size\n return check_passed\n\n def checkgrad_block(self, analytic_hess, numeric_hess, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False):\n \"\"\"\n Checkgrad a block matrix\n \"\"\"\n if analytic_hess.dtype is np.dtype('object'):\n #Make numeric hessian also into a block matrix\n real_size = get_block_shapes(analytic_hess)\n num_elements = np.sum(real_size)\n if (num_elements, num_elements) == numeric_hess.shape:\n #If the sizes are the same we assume they are the same\n #(we have not fixed any values so the numeric is the whole hessian)\n numeric_hess = get_blocks(numeric_hess, real_size)\n else:\n #Make a fake empty matrix and fill out the correct block\n tmp_numeric_hess = get_blocks(np.zeros((num_elements, num_elements)), real_size)\n tmp_numeric_hess[block_indices] = numeric_hess.copy()\n numeric_hess = tmp_numeric_hess\n\n if block_indices is not None:\n #Extract the right block\n analytic_hess = analytic_hess[block_indices]\n numeric_hess = numeric_hess[block_indices]\n else:\n #Unblock them if they are in blocks and you aren't checking a single block (checking whole hessian)\n if analytic_hess.dtype is np.dtype('object'):\n analytic_hess = unblock(analytic_hess)\n numeric_hess = unblock(numeric_hess)\n\n ratio = numeric_hess / (numpy.where(analytic_hess==0, 1e-10, analytic_hess))\n difference = numpy.abs(analytic_hess - numeric_hess)\n\n check_passed = numpy.all((numpy.abs(1 - ratio)) < tolerance) or numpy.allclose(numeric_hess, analytic_hess, atol = tolerance)\n\n if verbose:\n if block_indices:\n print(\"\\nBlock {}\".format(block_indices))\n else:\n print(\"\\nAll blocks\")\n\n header = ['Checked', 'Max-Ratio', 'Min-Ratio', 'Min-Difference', 'Max-Difference']\n header_string = map(lambda x: ' | '.join(header), [header])\n separator = '-' * len(header_string[0])\n print('\\n'.join([header_string[0], separator]))\n min_r = '%.6f' % float(numpy.min(ratio))\n max_r = '%.6f' % float(numpy.max(ratio))\n max_d = '%.6f' % float(numpy.max(difference))\n min_d = '%.6f' % float(numpy.min(difference))\n cols = [max_r, min_r, min_d, max_d]\n\n if check_passed:\n checked = \"\\033[92m True \\033[0m\"\n else:\n checked = \"\\033[91m False \\033[0m\"\n\n grad_string = \"{} | {} | {} | {} | {} \".format(checked, cols[0], cols[1], cols[2], cols[3])\n print(grad_string)\n\n if plot:\n from matplotlib import pyplot as pb\n fig, axes = pb.subplots(2, 2)\n max_lim = numpy.max(numpy.vstack((analytic_hess, numeric_hess)))\n min_lim = numpy.min(numpy.vstack((analytic_hess, numeric_hess)))\n msa = axes[0,0].matshow(analytic_hess, vmin=min_lim, vmax=max_lim)\n axes[0,0].set_title('Analytic hessian')\n axes[0,0].xaxis.set_ticklabels([None])\n axes[0,0].yaxis.set_ticklabels([None])\n axes[0,0].xaxis.set_ticks([None])\n axes[0,0].yaxis.set_ticks([None])\n msn = axes[0,1].matshow(numeric_hess, vmin=min_lim, vmax=max_lim)\n pb.colorbar(msn, ax=axes[0,1])\n axes[0,1].set_title('Numeric hessian')\n axes[0,1].xaxis.set_ticklabels([None])\n axes[0,1].yaxis.set_ticklabels([None])\n axes[0,1].xaxis.set_ticks([None])\n axes[0,1].yaxis.set_ticks([None])\n msr = axes[1,0].matshow(ratio)\n pb.colorbar(msr, ax=axes[1,0])\n axes[1,0].set_title('Ratio')\n axes[1,0].xaxis.set_ticklabels([None])\n axes[1,0].yaxis.set_ticklabels([None])\n axes[1,0].xaxis.set_ticks([None])\n axes[1,0].yaxis.set_ticks([None])\n msd = axes[1,1].matshow(difference)\n pb.colorbar(msd, ax=axes[1,1])\n axes[1,1].set_title('difference')\n 
axes[1,1].xaxis.set_ticklabels([None])\n axes[1,1].yaxis.set_ticklabels([None])\n axes[1,1].xaxis.set_ticks([None])\n axes[1,1].yaxis.set_ticks([None])\n if block_indices:\n fig.suptitle(\"Block: {}\".format(block_indices))\n pb.show()\n\n return check_passed\n\nclass SkewChecker(HessianChecker):\n\n def __init__(self, df, ddf, dddf, x0, names=None, *args, **kwargs):\n \"\"\"\n :param df: gradient of function\n :param ddf: Gradient of function to check (hessian)\n :param dddf: Analytical gradient function (third derivative)\n :param x0:\n Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names).\n Can be a list of arrays, if takes a list of arrays. This list will be passed\n to f and df in the same order as given here.\n If only one argument, make sure not to pass a list!!!\n\n :type x0: [array-like] | array-like | float | int\n :param names:\n Names to print, when performing gradcheck. If a list was passed to x0\n a list of names with the same length is expected.\n :param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs)\n\n \"\"\"\n super(SkewChecker, self).__init__(df, ddf, dddf, x0, names=names, *args, **kwargs)\n\n def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False, super_plot=False):\n \"\"\"\n Gradient checker that just checks each hessian individually\n\n super_plot will plot the hessian wrt every parameter, plot will just do the first one\n \"\"\"\n try:\n import numdifftools as nd\n except:\n raise ImportError(\"Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests\")\n\n if target_param:\n raise NotImplementedError('Only basic functionality is provided with this gradchecker')\n\n #Repeat for each parameter, not the nicest but shouldn't be many cases where there are many\n #variables\n current_index = 0\n for name, n_shape in zip(self.names, self.shapes):\n current_size = numpy.prod(n_shape)\n x = self.optimizer_array.copy()\n #x = self._get_params_transformed().copy()\n x = x[current_index:current_index + current_size].reshape(n_shape)\n\n # Check gradients\n #Actually the third derivative\n analytic_hess = self._ddf(x)\n\n #Can only calculate jacobian for one variable at a time\n #From the docs:\n #x0 : vector location\n #at which to differentiate fun\n #If x0 is an N x M array, then fun is assumed to be a function\n #of N*M variables., thus we must have it flat, not (N,1), but just (N,)\n #numeric_hess_partial = nd.Hessian(self._f, vectorized=False)\n #Actually _df is already the hessian\n numeric_hess_partial = nd.Jacobian(self._df, vectorized=True)\n numeric_hess = numeric_hess_partial(x)\n\n print(\"Done making numerical hessian\")\n if analytic_hess.dtype is np.dtype('object'):\n #Blockify numeric_hess aswell\n blocksizes, pagesizes = get_block_shapes_3d(analytic_hess)\n #HACK\n real_block_size = np.sum(blocksizes)\n numeric_hess = numeric_hess.reshape(real_block_size, real_block_size, pagesizes)\n #numeric_hess = get_blocks_3d(numeric_hess, blocksizes)#, pagesizes)\n else:\n numeric_hess = numeric_hess.reshape(*analytic_hess.shape)\n\n #Check every block individually (for ease)\n check_passed = [False]*numeric_hess.shape[2]\n for block_ind in range(numeric_hess.shape[2]):\n #Unless super_plot is set, just plot the first one\n p = True if (plot and block_ind == numeric_hess.shape[2]-1) or super_plot else False\n if verbose:\n print(\"Checking derivative of hessian wrt parameter number 
{}\".format(block_ind))\n check_passed[block_ind] = self.checkgrad_block(analytic_hess[:,:,block_ind], numeric_hess[:,:,block_ind], verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=p)\n\n current_index += current_size\n return np.all(check_passed)\n\n"
] | [
[
"numpy.log",
"numpy.unique",
"numpy.ones",
"numpy.argmax",
"numpy.array",
"numpy.zeros"
],
[
"numpy.abs",
"numpy.allclose",
"numpy.min",
"numpy.diagflat",
"matplotlib.pyplot.subplots",
"numpy.atleast_1d",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.prod",
"matplotlib.pyplot.show",
"numpy.where",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
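The GradientChecker in the entry above validates analytic gradients by checking that the ratio of analytic to numerical gradients stays close to 1. A minimal standalone sketch of that idea, assuming only numpy (this is not GPy's API; `numeric_grad` and `checkgrad` are illustrative names):

# Standalone sketch (not the GPy API): central-difference gradient check,
# mirroring the "ratio of analytic to numerical gradient close to 1" criterion.
import numpy as np

def numeric_grad(f, x, step=1e-6):
    """Central finite differences of a scalar-valued f at a 1-D point x."""
    x = np.asarray(x, dtype=float)
    g = np.zeros_like(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = step
        g[i] = (f(x + e) - f(x - e)) / (2.0 * step)
    return g

def checkgrad(f, df, x, tolerance=1e-3):
    analytic = np.asarray(df(x), dtype=float)
    numeric = numeric_grad(f, x)
    ratio = numeric / np.where(analytic == 0, 1e-10, analytic)
    return bool(np.all(np.abs(1.0 - ratio) < tolerance) or
                np.allclose(numeric, analytic, atol=tolerance))

if __name__ == "__main__":
    x0 = np.random.rand(5)
    # f(x) = sum(sin(x)), so df/dx = cos(x); the check should pass.
    print(checkgrad(lambda x: np.sin(x).sum(), np.cos, x0))  # expected: True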
swpucwf/Deeplearning | [
"be19885d52b7ce8782949d931a1b2994de36679f",
"be19885d52b7ce8782949d931a1b2994de36679f"
] | [
"OpenCV/video_flow.py",
"Detection-Pytorch/backbone/VGG.py"
] | [
"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('car.mp4')\n\n# params for ShiTomasi corner detection\nfeature_params = dict(maxCorners=100,\n qualityLevel=0.3,\n minDistance=7,\n blockSize=7)\n\n# Parameters for lucas kanade optical flow\nlk_params = dict(winSize=(15, 15),\n maxLevel=2,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n# Create some random colors\ncolor = np.random.randint(0, 255, (100, 3))\n\n# Take first frame and find corners in it\nret, old_frame = cap.read()\nold_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\np0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n\n# Create a mask image for drawing purposes\nmask = np.zeros_like(old_frame)\n\nwhile (1):\n ret, frame = cap.read()\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # calculate optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n\n # draw the tracks\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n\n cv2.imshow('frame', img)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\n # Now update the previous frame and previous points\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n\ncv2.destroyAllWindows()\ncap.release()\n",
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n \nclass VGG11(nn.Module):\n def __init__(self,num_classes=1000):\n super(VGG11, self).__init__()\n in_dims = 3\n out_dims =64\n layers = []\n for i in range(8):\n layers+=[nn.Conv2d(in_dims,out_dims,3,1,1),nn.ReLU(inplace=True)]\n in_dims = out_dims\n if i in [0,1,3,5,7]:\n layers+=[nn.MaxPool2d(kernel_size=2,stride=2)]\n if i!=5:\n out_dims*=2\n self.layer = nn.Sequential(*layers)\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096, num_classes)\n )\n def forward(self,x):\n x = self.layer(x)\n x = x.reshape(x.size(0),-1)\n return self.classifier(x)\nclass VGG13(nn.Module):\n def __init__(self, num_classes=1000):\n super(VGG13, self).__init__()\n in_dims = 3\n out_dims = 64\n layers = []\n for i in range(10):\n layers += [nn.Conv2d(in_dims, out_dims, 3, 1, 1), nn.ReLU(inplace=True)]\n in_dims = out_dims\n if i in [1, 3, 5, 7, 9]:\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n if i != 7:\n out_dims *= 2\n self.layer = nn.Sequential(*layers)\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096, num_classes)\n )\n\n def forward(self, x):\n\n x = self.layer(x)\n x = x.reshape(x.size(0), -1)\n return self.classifier(x)\nclass VGG16_1(nn.Module):\n\n\n def __init__(self,num_classes=1000):\n super(VGG16_1, self).__init__()\n layers = []\n in_dims = 3\n out_dims = 64\n\n for i in range(13):\n if i==6:\n layers+=[nn.Conv2d(in_dims,out_dims,1,1,1),nn.ReLU(inplace=True)]\n else:\n layers+=[nn.Conv2d(in_dims,out_dims,3,1,1),nn.ReLU(inplace=True)]\n in_dims = out_dims\n if i in [1,3,6,9,12]:\n layers+=[nn.MaxPool2d(kernel_size=2,stride=2)]\n if i!=9:\n out_dims*=2\n\n self.features = nn.Sequential(*layers)\n self.classifier = nn.Sequential(\n nn.Linear(512*7*7,4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096,4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096,num_classes)\n )\n def forward(self,x):\n\n x = self.features(x)\n print(x.shape)\n x = x.reshape(x.size(0),-1)\n x = self.classifier(x)\n return x\nclass VGG16_3(nn.Module):\n def __init__(self,num_classes=1000):\n super(VGG16_3, self).__init__()\n layers = []\n in_dims = 3\n out_dims = 64\n\n for i in range(13):\n\n layers+=[nn.Conv2d(in_dims,out_dims,3,1,1),nn.ReLU(inplace=True)]\n in_dims = out_dims\n if i in [1,3,6,9,12]:\n layers+=[nn.MaxPool2d(kernel_size=2,stride=2)]\n if i!=9:\n out_dims*=2\n\n self.features = nn.Sequential(*layers)\n self.classifier = nn.Sequential(\n nn.Linear(512*7*7,4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096,4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096,num_classes)\n )\n def forward(self,x):\n\n x = self.features(x)\n x = x.reshape(x.size(0),-1)\n x = self.classifier(x)\n return x\nclass VGG19(nn.Module):\n\n def __init__(self, num_classes=1000):\n\n super(VGG19, self).__init__()\n layers = []\n in_dims = 3\n out_dims = 64\n\n for i in range(16):\n\n layers += [nn.Conv2d(in_dims, out_dims, 3, 1, 1), nn.ReLU(inplace=True)]\n in_dims = out_dims\n if i in [1, 3, 7, 11, 15]:\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n if i != 11:\n out_dims *= 2\n\n self.features = nn.Sequential(*layers)\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n 
nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096, num_classes)\n )\n\n def forward(self, x):\n\n x = self.features(x)\n x = x.reshape(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n\n # input = torch.randn(1,3,224,224).cuda()\n # vgg = VGG16_3().cuda()\n # print(vgg)\n # print(vgg(input).shape)\n # scores = vgg(input)\n # print(scores)\n # input = torch.randn(1, 3, 224, 224).cuda()\n # vgg = VGG11().cuda()\n # print(vgg(input).shape)\n # input = torch.randn(1, 3, 224, 224).cuda()\n # vgg = VGG13().cuda()\n # print(vgg(input).shape)\n # input = torch.randn(1,3,224,224).cuda()\n # vgg = VGG19().cuda()\n # print(vgg)\n # print(vgg(input).shape)\n net = InceptionV1(3,64,32,64,64,96,32).cuda()\n # print(net)\n input = torch.randn(1,3,256,256).cuda()\n print(net(input).shape)"
] | [
[
"numpy.zeros_like",
"numpy.random.randint"
],
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
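The Lucas-Kanade demo in the entry above draws tracks using the float coordinates returned by `ravel()`; recent OpenCV builds reject non-integer points in `cv2.line` / `cv2.circle`. A hedged sketch of the same drawing step with explicit int casts, assuming `opencv-python` and numpy are installed (`draw_tracks` is an illustrative helper, not part of the original script):

# Hedged sketch: the per-point track-drawing step with explicit int casts,
# since newer OpenCV versions require integer pixel coordinates.
import numpy as np
import cv2

def draw_tracks(frame, mask, good_new, good_old, colors):
    """Draw motion segments on `mask` and current points on `frame`."""
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = (int(v) for v in new.ravel())
        c, d = (int(v) for v in old.ravel())
        mask = cv2.line(mask, (a, b), (c, d), colors[i].tolist(), 2)
        frame = cv2.circle(frame, (a, b), 5, colors[i].tolist(), -1)
    return cv2.add(frame, mask)

if __name__ == "__main__":
    # Synthetic data so the sketch runs without a video file.
    frame = np.zeros((120, 160, 3), np.uint8)
    mask = np.zeros_like(frame)
    new = np.random.rand(10, 1, 2).astype(np.float32) * 100
    old = new + 5.0
    colors = np.random.randint(0, 255, (10, 3))
    out = draw_tracks(frame, mask, new, old, colors)
    print(out.shape)  # (120, 160, 3)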
vita-epfl/pedestrian-transition-dataset | [
"7e1b723a37289850b5ef8628e6881845a24912f9"
] | [
"src/dataset/loader.py"
] | [
"import os\nimport copy\nimport PIL\nimport torch\nimport torchvision\nimport numpy as np\nimport math\n\nimport logging\nfrom typing import List\n\nLOG = logging.getLogger(__name__)\n\n\ndef define_path(use_jaad=True, use_pie=True, use_titan=True):\n \"\"\"\n Define the correct paths to datasets'annotations and images\n \"\"\"\n all_anns_paths = {'JAAD': {'anns': '../../DATA/annotations/JAAD/JAAD_DATA.pkl', \n 'split': '../../DATA/annotations/JAAD/splits'},\n 'PIE': {'anns': '../../DATA/annotations/PIE/PIE_DATA.pkl'},\n 'TITAN': {'anns': '../../DATA/annotations/TITAN/titan_0_4',\n 'split':'../../DATA/annotations/TITAN/splits' }\n }\n all_image_dir = {'JAAD': '../../DATA/JAAD/images/',\n 'PIE': '../../DATA/PIE/images/',\n 'TITAN': '../../DATA/TITAN/images_anonymized/'\n }\n anns_paths = {}\n image_dir = {}\n if use_jaad:\n anns_paths['JAAD'] = all_anns_paths['JAAD']\n image_dir['JAAD'] = all_image_dir['JAAD']\n if use_pie:\n anns_paths['PIE'] = all_anns_paths['PIE']\n image_dir['PIE'] = all_image_dir['PIE']\n if use_titan:\n anns_paths['TITAN'] = all_anns_paths['TITAN']\n image_dir['TITAN'] = all_image_dir['TITAN']\n\n return anns_paths, image_dir\n \n\nclass ImageList(torch.utils.data.Dataset):\n \"\"\"\n Basic dataloader for images\n \"\"\"\n\n def __init__(self, image_paths, preprocess=None):\n self.image_paths = image_paths\n self.preprocess = preprocess\n\n def __getitem__(self, index):\n image_path = self.image_paths[index]\n with open(image_path, 'rb') as f:\n image = PIL.Image.open(f).convert('RGB')\n if self.preprocess is not None:\n image = self.preprocess(image)\n\n return image\n\n def __len__(self):\n return len(self.image_paths)\n\n\nclass MultiLoader:\n # Class for loading data from mulitple datasets\n last_task_index = None\n \n\n def __init__(self, loaders: List[torch.utils.data.DataLoader], \n weights=None, n_batches=None):\n \n self.loaders = loaders\n self._weights = weights\n\n if self._weights is None:\n self._weights = [1.0 / len(loaders) for _ in range(len(loaders))]\n elif len(self._weights) == len(loaders) - 1:\n self._weights.append(1.0 - sum(self._weights))\n elif len(self._weights) == len(loaders):\n pass\n else:\n raise Exception('invalid dataset weights: {}'.format(self._weights))\n assert all(w > 0.0 for w in self._weights)\n sum_w = sum(self._weights)\n # normalize weights between datasets\n self._weights = [w / sum_w for w in self._weights]\n LOG.info('dataset weights: %s', self._weights)\n # set the total number of batches in one epoch\n self.n_batches = int(min(len(l) / w for l, w in zip(loaders, self._weights)))\n if n_batches is not None:\n self.n_batches = min(self.n_batches, n_batches)\n\n def __iter__(self):\n loader_iters = [iter(l) for l in self.loaders]\n # counter of loaded batches for each dataset\n n_loaded = [0 for _ in self.loaders]\n while True:\n # select loader for one iteration\n loader_index = int(np.argmin([n / w for n, w in zip(n_loaded, self._weights)]))\n next_batch = next(loader_iters[loader_index], None)\n if next_batch is None:\n break\n n_loaded[loader_index] += 1\n MultiLoader.last_task_index = loader_index\n # generator\n yield next_batch\n # termination\n if sum(n_loaded) >= self.n_batches:\n break\n\n def __len__(self):\n return self.n_batches\n \n\nclass FrameDataset(torch.utils.data.Dataset):\n\n def __init__(self, samples, image_dir, preprocess=None):\n self.samples = samples\n self.image_dir = image_dir\n self.preprocess = preprocess\n\n def __getitem__(self, index):\n ids = list(self.samples.keys())\n idx = ids[index]\n 
frame = self.samples[idx]['frame']\n bbox = copy.deepcopy(self.samples[idx]['bbox'])\n source = self.samples[idx][\"source\"]\n anns = {'bbox': bbox, 'source': source}\n TTE = self.samples[idx][\"TTE\"]\n if 'trans_label' in list(self.samples[idx].keys()):\n label = self.samples[idx]['trans_label']\n else:\n label = None\n if 'behavior' in list(self.samples[idx].keys()):\n behavior = self.samples[idx]['behavior']\n else:\n behavior = [-1,-1,-1,-1] # no behavior annotations\n if 'attributes' in list(self.samples[idx].keys()):\n attributes = self.samples[idx]['attributes'] # scene attributes\n else:\n attributes = [-1,-1,-1,-1,-1,-1]\n image_path = None\n # image paths\n if source == \"JAAD\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['JAAD'], vid, '{:05d}.png'.format(frame))\n elif source == \"PIE\":\n vid = self.samples[idx]['video_number']\n sid = self.samples[idx]['set_number']\n image_path = os.path.join(self.image_dir['PIE'], sid, vid, '{:05d}.png'.format(frame))\n elif source == \"TITAN\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['TITAN'], vid, 'images', '{:06}.png'.format(frame))\n\n with open(image_path, 'rb') as f:\n img = PIL.Image.open(f).convert('RGB')\n if self.preprocess is not None:\n img, anns = self.preprocess(img, anns)\n img_tensor = torchvision.transforms.ToTensor()(img)\n if label is not None:\n label = torch.tensor(label)\n label = label.to(torch.float32)\n \n if math.isnan(TTE):\n pass\n else:\n TTE = round(self.samples[idx][\"TTE\"],2)\n TTE = torch.tensor(TTE).to(torch.float32)\n attributes = torch.tensor(attributes).to(torch.float32)\n sample = {'image': img_tensor, 'bbox': anns['bbox'], 'id': idx,\n 'label': label, 'source': source, 'TTE': TTE,\n 'attributes': attributes, 'behavior': behavior\n }\n\n return sample\n\n def __len__(self):\n return len(self.samples.keys())\n\n\nclass SequenceDataset(torch.utils.data.Dataset):\n \"\"\"\n Basic dataloader for loading sequence/history samples\n \"\"\"\n\n def __init__(self, samples, image_dir, preprocess=None):\n \"\"\"\n :params: samples: transition history samples(dict)\n image_dir: root dir for images extracted from video clips\n preprocess: optional preprocessing on image tensors and annotations\n \"\"\"\n self.samples = samples\n self.image_dir = image_dir\n self.preprocess = preprocess\n\n def __getitem__(self, index):\n ids = list(self.samples.keys())\n idx = ids[index]\n frames = self.samples[idx]['frame']\n bbox = copy.deepcopy(self.samples[idx]['bbox'])\n source = self.samples[idx][\"source\"]\n action = self.samples[idx]['action']\n TTE = round(self.samples[idx][\"TTE\"],2)\n if 'trans_label' in list(self.samples[idx].keys()):\n label = self.samples[idx]['trans_label']\n else:\n label = None\n bbox_new= []\n image_path = None\n # image paths\n img_tensors = []\n for i in range(len(frames)):\n anns = {'bbox': bbox[i], 'source': source}\n if source == \"JAAD\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['JAAD'], vid, '{:05d}.png'.format(frames[i]))\n elif source == \"PIE\":\n vid = self.samples[idx]['video_number']\n sid = self.samples[idx]['set_number']\n image_path = os.path.join(self.image_dir['PIE'], sid, vid, '{:05d}.png'.format(frames[i]))\n elif source == \"TITAN\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['TITAN'], vid, 'images', '{:06}.png'.format(frames[i]))\n with open(image_path, 'rb') as f:\n img = 
PIL.Image.open(f).convert('RGB')\n if self.preprocess is not None:\n img, anns = self.preprocess(img, anns)\n img_tensors.append(torchvision.transforms.ToTensor()(img))\n bbox_new.append(anns['bbox'])\n img_tensors = torch.stack(img_tensors)\n if label is not None:\n label = torch.tensor(label)\n label = label.to(torch.float32)\n sample = {'image': img_tensors, 'bbox': bbox_new, 'action': action, 'id': idx, 'label': label, 'source': source, 'TTE': TTE }\n\n return sample\n\n def __len__(self):\n return len(self.samples.keys())\n\n\nclass PaddedSequenceDataset(torch.utils.data.Dataset):\n \"\"\"\n Dataloader for loading sequence/history samples,\n all sequences are padded to unify the length\n \"\"\"\n\n def __init__(self, samples, image_dir, padded_length=10, preprocess=None, hflip_p=0.0):\n \"\"\"\n :params: samples: transition history samples(dict)\n image_dir: root dir for images extracted from video clips\n padded_length: length of each sequence after padded\n preprocess: optional preprocessing on image tensors and annotations\n \"\"\"\n self.samples = samples\n self.image_dir = image_dir\n self.preprocess = preprocess\n self.padded_length = padded_length\n self.hflip_p = hflip_p\n\n def __getitem__(self, index):\n ids = list(self.samples.keys())\n idx = ids[index]\n frames = self.samples[idx]['frame']\n bbox = copy.deepcopy(self.samples[idx]['bbox'])\n source = self.samples[idx][\"source\"]\n action = self.samples[idx]['action']\n TTE = self.samples[idx][\"TTE\"]\n if source == \"PIE\":\n set_number = self.samples[idx]['set_number']\n else:\n set_number = None\n if 'trans_label' in list(self.samples[idx].keys()):\n label = self.samples[idx]['trans_label']\n else:\n label = None\n if 'behavior' in list(self.samples[idx].keys()):\n behavior = self.samples[idx]['behavior']\n else:\n behavior = [-1,-1,-1,-1]\n if 'attributes' in list(self.samples[idx].keys()):\n attributes = self.samples[idx]['attributes']\n else:\n attributes = [-1,-1,-1,-1,-1,-1]\n bbox_new = []\n bbox_ped_new = []\n image_path = None\n # image paths\n img_tensors = []\n hflip = True if float(torch.rand(1).item()) < self.hflip_p else False\n for i in range(len(frames)):\n anns = {'bbox': bbox[i], 'source': source}\n if source == \"JAAD\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['JAAD'], vid, '{:05d}.png'.format(frames[i]))\n elif source == \"PIE\":\n vid = self.samples[idx]['video_number']\n sid = self.samples[idx]['set_number']\n image_path = os.path.join(self.image_dir['PIE'], sid, vid, '{:05d}.png'.format(frames[i]))\n elif source == \"TITAN\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['TITAN'], vid, 'images', '{:06}.png'.format(frames[i]))\n with open(image_path, 'rb') as f:\n img = PIL.Image.open(f).convert('RGB')\n if hflip:\n img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n w, h = img.size\n x_max = w - anns['bbox'][0]\n x_min = w - anns['bbox'][2]\n anns['bbox'][0] = x_min\n anns['bbox'][2] = x_max\n anns['bbox_ped'] = copy.deepcopy(anns['bbox'])\n if self.preprocess is not None:\n img, anns = self.preprocess(img, anns)\n img_tensors.append(torchvision.transforms.ToTensor()(img))\n bbox_new.append(anns['bbox'])\n bbox_ped_new.append(anns['bbox_ped'])\n \n img_tensors = torch.stack(img_tensors)\n imgs_size = img_tensors.size()\n img_tensors_padded = torch.zeros((self.padded_length, imgs_size[1], imgs_size[2], imgs_size[3]))\n img_tensors_padded[:imgs_size[0], :, :, :] = img_tensors\n bbox_new_padded = copy.deepcopy(bbox_new)\n 
bbox_ped_new_padded = copy.deepcopy(bbox_ped_new)\n action_padded = copy.deepcopy(action)\n behavior_padded = copy.deepcopy(behavior)\n for i in range(imgs_size[0],self.padded_length):\n bbox_new_padded.append([0,0,0,0])\n bbox_ped_new_padded.append([0,0,0,0])\n action_padded.append(-1)\n behavior_padded.append([-1,-1,-1,-1])\n # seq_len = torch.squeeze(torch.LongTensor(imgs_size[0]))\n seq_len = imgs_size[0]\n if label is not None:\n label = torch.tensor(label)\n label = label.to(torch.float32)\n TTE_tag = -1\n if math.isnan(TTE):\n pass\n else:\n TTE = round(self.samples[idx][\"TTE\"],2)\n TTE = torch.tensor(TTE).to(torch.float32)\n TTE_tag = torch.tensor(TTE_tag)\n TTE_tag = TTE_tag.to(torch.float32)\n attributes = torch.tensor(attributes).to(torch.float32)\n sample = {'image': img_tensors_padded, 'bbox': bbox_new_padded, 'bbox_ped': bbox_ped_new_padded,\n 'seq_length': seq_len, 'action': action_padded, 'id': idx, 'label': label,\n 'source': source, 'TTE': TTE, \n 'behavior': behavior_padded, 'attributes': attributes}\n\n return sample\n\n def __len__(self):\n return len(self.samples.keys())\n \n\n"
] | [
[
"torch.stack",
"torch.tensor",
"torch.rand",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
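The MultiLoader in the entry above interleaves several DataLoaders by always drawing the next batch from the source with the smallest loaded/weight ratio. A standalone sketch of that scheduling rule with plain lists in place of DataLoaders, assuming only numpy (`interleave` is an illustrative name):

# Standalone sketch of the MultiLoader scheduling rule: pick the source whose
# loaded/weight ratio is smallest, so long-run draws follow the weights.
import numpy as np

def interleave(sources, weights, n_batches):
    weights = [w / sum(weights) for w in weights]   # normalize
    iters = [iter(s) for s in sources]
    loaded = [0] * len(sources)
    out = []
    while sum(loaded) < n_batches:
        i = int(np.argmin([n / w for n, w in zip(loaded, weights)]))
        batch = next(iters[i], None)
        if batch is None:
            break
        loaded[i] += 1
        out.append((i, batch))
    return out

if __name__ == "__main__":
    a = ["a%d" % k for k in range(6)]
    b = ["b%d" % k for k in range(3)]
    print(interleave([a, b], weights=[2.0, 1.0], n_batches=6))
    # roughly two items from `a` for every item from `b`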
mizolotu/DonkeyCarExperiments | [
"3d6be742915efe51c0f5abda4c69a4349a555373",
"3d6be742915efe51c0f5abda4c69a4349a555373"
] | [
"reinforcement_learning/her/utils.py",
"reinforcement_learning/common/math_util.py"
] | [
"from collections import OrderedDict\n\nimport numpy as np\nfrom reinforcement_learning.gym import spaces\n\n# Important: gym mixes up ordered and unordered keys\n# and the Dict space may return a different order of keys that the actual one\nKEY_ORDER = ['observation', 'achieved_goal', 'desired_goal']\n\n\nclass HERGoalEnvWrapper(object):\n \"\"\"\n A wrapper that allow to use dict observation space (coming from GoalEnv) with\n the RL algorithms.\n It assumes that all the spaces of the dict space are of the same type.\n\n :param env: (gym.GoalEnv)\n \"\"\"\n\n def __init__(self, env):\n super(HERGoalEnvWrapper, self).__init__()\n self.env = env\n self.metadata = self.env.metadata\n self.action_space = env.action_space\n self.spaces = list(env.observation_space.spaces.values())\n # Check that all spaces are of the same type\n # (current limitation of the wrapper)\n space_types = [type(env.observation_space.spaces[key]) for key in KEY_ORDER]\n assert len(set(space_types)) == 1, \"The spaces for goal and observation\"\\\n \" must be of the same type\"\n\n if isinstance(self.spaces[0], spaces.Discrete):\n self.obs_dim = 1\n self.goal_dim = 1\n else:\n goal_space_shape = env.observation_space.spaces['achieved_goal'].shape\n self.obs_dim = env.observation_space.spaces['observation'].shape[0]\n self.goal_dim = goal_space_shape[0]\n\n if len(goal_space_shape) == 2:\n assert goal_space_shape[1] == 1, \"Only 1D observation spaces are supported yet\"\n else:\n assert len(goal_space_shape) == 1, \"Only 1D observation spaces are supported yet\"\n\n if isinstance(self.spaces[0], spaces.MultiBinary):\n total_dim = self.obs_dim + 2 * self.goal_dim\n self.observation_space = spaces.MultiBinary(total_dim)\n\n elif isinstance(self.spaces[0], spaces.Box):\n lows = np.concatenate([space.low for space in self.spaces])\n highs = np.concatenate([space.high for space in self.spaces])\n self.observation_space = spaces.Box(lows, highs, dtype=np.float32)\n\n elif isinstance(self.spaces[0], spaces.Discrete):\n dimensions = [env.observation_space.spaces[key].n for key in KEY_ORDER]\n self.observation_space = spaces.MultiDiscrete(dimensions)\n\n else:\n raise NotImplementedError(\"{} space is not supported\".format(type(self.spaces[0])))\n\n def convert_dict_to_obs(self, obs_dict):\n \"\"\"\n :param obs_dict: (dict<np.ndarray>)\n :return: (np.ndarray)\n \"\"\"\n # Note: achieved goal is not removed from the observation\n # this is helpful to have a revertible transformation\n if isinstance(self.observation_space, spaces.MultiDiscrete):\n # Special case for multidiscrete\n return np.concatenate([[int(obs_dict[key])] for key in KEY_ORDER])\n return np.concatenate([obs_dict[key] for key in KEY_ORDER])\n\n def convert_obs_to_dict(self, observations):\n \"\"\"\n Inverse operation of convert_dict_to_obs\n\n :param observations: (np.ndarray)\n :return: (OrderedDict<np.ndarray>)\n \"\"\"\n return OrderedDict([\n ('observation', observations[:self.obs_dim]),\n ('achieved_goal', observations[self.obs_dim:self.obs_dim + self.goal_dim]),\n ('desired_goal', observations[self.obs_dim + self.goal_dim:]),\n ])\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n return self.convert_dict_to_obs(obs), reward, done, info\n\n def seed(self, seed=None):\n return self.env.seed(seed)\n\n def reset(self):\n return self.convert_dict_to_obs(self.env.reset())\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n return self.env.compute_reward(achieved_goal, desired_goal, info)\n\n def render(self, 
mode='human'):\n return self.env.render(mode)\n\n def close(self):\n return self.env.close()\n",
"import numpy as np\nimport scipy.signal\n\n\ndef safe_mean(arr):\n \"\"\"\n Compute the mean of an array if there is at least one element.\n For empty array, return nan. It is used for logging only.\n\n :param arr: (np.ndarray)\n :return: (float)\n \"\"\"\n return np.nan if len(arr) == 0 else np.nanmean(arr, axis=0)\n\n\ndef discount(vector, gamma):\n \"\"\"\n computes discounted sums along 0th dimension of vector x.\n y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],\n where k = len(x) - t - 1\n\n :param vector: (np.ndarray) the input vector\n :param gamma: (float) the discount value\n :return: (np.ndarray) the output vector\n \"\"\"\n assert vector.ndim >= 1\n return scipy.signal.lfilter([1], [1, -gamma], vector[::-1], axis=0)[::-1]\n\n\ndef explained_variance(y_pred, y_true):\n \"\"\"\n Computes fraction of variance that ypred explains about y.\n Returns 1 - Var[y-ypred] / Var[y]\n\n interpretation:\n ev=0 => might as well have predicted zero\n ev=1 => perfect prediction\n ev<0 => worse than just predicting zero\n\n :param y_pred: (np.ndarray) the prediction\n :param y_true: (np.ndarray) the expected value\n :return: (float) explained variance of ypred and y\n \"\"\"\n assert y_true.ndim == 1 and y_pred.ndim == 1\n var_y = np.var(y_true)\n return np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y\n\n\ndef explained_variance_2d(y_pred, y_true):\n \"\"\"\n Computes fraction of variance that ypred explains about y, for 2D arrays.\n Returns 1 - Var[y-ypred] / Var[y]\n\n interpretation:\n ev=0 => might as well have predicted zero\n ev=1 => perfect prediction\n ev<0 => worse than just predicting zero\n\n :param y_pred: (np.ndarray) the prediction\n :param y_true: (np.ndarray) the expected value\n :return: (float) explained variance of ypred and y\n \"\"\"\n assert y_true.ndim == 2 and y_pred.ndim == 2\n var_y = np.var(y_true, axis=0)\n explained_var = 1 - np.var(y_true - y_pred) / var_y\n explained_var[var_y < 1e-10] = 0\n return explained_var\n\n\ndef flatten_arrays(arrs):\n \"\"\"\n flattens a list of arrays down to 1D\n\n :param arrs: ([np.ndarray]) arrays\n :return: (np.ndarray) 1D flattened array\n \"\"\"\n return np.concatenate([arr.flat for arr in arrs])\n\n\ndef unflatten_vector(vec, shapes):\n \"\"\"\n reshape a flattened array\n\n :param vec: (np.ndarray) 1D arrays\n :param shapes: (tuple)\n :return: ([np.ndarray]) reshaped array\n \"\"\"\n i = 0\n arrs = []\n for shape in shapes:\n size = np.prod(shape)\n arr = vec[i:i + size].reshape(shape)\n arrs.append(arr)\n i += size\n return arrs\n\n\ndef discount_with_boundaries(rewards, episode_starts, gamma):\n \"\"\"\n computes discounted sums along 0th dimension of x (reward), while taking into account the start of each episode.\n y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... 
+ gamma^k x[t+k],\n where k = len(x) - t - 1\n\n :param rewards: (np.ndarray) the input vector (rewards)\n :param episode_starts: (np.ndarray) 2d array of bools, indicating when a new episode has started\n :param gamma: (float) the discount factor\n :return: (np.ndarray) the output vector (discounted rewards)\n \"\"\"\n discounted_rewards = np.zeros_like(rewards)\n n_samples = rewards.shape[0]\n discounted_rewards[n_samples - 1] = rewards[n_samples - 1]\n for step in range(n_samples - 2, -1, -1):\n discounted_rewards[step] = rewards[step] + gamma * discounted_rewards[step + 1] * (1 - episode_starts[step + 1])\n return discounted_rewards\n\n\ndef scale_action(action_space, action):\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action_space: (gym.spaces.box.Box)\n :param action: (np.ndarray)\n :return: (np.ndarray)\n \"\"\"\n low, high = action_space.low, action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n\ndef unscale_action(action_space, scaled_action):\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param action_space: (gym.spaces.box.Box)\n :param action: (np.ndarray)\n :return: (np.ndarray)\n \"\"\"\n low, high = action_space.low, action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))\n"
] | [
[
"numpy.concatenate"
],
[
"numpy.concatenate",
"numpy.zeros_like",
"numpy.nanmean",
"numpy.prod",
"numpy.var"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
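The `discount` helper in the entry above relies on `scipy.signal.lfilter([1], [1, -gamma], x[::-1])[::-1]` to compute discounted returns. A small check, assuming numpy and scipy are available, that this filter trick matches the explicit backward recursion y[t] = x[t] + gamma * y[t+1]:

# Sanity check: the lfilter trick equals the explicit discounted sum
# y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ...
import numpy as np
import scipy.signal

def discount_lfilter(x, gamma):
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

def discount_bruteforce(x, gamma):
    y = np.zeros_like(x, dtype=float)
    running = 0.0
    for t in range(len(x) - 1, -1, -1):
        running = x[t] + gamma * running
        y[t] = running
    return y

if __name__ == "__main__":
    rewards = np.random.randn(50)
    assert np.allclose(discount_lfilter(rewards, 0.99),
                       discount_bruteforce(rewards, 0.99))
    print("lfilter and brute-force discounted returns agree")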
JuliaChae/faster-rcnn.pytorch | [
"220005b5dbed1dd7e5abcfb85eee9f976a8a5f58"
] | [
"lib/detection_metric/Evaluator.py"
] | [
"###########################################################################################\n# #\n# Evaluator class: Implements the most popular metrics for object detection #\n# #\n# Developed by: Rafael Padilla ([email protected]) #\n# SMT - Signal Multimedia and Telecommunications Lab #\n# COPPE - Universidade Federal do Rio de Janeiro #\n# Last modification: Oct 9th 2018 #\n###########################################################################################\n\nimport os\nimport sys\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom detection_metric.BoundingBox import *\nfrom detection_metric.BoundingBoxes import *\nfrom detection_metric.utils import *\n\n\nclass Evaluator:\n def GetPascalVOCMetrics(self,\n boundingboxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation):\n \"\"\"Get the metrics used by the VOC Pascal 2012 challenge.\n Get\n Args:\n boundingboxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold: IOU threshold indicating which detections will be considered TP or FP\n (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation);\n Returns:\n A list of dictionaries. Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n ret = [] # list containing metrics (precision, recall, average precision) of each class\n # List with all ground truths (Ex: [imageName,class,confidence=1, (bb coordinates XYX2Y2)])\n groundTruths = []\n # List with all detections (Ex: [imageName,class,confidence,(bb coordinates XYX2Y2)])\n detections = []\n # Get all classes\n classes = []\n # Loop through all bounding boxes and separate them into GTs and detections\n for bb in boundingboxes.getBoundingBoxes():\n # [imageName, class, confidence, (bb coordinates XYX2Y2)]\n if bb.getBBType() == BBType.GroundTruth:\n groundTruths.append([\n bb.getImageName(),\n bb.getClassId(), 1,\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n ])\n else:\n detections.append([\n bb.getImageName(),\n bb.getClassId(),\n bb.getConfidence(),\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n ])\n # get class\n if bb.getClassId() not in classes:\n classes.append(bb.getClassId())\n classes = sorted(classes)\n # Precision x Recall is obtained individually by each class\n # Loop through by classes\n for c in classes:\n # Get only detection of class c\n dects = []\n [dects.append(d) for d in detections if d[1] == c]\n # Get only ground truths of class c\n gts = []\n [gts.append(g) for g in groundTruths if g[1] == c]\n npos = len(gts)\n # sort detections by decreasing confidence\n dects = sorted(dects, key=lambda conf: conf[2], reverse=True)\n TP = 
np.zeros(len(dects))\n FP = np.zeros(len(dects))\n # create dictionary with amount of gts for each image\n det = Counter([cc[0] for cc in gts])\n for key, val in det.items():\n det[key] = np.zeros(val)\n # print(\"Evaluating class: %s (%d detections)\" % (str(c), len(dects)))\n # Loop through detections\n for d in range(len(dects)):\n # print('dect %s => %s' % (dects[d][0], dects[d][3],))\n # Find ground truth image\n gt = [gt for gt in gts if gt[0] == dects[d][0]]\n iouMax = sys.float_info.min\n for j in range(len(gt)):\n # print('Ground truth gt => %s' % (gt[j][3],))\n iou = Evaluator.iou(dects[d][3], gt[j][3])\n if iou > iouMax:\n iouMax = iou\n jmax = j\n # Assign detection as true positive/don't care/false positive\n if iouMax >= IOUThreshold:\n if det[dects[d][0]][jmax] == 0:\n TP[d] = 1 # count as true positive\n det[dects[d][0]][jmax] = 1 # flag as already 'seen'\n # print(\"TP\")\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # - A detected \"cat\" is overlaped with a GT \"cat\" with IOU >= IOUThreshold.\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # compute precision, recall and average precision\n acc_FP = np.cumsum(FP)\n acc_TP = np.cumsum(TP)\n rec = acc_TP / npos\n prec = np.divide(acc_TP, (acc_FP + acc_TP))\n # Depending on the method, call the right implementation\n if method == MethodAveragePrecision.EveryPointInterpolation:\n [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)\n else:\n [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)\n # add class result in the dictionary to be returned\n r = {\n 'class': c,\n 'precision': prec,\n 'recall': rec,\n 'AP': ap,\n 'interpolated precision': mpre,\n 'interpolated recall': mrec,\n 'total positives': npos,\n 'total TP': np.sum(TP),\n 'total FP': np.sum(FP)\n }\n ret.append(r)\n return ret\n\n def PlotPrecisionRecallCurve(self,\n boundingBoxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation,\n showAP=False,\n showInterpolatedPrecision=False,\n savePath=None,\n showGraphic=True):\n \"\"\"PlotPrecisionRecallCurve\n Plot the Precision x Recall curve for a given class.\n Args:\n boundingBoxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold (optional): IOU threshold indicating which detections will be considered\n TP or FP (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation).\n showAP (optional): if True, the average precision value will be shown in the title of\n the graph (default = False);\n showInterpolatedPrecision (optional): if True, it will show in the plot the interpolated\n precision (default = False);\n savePath (optional): if informed, the plot will be saved as an image in this path\n (ex: /home/mywork/ap.png) (default = None);\n showGraphic (optional): if True, the plot will be shown (default = True)\n Returns:\n A list of dictionaries. 
Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)\n result = None\n # Each resut represents a class\n for result in results:\n if result is None:\n raise IOError('Error: Class %d could not be found.' % classId)\n\n classId = result['class']\n precision = result['precision']\n recall = result['recall']\n average_precision = result['AP']\n mpre = result['interpolated precision']\n mrec = result['interpolated recall']\n npos = result['total positives']\n total_tp = result['total TP']\n total_fp = result['total FP']\n\n plt.close()\n if showInterpolatedPrecision:\n if method == MethodAveragePrecision.EveryPointInterpolation:\n plt.plot(mrec, mpre, '--r', label='Interpolated precision (every point)')\n elif method == MethodAveragePrecision.ElevenPointInterpolation:\n # Uncomment the line below if you want to plot the area\n # plt.plot(mrec, mpre, 'or', label='11-point interpolated precision')\n # Remove duplicates, getting only the highest precision of each recall value\n nrec = []\n nprec = []\n for idx in range(len(mrec)):\n r = mrec[idx]\n if r not in nrec:\n idxEq = np.argwhere(mrec == r)\n nrec.append(r)\n nprec.append(max([mpre[int(id)] for id in idxEq]))\n plt.plot(nrec, nprec, 'or', label='11-point interpolated precision')\n plt.plot(recall, precision, label='Precision')\n plt.xlabel('recall')\n plt.ylabel('precision')\n if showAP:\n ap_str = \"{0:.2f}%\".format(average_precision * 100)\n # ap_str = \"{0:.4f}%\".format(average_precision * 100)\n plt.title('Precision x Recall curve \\nClass: %s, AP: %s' % (str(classId), ap_str))\n else:\n plt.title('Precision x Recall curve \\nClass: %s' % str(classId))\n plt.legend(shadow=True)\n plt.grid()\n ############################################################\n # Uncomment the following block to create plot with points #\n ############################################################\n # plt.plot(recall, precision, 'bo')\n # labels = ['R', 'Y', 'J', 'A', 'U', 'C', 'M', 'F', 'D', 'B', 'H', 'P', 'E', 'X', 'N', 'T',\n # 'K', 'Q', 'V', 'I', 'L', 'S', 'G', 'O']\n # dicPosition = {}\n # dicPosition['left_zero'] = (-30,0)\n # dicPosition['left_zero_slight'] = (-30,-10)\n # dicPosition['right_zero'] = (30,0)\n # dicPosition['left_up'] = (-30,20)\n # dicPosition['left_down'] = (-30,-25)\n # dicPosition['right_up'] = (20,20)\n # dicPosition['right_down'] = (20,-20)\n # dicPosition['up_zero'] = (0,30)\n # dicPosition['up_right'] = (0,30)\n # dicPosition['left_zero_long'] = (-60,-2)\n # dicPosition['down_zero'] = (-2,-30)\n # vecPositions = [\n # dicPosition['left_down'],\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['right_zero'], #'R', 'Y', 'J', 'A',\n # dicPosition['left_up'],\n # dicPosition['left_up'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'U', 'C', 'M', 'F',\n # dicPosition['left_zero'],\n # dicPosition['right_up'],\n # dicPosition['right_down'],\n # 
dicPosition['down_zero'], #'D', 'B', 'H', 'P'\n # dicPosition['left_up'],\n # dicPosition['up_zero'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'E', 'X', 'N', 'T',\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['left_zero_long'],\n # dicPosition['left_zero_slight'], # 'K', 'Q', 'V', 'I',\n # dicPosition['right_down'],\n # dicPosition['left_down'],\n # dicPosition['right_up'],\n # dicPosition['down_zero']\n # ] # 'L', 'S', 'G', 'O'\n # for idx in range(len(labels)):\n # box = dict(boxstyle='round,pad=.5',facecolor='yellow',alpha=0.5)\n # plt.annotate(labels[idx],\n # xy=(recall[idx],precision[idx]), xycoords='data',\n # xytext=vecPositions[idx], textcoords='offset points',\n # arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"),\n # bbox=box)\n if savePath is not None:\n plt.savefig(os.path.join(savePath, classId + '.png'))\n if showGraphic is True:\n plt.show()\n # plt.waitforbuttonpress()\n plt.pause(0.05)\n return results\n\n @staticmethod\n def CalculateAveragePrecision(rec, prec):\n mrec = []\n mrec.append(0)\n [mrec.append(e) for e in rec]\n mrec.append(1)\n mpre = []\n mpre.append(0)\n [mpre.append(e) for e in prec]\n mpre.append(0)\n for i in range(len(mpre) - 1, 0, -1):\n mpre[i - 1] = max(mpre[i - 1], mpre[i])\n ii = []\n for i in range(len(mrec) - 1):\n if mrec[1:][i] != mrec[0:-1][i]:\n ii.append(i + 1)\n ap = 0\n for i in ii:\n ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i])\n # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]\n return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii]\n\n @staticmethod\n # 11-point interpolated average precision\n def ElevenPointInterpolatedAP(rec, prec):\n # def CalculateAveragePrecision2(rec, prec):\n mrec = []\n # mrec.append(0)\n [mrec.append(e) for e in rec]\n # mrec.append(1)\n mpre = []\n # mpre.append(0)\n [mpre.append(e) for e in prec]\n # mpre.append(0)\n recallValues = np.linspace(0, 1, 11)\n recallValues = list(recallValues[::-1])\n rhoInterp = []\n recallValid = []\n # For each recallValues (0, 0.1, 0.2, ... 
, 1)\n for r in recallValues:\n # Obtain all recall values higher or equal than r\n argGreaterRecalls = np.argwhere(mrec[:] >= r)\n pmax = 0\n # If there are recalls above r\n if argGreaterRecalls.size != 0:\n pmax = max(mpre[argGreaterRecalls.min():])\n recallValid.append(r)\n rhoInterp.append(pmax)\n # By definition AP = sum(max(precision whose recall is above r))/11\n ap = sum(rhoInterp) / 11\n # Generating values for the plot\n rvals = []\n rvals.append(recallValid[0])\n [rvals.append(e) for e in recallValid]\n rvals.append(0)\n pvals = []\n pvals.append(0)\n [pvals.append(e) for e in rhoInterp]\n pvals.append(0)\n # rhoInterp = rhoInterp[::-1]\n cc = []\n for i in range(len(rvals)):\n p = (rvals[i], pvals[i - 1])\n if p not in cc:\n cc.append(p)\n p = (rvals[i], pvals[i])\n if p not in cc:\n cc.append(p)\n recallValues = [i[0] for i in cc]\n rhoInterp = [i[1] for i in cc]\n return [ap, rhoInterp, recallValues, None]\n\n # For each detections, calculate IOU with reference\n @staticmethod\n def _getAllIOUs(reference, detections):\n ret = []\n bbReference = reference.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n # img = np.zeros((200,200,3), np.uint8)\n for d in detections:\n bb = d.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n iou = Evaluator.iou(bbReference, bb)\n # Show blank image with the bounding boxes\n # img = add_bb_into_image(img, d, color=(255,0,0), thickness=2, label=None)\n # img = add_bb_into_image(img, reference, color=(0,255,0), thickness=2, label=None)\n ret.append((iou, reference, d)) # iou, reference, detection\n # cv2.imshow(\"comparing\",img)\n # cv2.waitKey(0)\n # cv2.destroyWindow(\"comparing\")\n return sorted(ret, key=lambda i: i[0], reverse=True) # sort by iou (from highest to lowest)\n\n @staticmethod\n def iou(boxA, boxB):\n # if boxes dont intersect\n if Evaluator._boxesIntersect(boxA, boxB) is False:\n return 0\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n union = Evaluator._getUnionAreas(boxA, boxB, interArea=interArea)\n # intersection over union\n iou = interArea / union\n assert iou >= 0\n return iou\n\n # boxA = (Ax1,Ay1,Ax2,Ay2)\n # boxB = (Bx1,By1,Bx2,By2)\n @staticmethod\n def _boxesIntersect(boxA, boxB):\n if boxA[0] > boxB[2]:\n return False # boxA is right of boxB\n if boxB[0] > boxA[2]:\n return False # boxA is left of boxB\n if boxA[3] < boxB[1]:\n return False # boxA is above boxB\n if boxA[1] > boxB[3]:\n return False # boxA is below boxB\n return True\n\n @staticmethod\n def _getIntersectionArea(boxA, boxB):\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n # intersection area\n return (xB - xA + 1) * (yB - yA + 1)\n\n @staticmethod\n def _getUnionAreas(boxA, boxB, interArea=None):\n area_A = Evaluator._getArea(boxA)\n area_B = Evaluator._getArea(boxB)\n if interArea is None:\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n return float(area_A + area_B - interArea)\n\n @staticmethod\n def _getArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.pause",
"numpy.linspace",
"numpy.cumsum",
"numpy.argwhere",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"numpy.divide",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mindspore-ai/models | [
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c"
] | [
"official/nlp/transformer/export.py",
"research/cv/meta-baseline/preprocess.py",
"research/cv/ras/src/resnet50.py",
"research/cv/VehicleNet/src/re_ranking.py",
"research/cv/res2net_faster_rcnn/src/FasterRcnn/anchor_generator.py",
"research/cv/resnext152_64x4d/postprocess.py",
"official/recommend/naml/infer/sdk/util/utils.py",
"research/cv/textfusenet/src/textfusenet/bbox_assign_sample.py",
"official/cv/semantic_human_matting/postprocess.py",
"research/cv/vit_base/src/npz_converter.py",
"research/cv/pointnet/eval.py",
"research/cv/rfcn/src/rfcn/anchor_generator.py",
"research/cv/tsm/postprocess.py",
"official/cv/deeplabv3/export.py",
"research/cv/resnet3d/eval.py",
"research/cv/yolov3_tiny/export.py",
"research/recommend/mmoe/src/data.py",
"research/cv/DDAG/src/models/trainingcell.py",
"research/cv/efficientnetv2/src/models/efficientnetv2.py",
"research/cv/PAMTRI/infer/sdk/utils/inference.py",
"research/cv/LEO/src/data.py",
"research/cv/wgan/train.py",
"research/cv/ras/eval.py",
"research/cv/nas-fpn/postprocess.py",
"research/cv/PAMTRI/PoseEstNet/src/dataset/JointsDataset.py",
"research/cv/FaceNet/export.py",
"research/cv/res2net_deeplabv3/src/data/get_dataset_lst.py",
"research/cv/TCN/export.py",
"research/cv/AlphaPose/infer/sdk/postprocess/evaluation.py",
"research/cv/pointpillars/src/core/einsum.py",
"research/cv/faceboxes/preprocess310.py",
"research/cv/APDrawingGAN/eval.py",
"research/cv/ResNeSt50/src/models/utils.py",
"official/cv/faster_rcnn/src/FasterRcnn/resnet.py",
"research/recommend/autodis/infer/sdk/main.py",
"research/cv/AlignedReID++/src/ResNet.py",
"research/cv/delf/export.py"
] | [
"# Copyright 2020-2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" export checkpoint file into models\"\"\"\n\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import Tensor\n\nfrom src.transformer_model import TransformerModel\nfrom src.model_utils.config import config\nfrom src.model_utils.moxing_adapter import moxing_wrapper\nfrom src.model_utils.device_adapter import get_device_id\nfrom eval import load_weights\n\n\nconfig.batch_size = config.batch_size_ev\nconfig.hidden_dropout_prob = config.hidden_dropout_prob_ev\nconfig.attention_probs_dropout_prob = config.attention_probs_dropout_prob_ev\n\nms.set_context(mode=ms.GRAPH_MODE, device_target=config.device_target)\nif config.device_target == \"Ascend\":\n ms.set_context(device_id=get_device_id())\n\ndef modelarts_pre_process():\n pass\n\n@moxing_wrapper(pre_process=modelarts_pre_process)\ndef export_transformer():\n \"\"\" export_transformer \"\"\"\n tfm_model = TransformerModel(config=config, is_training=False, use_one_hot_embeddings=False)\n\n parameter_dict = load_weights(config.model_file)\n ms.load_param_into_net(tfm_model, parameter_dict)\n\n source_ids = Tensor(np.ones((config.batch_size, config.seq_length)).astype(np.int32))\n source_mask = Tensor(np.ones((config.batch_size, config.seq_length)).astype(np.int32))\n\n ms.export(tfm_model, source_ids, source_mask, file_name=config.file_name, file_format=config.file_format)\n\nif __name__ == '__main__':\n export_transformer()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\npreprocess\n\"\"\"\nimport os\nimport argparse\nimport numpy as np\nfrom mindspore import ops, context\nimport mindspore.dataset as ds\nimport src.util as util\nfrom src.data.IterSamplers import CategoriesSampler\nfrom src.data.mini_Imagenet import MiniImageNet\n\n\ndef gen_bin(args):\n \"\"\"\n generate binary files\n \"\"\"\n n_way = 5\n n_query = 15\n n_shots = [args.num_shots]\n root_path = os.path.join(args.root_path, args.dataset)\n testset = MiniImageNet(root_path, 'test')\n\n fs_loaders = []\n for n_shot in n_shots:\n test_sampler = CategoriesSampler(testset.data, testset.label, n_way, n_shot + n_query,\n 200,\n args.ep_per_batch)\n test_loader = ds.GeneratorDataset(test_sampler, ['data'], shuffle=True)\n fs_loaders.append(test_loader)\n\n input_path = os.path.join(args.pre_result_path, \"00_data\")\n label_path = os.path.join(args.pre_result_path, \"label.npy\")\n shape_path = os.path.join(args.pre_result_path, \"shape.npy\")\n if not os.path.exists(input_path):\n os.makedirs(input_path)\n\n label_list = []\n shape_list = []\n for i, n_shot in enumerate(n_shots):\n np.random.seed(0)\n label_shot = []\n for j, data in enumerate(fs_loaders[i].create_dict_iterator()):\n x_shot, x_query = data['data'][:, :, :n_shot], data['data'][:, :, n_shot:]\n img_shape = x_query.shape[-3:]\n x_query = x_query.view(args.ep_per_batch, -1,\n *img_shape) # bs*(way*n_query)*3*84*84\n label = util.make_nk_label(n_way, n_query, args.ep_per_batch) # bs*(way*n_query)\n if j == 0:\n shape_list.append(x_shot.shape)\n shape_list.append(x_query.shape)\n\n img_shape = x_shot.shape[-3:]\n\n x_shot = x_shot.view(-1, *img_shape)\n x_query = x_query.view(-1, *img_shape)\n input0 = ops.Concat(0)([x_shot, x_query])\n file_name = \"nshot_\" + str(i) + \"_\" + str(j) + \".bin\"\n input0.asnumpy().tofile(os.path.join(input_path, file_name))\n label_shot.append(label.asnumpy())\n label_list.append(label_shot)\n\n np.save(label_path, label_list)\n np.save(shape_path, shape_list)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--root_path', default='./dataset/')\n parser.add_argument('--device_target', type=str, default='CPU', choices=['Ascend', 'GPU', 'CPU'])\n parser.add_argument('--dataset', default='mini-imagenet')\n parser.add_argument('--ep_per_batch', type=int, default=4)\n parser.add_argument('--pre_result_path', type=str, default='./preprocess_Result')\n parser.add_argument('--num_shots', type=int, default=1)\n\n args_opt = parser.parse_args()\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, save_graphs=False)\n gen_bin(args_opt)\n",
"\"\"\"\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n\n\nimport mindspore as ms\nimport mindspore.nn as nn\nimport numpy as np\n\nclass Basic_Block(nn.Cell):\n \"\"\"\n Components constituting resnet50\n \"\"\"\n expansion = 4\n def __init__(self, in_c, out_c, stride=1, downsample=None):\n super(Basic_Block, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=(1, 1), stride=1)\n self.bn1 = nn.BatchNorm2d(out_c, moving_mean_init=0, moving_var_init=1)\n self.conv2 = nn.Conv2d(in_channels=out_c, out_channels=out_c, kernel_size=(3, 3), stride=stride, \\\n pad_mode='pad', padding=1)\n self.bn2 = nn.BatchNorm2d(out_c)\n self.conv3 = nn.Conv2d(in_channels=out_c, out_channels=out_c*4, kernel_size=(1, 1), stride=1)\n self.bn3 = nn.BatchNorm2d(out_c*4)\n self.relu = nn.ReLU()\n self.down_sample_layer = downsample\n\n def construct(self, x):\n \"\"\"\n\n Args:\n x: tensor\n\n Returns: tensor\n\n \"\"\"\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.down_sample_layer is not None:\n residual = self.down_sample_layer(residual)\n out = out + residual\n out = self.relu(out)\n return out\n\n\nclass ResNet50(nn.Cell):\n \"\"\"\n A BoneBack Net of RAS\n \"\"\"\n def __init__(self):\n super(ResNet50, self).__init__()\n self.in_c = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=2, pad_mode='pad', padding=3)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.pool = nn.MaxPool2d(kernel_size=(3, 3), stride=2, pad_mode='same')\n\n self.layer1 = self._build_layer(Basic_Block, 64, 3, 1)\n self.layer2 = self._build_layer(Basic_Block, 128, 4, 2)\n self.layer3 = self._build_layer(Basic_Block, 256, 6, 2)\n self.layer4 = self._build_layer(Basic_Block, 512, 3, 2)\n\n for _, m in self.cells_and_names():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.set_data(ms.Tensor(np.random.normal(0, np.sqrt(2./n), m.weight.data.shape).astype(np.float32)))\n elif isinstance(m, nn.BatchNorm2d):\n m.gamma.set_data(ms.Tensor(np.ones(m.gamma.data.shape, dtype=np.float32)))\n m.beta.set_data(ms.Tensor(np.zeros(m.beta.data.shape, dtype=np.float32)))\n\n def _build_layer(self, block, out_c, blocks, stride):\n layers = []\n downsample = nn.SequentialCell(nn.Conv2d(self.in_c, out_c*block.expansion, kernel_size=(1, 1), stride=stride),\n nn.BatchNorm2d(out_c*4))\n layers.append(block(self.in_c, out_c, stride=stride, downsample=downsample))\n self.in_c = out_c * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.in_c, out_c))\n return nn.SequentialCell(layers)\n\n\n def construct(self, x):\n \"\"\"\n\n Args:\n x:\n\n Returns:\n 5 outputs\n \"\"\"\n out = self.conv1(x)\n out = self.bn1(out)\n x1 = 
self.relu(out)\n x2 = self.pool(x1)\n\n x2 = self.layer1(x2)\n x3 = self.layer2(x2)\n x4 = self.layer3(x3)\n x5 = self.layer4(x4)\n\n return x1, x2, x3, x4, x5\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"reranking\"\"\"\nimport numpy as np\n\ndef k_reciprocal_neigh(initial_rank, i, k1):\n \"\"\"k_reciprocal_neigh\"\"\"\n forward_k_neigh_index = initial_rank[i, :k1+1]\n backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1+1]\n fi = np.where(backward_k_neigh_index == i)[0]\n return forward_k_neigh_index[fi]\n\ndef re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3):\n \"\"\"The following naming, e.g. gallery_num, is different from outer scope.\n \"\"\"\n # Don't care about it.\n original_dist = np.concatenate([np.concatenate([q_q_dist, q_g_dist], axis=1),\n np.concatenate([q_g_dist.T, g_g_dist], axis=1)], axis=0)\n original_dist = 2. - 2 * original_dist\n original_dist = np.power(original_dist, 2).astype(np.float32)\n original_dist = np.transpose(1. * original_dist/np.max(original_dist, axis=0))\n V = np.zeros_like(original_dist).astype(np.float32)\n # initial_rank = np.argsort(original_dist).astype(np.int32)\n # top K1+1\n initial_rank = np.argpartition(original_dist, range(1, k1+1))\n\n query_num = q_g_dist.shape[0]\n all_num = original_dist.shape[0]\n\n for i in range(all_num):\n # k-reciprocal neighbors\n k_reciprocal_index = k_reciprocal_neigh(initial_rank, i, k1)\n k_reciprocal_expansion_index = k_reciprocal_index\n for j in range(len(k_reciprocal_index)):\n candidate = k_reciprocal_index[j]\n candidate_k_reciprocal_index = k_reciprocal_neigh(initial_rank, candidate, int(np.around(k1/2)))\n if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > \\\n 2./3*len(candidate_k_reciprocal_index):\n k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)\n\n k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\n weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])\n V[i, k_reciprocal_expansion_index] = 1.*weight/np.sum(weight)\n\n original_dist = original_dist[:query_num,]\n if k2 != 1:\n V_qe = np.zeros_like(V, dtype=np.float32)\n for i in range(all_num):\n V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)\n V = V_qe\n del V_qe\n del initial_rank\n invIndex = []\n for i in range(all_num):\n invIndex.append(np.where(V[:, i] != 0)[0])\n\n jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)\n\n for i in range(query_num):\n temp_min = np.zeros(shape=[1, all_num], dtype=np.float32)\n indNonZero = np.where(V[i, :] != 0)[0]\n indImages = []\n indImages = [invIndex[ind] for ind in indNonZero]\n for j in range(len(indNonZero)):\n temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + \\\n np.minimum(V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])\n jaccard_dist[i] = 1-temp_min/(2.-temp_min)\n\n final_dist = jaccard_dist*(1-lambda_value) + original_dist*lambda_value\n\n del original_dist\n del V\n del jaccard_dist\n\n final_dist = final_dist[:query_num, query_num:]\n\n return 
final_dist\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"FasterRcnn anchor generator.\"\"\"\n\nimport numpy as np\n\nclass AnchorGenerator():\n \"\"\"Anchor generator for FasterRcnn.\"\"\"\n def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):\n \"\"\"Anchor generator init method.\"\"\"\n self.base_size = base_size\n self.scales = np.array(scales)\n self.ratios = np.array(ratios)\n self.scale_major = scale_major\n self.ctr = ctr\n self.base_anchors = self.gen_base_anchors()\n\n def gen_base_anchors(self):\n \"\"\"Generate a single anchor.\"\"\"\n w = self.base_size\n h = self.base_size\n if self.ctr is None:\n x_ctr = 0.5 * (w - 1)\n y_ctr = 0.5 * (h - 1)\n else:\n x_ctr, y_ctr = self.ctr\n\n h_ratios = np.sqrt(self.ratios)\n w_ratios = 1 / h_ratios\n if self.scale_major:\n ws = (w * w_ratios[:, None] * self.scales[None, :]).reshape(-1)\n hs = (h * h_ratios[:, None] * self.scales[None, :]).reshape(-1)\n else:\n ws = (w * self.scales[:, None] * w_ratios[None, :]).reshape(-1)\n hs = (h * self.scales[:, None] * h_ratios[None, :]).reshape(-1)\n\n base_anchors = np.stack(\n [\n x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)\n ],\n axis=-1).round()\n\n return base_anchors\n\n def _meshgrid(self, x, y, row_major=True):\n \"\"\"Generate grid.\"\"\"\n xx = np.repeat(x.reshape(1, len(x)), len(y), axis=0).reshape(-1)\n yy = np.repeat(y, len(x))\n if row_major:\n return xx, yy\n\n return yy, xx\n\n def grid_anchors(self, featmap_size, stride=16):\n \"\"\"Generate anchor list.\"\"\"\n base_anchors = self.base_anchors\n\n feat_h, feat_w = featmap_size\n shift_x = np.arange(0, feat_w) * stride\n shift_y = np.arange(0, feat_h) * stride\n shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n shifts = np.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)\n shifts = shifts.astype(base_anchors.dtype)\n # first feat_w elements correspond to the first row of shifts\n # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get\n # shifted anchors (K, A, 4), reshape to (K*A, 4)\n\n all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\n all_anchors = all_anchors.reshape(-1, 4)\n\n return all_anchors\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"post process for 310 inference\"\"\"\nimport os\nimport json\nimport argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser(description=\"resnet inference\")\nparser.add_argument(\"--result_path\", type=str, required=True, help=\"result files path.\")\nparser.add_argument(\"--label_path\", type=str, required=True, help=\"image file path.\")\nargs = parser.parse_args()\n\nbatch_size = 1\nnum_classes = 1000\n\ndef get_result(result_path, label_path):\n \"\"\"calculate the result\"\"\"\n files = os.listdir(result_path)\n with open(label_path, \"r\") as label:\n labels = json.load(label)\n\n top1 = 0\n top5 = 0\n total_data = len(files)\n for file in files:\n img_ids_name = file.split('_0.')[0]\n data_path = os.path.join(result_path, img_ids_name + \"_0.bin\")\n result = np.fromfile(data_path, dtype=np.float16).reshape(batch_size, num_classes)\n for batch in range(batch_size):\n predict = np.argsort(-result[batch], axis=-1)\n if labels[img_ids_name+\".JPEG\"] == predict[0]:\n top1 += 1\n if labels[img_ids_name+\".JPEG\"] in predict[:5]:\n top5 += 1\n print(f\"Total data: {total_data}, top1 accuracy: {top1/total_data}, top5 accuracy: {top5/total_data}.\")\n\n\nif __name__ == '__main__':\n get_result(args.result_path, args.label_path)\n",
"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utils for NAML.\"\"\"\nimport time\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nfrom mindspore import Tensor\n\nfrom .dataset import create_eval_dataset, EvalNews, EvalUsers, EvalCandidateNews\n\ndef get_metric(args, mindpreprocess, news_encoder, user_encoder, metric):\n \"\"\"Calculate metrics.\"\"\"\n start = time.time()\n news_dict = {}\n user_dict = {}\n dataset = create_eval_dataset(mindpreprocess, EvalNews, batch_size=args.batch_size)\n dataset_size = dataset.get_dataset_size()\n iterator = dataset.create_dict_iterator(output_numpy=True)\n for count, data in enumerate(iterator):\n news_vector = news_encoder(Tensor(data[\"category\"]), Tensor(data[\"subcategory\"]),\n Tensor(data[\"title\"]), Tensor(data[\"abstract\"])).asnumpy()\n for i, nid in enumerate(data[\"news_id\"]):\n news_dict[str(nid[0])] = news_vector[i]\n print(f\"===Generate News vector==== [ {count} / {dataset_size} ]\", end='\\r')\n print(f\"===Generate News vector==== [ {dataset_size} / {dataset_size} ]\")\n dataset = create_eval_dataset(mindpreprocess, EvalUsers, batch_size=args.batch_size)\n dataset_size = dataset.get_dataset_size()\n iterator = dataset.create_dict_iterator(output_numpy=True)\n for count, data in enumerate(iterator):\n browsed_news = []\n for newses in data[\"history\"]:\n news_list = []\n for nid in newses:\n news_list.append(news_dict[str(nid[0])])\n browsed_news.append(np.array(news_list))\n browsed_news = np.array(browsed_news)\n user_vector = user_encoder(Tensor(browsed_news)).asnumpy()\n for i, uid in enumerate(data[\"uid\"]):\n user_dict[str(uid)] = user_vector[i]\n print(f\"===Generate Users vector==== [ {count} / {dataset_size} ]\", end='\\r')\n print(f\"===Generate Users vector==== [ {dataset_size} / {dataset_size} ]\")\n dataset = create_eval_dataset(mindpreprocess, EvalCandidateNews, batch_size=args.batch_size)\n dataset_size = dataset.get_dataset_size()\n iterator = dataset.create_dict_iterator(output_numpy=True)\n for count, data in enumerate(iterator):\n pred = np.dot(\n np.stack([news_dict[str(nid)] for nid in data[\"candidate_nid\"]], axis=0),\n user_dict[str(data[\"uid\"])]\n )\n metric.update(pred, data[\"labels\"])\n print(f\"===Click Prediction==== [ {count} / {dataset_size} ]\", end='\\r')\n print(f\"===Click Prediction==== [ {dataset_size} / {dataset_size} ]\")\n auc = metric.eval()\n total_cost = time.time() - start\n print(f\"Eval total cost: {total_cost} s\")\n return auc\n\ndef process_data(args):\n word_embedding = np.load(args.embedding_file)\n _, h = word_embedding.shape\n if h < args.word_embedding_dim:\n word_embedding = np.pad(word_embedding, ((0, 0), (0, args.word_embedding_dim - 300)), 'constant',\n constant_values=0)\n elif h > args.word_embedding_dim:\n word_embedding = word_embedding[:, :args.word_embedding_dim]\n print(\"Load word_embedding\", 
word_embedding.shape)\n return Tensor(word_embedding.astype(np.float32))\n\ndef AUC(y_true, y_pred):\n return roc_auc_score(y_true, y_pred)\n\ndef MRR(y_true, y_pred):\n index = np.argsort(y_pred)[::-1]\n y_true = np.take(y_true, index)\n score = y_true / (np.arange(len(y_true)) + 1)\n return np.sum(score) / np.sum(y_true)\n\ndef DCG(y_true, y_pred, n):\n index = np.argsort(y_pred)[::-1]\n y_true = np.take(y_true, index[:n])\n score = (2 ** y_true - 1) / np.log2(np.arange(len(y_true)) + 2)\n return np.sum(score)\n\ndef nDCG(y_true, y_pred, n):\n return DCG(y_true, y_pred, n) / DCG(y_true, y_true, n)\n\nclass NAMLMetric:\n \"\"\"\n Metric method\n \"\"\"\n def __init__(self):\n super(NAMLMetric, self).__init__()\n self.AUC_list = []\n self.MRR_list = []\n self.nDCG5_list = []\n self.nDCG10_list = []\n\n def clear(self):\n \"\"\"Clear the internal evaluation result.\"\"\"\n self.AUC_list = []\n self.MRR_list = []\n self.nDCG5_list = []\n self.nDCG10_list = []\n\n def update(self, predict, y_true):\n predict = predict.flatten()\n y_true = y_true.flatten()\n self.AUC_list.append(AUC(y_true, predict))\n self.MRR_list.append(MRR(y_true, predict))\n self.nDCG5_list.append(nDCG(y_true, predict, 5))\n self.nDCG10_list.append(nDCG(y_true, predict, 10))\n\n def eval(self):\n auc = np.mean(self.AUC_list)\n print('AUC:', auc)\n print('MRR:', np.mean(self.MRR_list))\n print('nDCG@5:', np.mean(self.nDCG5_list))\n print('nDCG@10:', np.mean(self.nDCG10_list))\n return auc\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"TextFuseNet positive and negative sample screening for RPN.\"\"\"\n\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore.ops import operations as P\nfrom mindspore.common.tensor import Tensor\nimport mindspore.common.dtype as mstype\n\n\nclass BboxAssignSample(nn.Cell):\n \"\"\"\n Bbox assigner and sampler definition.\n\n Args:\n config (dict): Config.\n batch_size (int): Batchsize.\n num_bboxes (int): The anchor nums.\n add_gt_as_proposals (bool): add gt bboxes as proposals flag.\n\n Returns:\n Tensor, output tensor.\n bbox_targets: bbox location, (batch_size, num_bboxes, 4)\n bbox_weights: bbox weights, (batch_size, num_bboxes, 1)\n labels: label for every bboxes, (batch_size, num_bboxes, 1)\n label_weights: label weight for every bboxes, (batch_size, num_bboxes, 1)\n\n Examples:\n BboxAssignSample(config, 2, 1024, True)\n \"\"\"\n\n def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):\n super(BboxAssignSample, self).__init__()\n cfg = config\n self.batch_size = batch_size\n\n self.neg_iou_thr = Tensor(cfg.neg_iou_thr, mstype.float16)\n self.pos_iou_thr = Tensor(cfg.pos_iou_thr, mstype.float16)\n self.min_pos_iou = Tensor(cfg.min_pos_iou, mstype.float16)\n self.zero_thr = Tensor(0.0, mstype.float16)\n\n self.num_bboxes = num_bboxes\n self.num_gts = cfg.num_gts\n self.num_expected_pos = cfg.num_expected_pos\n self.num_expected_neg = cfg.num_expected_neg\n self.add_gt_as_proposals = add_gt_as_proposals\n\n if self.add_gt_as_proposals:\n self.label_inds = Tensor(np.arange(1, self.num_gts + 1))\n\n self.concat = P.Concat(axis=0)\n self.max_gt = P.ArgMaxWithValue(axis=0)\n self.max_anchor = P.ArgMaxWithValue(axis=1)\n self.sum_inds = P.ReduceSum()\n self.iou = P.IOU()\n self.greaterequal = P.GreaterEqual()\n self.greater = P.Greater()\n self.select = P.Select()\n self.gatherND = P.GatherNd()\n self.squeeze = P.Squeeze()\n self.cast = P.Cast()\n self.logicaland = P.LogicalAnd()\n self.less = P.Less()\n self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)\n self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)\n self.reshape = P.Reshape()\n self.equal = P.Equal()\n self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0))\n self.scatterNdUpdate = P.ScatterNdUpdate()\n self.scatterNd = P.ScatterNd()\n self.logicalnot = P.LogicalNot()\n self.tile = P.Tile()\n self.zeros_like = P.ZerosLike()\n\n self.assigned_gt_inds = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))\n self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))\n self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))\n self.assigned_gt_ignores = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))\n self.assigned_pos_ones = 
Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))\n\n self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos), dtype=np.bool))\n self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(np.float16))\n self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=np.float16))\n self.check_anchor_two = Tensor(np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=np.float16))\n\n\n def construct(self, gt_bboxes_i, gt_labels_i, valid_mask, bboxes, gt_valids):\n \"\"\"\"assign forward\"\"\"\n gt_bboxes_i = self.select(self.cast(self.tile(self.reshape(self.cast(gt_valids, mstype.int32), \\\n (self.num_gts, 1)), (1, 4)), mstype.bool_), gt_bboxes_i, self.check_gt_one)\n bboxes = self.select(self.cast(self.tile(self.reshape(self.cast(valid_mask, mstype.int32), \\\n (self.num_bboxes, 1)), (1, 4)), mstype.bool_), bboxes, self.check_anchor_two)\n\n overlaps = self.iou(bboxes, gt_bboxes_i)\n\n max_overlaps_w_gt_index, max_overlaps_w_gt = self.max_gt(overlaps)\n _, max_overlaps_w_ac = self.max_anchor(overlaps)\n\n neg_sample_iou_mask = self.logicaland(self.greaterequal(max_overlaps_w_gt, self.zero_thr), \\\n self.less(max_overlaps_w_gt, self.neg_iou_thr))\n assigned_gt_inds2 = self.select(neg_sample_iou_mask, self.assigned_gt_zeros, self.assigned_gt_inds)\n\n pos_sample_iou_mask = self.greaterequal(max_overlaps_w_gt, self.pos_iou_thr)\n assigned_gt_inds3 = self.select(pos_sample_iou_mask, \\\n max_overlaps_w_gt_index + self.assigned_gt_ones, assigned_gt_inds2)\n assigned_gt_inds4 = assigned_gt_inds3\n for j in range(self.num_gts):\n max_overlaps_w_ac_j = max_overlaps_w_ac[j:j+1:1]\n overlaps_w_gt_j = self.squeeze(overlaps[j:j+1:1, ::])\n\n pos_mask_j = self.logicaland(self.greaterequal(max_overlaps_w_ac_j, self.min_pos_iou), \\\n self.equal(overlaps_w_gt_j, max_overlaps_w_ac_j))\n\n assigned_gt_inds4 = self.select(pos_mask_j, self.assigned_gt_ones + j, assigned_gt_inds4)\n\n assigned_gt_inds5 = self.select(valid_mask, assigned_gt_inds4, self.assigned_gt_ignores)\n\n pos_index, valid_pos_index = self.random_choice_with_mask_pos(self.greater(assigned_gt_inds5, 0))\n\n pos_check_valid = self.cast(self.greater(assigned_gt_inds5, 0), mstype.float16)\n pos_check_valid = self.sum_inds(pos_check_valid, -1)\n valid_pos_index = self.less(self.range_pos_size, pos_check_valid)\n pos_index = pos_index * self.reshape(self.cast(valid_pos_index, mstype.int32), (self.num_expected_pos, 1))\n\n pos_assigned_gt_index = self.gatherND(assigned_gt_inds5, pos_index) - self.assigned_pos_ones\n pos_assigned_gt_index = pos_assigned_gt_index * self.cast(valid_pos_index, mstype.int32)\n pos_assigned_gt_index = self.reshape(pos_assigned_gt_index, (self.num_expected_pos, 1))\n\n neg_index, valid_neg_index = self.random_choice_with_mask_neg(self.equal(assigned_gt_inds5, 0))\n\n num_pos = self.cast(self.logicalnot(valid_pos_index), mstype.float16)\n num_pos = self.sum_inds(num_pos, -1)\n unvalid_pos_index = self.less(self.range_pos_size, num_pos)\n valid_neg_index = self.logicaland(self.concat((self.check_neg_mask, unvalid_pos_index)), valid_neg_index)\n\n pos_bboxes_ = self.gatherND(bboxes, pos_index)\n pos_gt_bboxes_ = self.gatherND(gt_bboxes_i, pos_assigned_gt_index)\n pos_gt_labels = self.gatherND(gt_labels_i, pos_assigned_gt_index)\n\n pos_bbox_targets_ = self.bounding_box_encode(pos_bboxes_, pos_gt_bboxes_)\n\n valid_pos_index = self.cast(valid_pos_index, mstype.int32)\n valid_neg_index = self.cast(valid_neg_index, mstype.int32)\n bbox_targets_total = 
self.scatterNd(pos_index, pos_bbox_targets_, (self.num_bboxes, 4))\n bbox_weights_total = self.scatterNd(pos_index, valid_pos_index, (self.num_bboxes,))\n labels_total = self.scatterNd(pos_index, pos_gt_labels, (self.num_bboxes,))\n total_index = self.concat((pos_index, neg_index))\n total_valid_index = self.concat((valid_pos_index, valid_neg_index))\n label_weights_total = self.scatterNd(total_index, total_valid_index, (self.num_bboxes,))\n\n return bbox_targets_total, self.cast(bbox_weights_total, mstype.bool_), \\\n labels_total, self.cast(label_weights_total, mstype.bool_)\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"post process for 310 inference\"\"\"\nimport os\nimport argparse\n\nimport yaml\nimport cv2\nimport numpy as np\n\n\ndef get_args():\n \"\"\"\n Cmd example:\n\n python postprocess.py\n --config_path=./config.yaml\n --result_path=./scripts/result_Files\n --pre_path=./scripts/preprocess_Result\n --save_path=./scripts/postprocess_Result\n \"\"\"\n parser = argparse.ArgumentParser(description='Semantic human matting')\n parser.add_argument('--config_path', type=str, default='./config.yaml', help='config path')\n parser.add_argument('--result_path', type=str, default='./scripts/result_Files', help='infer path')\n parser.add_argument('--pre_path', type=str, default='./scripts/preprocess_Result', help='pre path')\n parser.add_argument('--save_path', type=str, default='./scripts/postprocess_Result', help='save path')\n args = parser.parse_args()\n print(args)\n return args\n\n\ndef get_config_from_yaml(args):\n yaml_file = open(args.config_path, \"r\", encoding=\"utf-8\")\n file_data = yaml_file.read()\n yaml_file.close()\n\n y = yaml.load(file_data, Loader=yaml.FullLoader)\n cfg = y['infer']\n cfg['result_path'] = args.result_path\n cfg['pre_path'] = args.pre_path\n cfg['save_path'] = args.save_path\n\n return cfg\n\n\ndef safe_makedirs(path_dir):\n if not os.path.exists(path_dir):\n os.makedirs(path_dir)\n\n\ndef cal_sad(cfg):\n \"\"\"Calculate Sad metric\"\"\"\n print('postprocess and calculate metric ...')\n safe_makedirs(cfg['save_path'])\n files = os.listdir(cfg['result_path'])\n files = list(filter(lambda i: '_1.bin' in i, files))\n files.sort()\n list_sad = list()\n for _, file in enumerate(files):\n file_name = file.replace('_1.bin', '')\n\n file_infer = os.path.join(cfg['result_path'], file)\n alpha = np.fromfile(file_infer, dtype=np.float32).reshape((1, 1, cfg['size'], cfg['size']))\n\n image_path = os.path.join(cfg['pre_path'], 'clip_data', '{}.jpg'.format(file_name))\n image = cv2.imread(image_path)\n\n label_path = os.path.join(cfg['pre_path'], 'label', '{}.png'.format(file_name))\n label = cv2.imread(label_path)\n\n # generate foreground image\n alpha_np = alpha[0, 0, :, :]\n origin_h, origin_w, _ = image.shape\n alpha_fg = cv2.resize(alpha_np, (origin_w, origin_h), interpolation=cv2.INTER_CUBIC)\n fg = np.multiply(alpha_fg[..., np.newaxis], image)\n fg_path = os.path.join(cfg['save_path'], '{}.jpg'.format(file_name))\n cv2.imwrite(fg_path, fg)\n\n # generate metric Sad (original image size)\n image_gt = label[:, :, 0]\n image_gt = image_gt.astype(np.float64) / 255\n sad = np.abs(alpha_fg - image_gt).sum() / 1000\n list_sad.append(sad)\n\n print('{}\\tsad\\t{}'.format(image_path, sad))\n print('Total images: {}, total sad: {}, ave sad: {}'.format(len(list_sad), np.sum(list_sad), np.mean(list_sad)))\n\n\nif __name__ == \"__main__\":\n cal_sad(get_config_from_yaml(get_args()))\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nProcess the .npz checkpoint to .ckpt format.\nFor ViT-base-16 only.\n\"\"\"\n\nimport mindspore as ms\nimport numpy as np\n\n\ndef extract_encoder_weights(ref_ws, index):\n \"\"\"extract weights from encoder layers and transform shape\"\"\"\n src_prefix = f'Transformer/encoderblock_{index}/'\n tgt_prefix = f'transformer.encoder.layer.{index}.'\n\n tgt_ws = {}\n\n # Attention\n src_att_name = src_prefix + 'MultiHeadDotProductAttention_1/'\n tgt_att_name = tgt_prefix + 'attn.'\n\n tgt_ws[tgt_att_name + 'query.weight'] = ref_ws[src_att_name + 'query/kernel'].reshape(768, 768).T\n tgt_ws[tgt_att_name + 'key.weight'] = ref_ws[src_att_name + 'key/kernel'].reshape(768, 768).T\n tgt_ws[tgt_att_name + 'value.weight'] = ref_ws[src_att_name + 'value/kernel'].reshape(768, 768).T\n tgt_ws[tgt_att_name + 'out.weight'] = ref_ws[src_att_name + 'out/kernel'].reshape(768, 768).T\n\n tgt_ws[tgt_att_name + 'query.bias'] = ref_ws[src_att_name + 'query/bias'].reshape(768)\n tgt_ws[tgt_att_name + 'key.bias'] = ref_ws[src_att_name + 'key/bias'].reshape(768)\n tgt_ws[tgt_att_name + 'value.bias'] = ref_ws[src_att_name + 'value/bias'].reshape(768)\n tgt_ws[tgt_att_name + 'out.bias'] = ref_ws[src_att_name + 'out/bias']\n\n tgt_ws[tgt_prefix + 'attention_norm.gamma'] = ref_ws[src_prefix + 'LayerNorm_0/scale']\n tgt_ws[tgt_prefix + 'attention_norm.beta'] = ref_ws[src_prefix + 'LayerNorm_0/bias']\n\n # Feed forward\n tgt_ws[tgt_prefix + 'ffn_norm.gamma'] = ref_ws[src_prefix + 'LayerNorm_2/scale']\n tgt_ws[tgt_prefix + 'ffn_norm.beta'] = ref_ws[src_prefix + 'LayerNorm_2/bias']\n\n tgt_ws[tgt_prefix + 'ffn.fc1.weight'] = ref_ws[src_prefix + 'MlpBlock_3/Dense_0/kernel'].T\n tgt_ws[tgt_prefix + 'ffn.fc1.bias'] = ref_ws[src_prefix + 'MlpBlock_3/Dense_0/bias']\n tgt_ws[tgt_prefix + 'ffn.fc2.weight'] = ref_ws[src_prefix + 'MlpBlock_3/Dense_1/kernel'].T\n tgt_ws[tgt_prefix + 'ffn.fc2.bias'] = ref_ws[src_prefix + 'MlpBlock_3/Dense_1/bias']\n\n return tgt_ws\n\n\ndef extract_embeddings(ref_ws):\n \"\"\"extract weights from embeddings and transform shape\"\"\"\n tgt_ws = dict()\n\n tgt_ws['transformer.embeddings.position_embeddings'] = ref_ws['Transformer/posembed_input/pos_embedding']\n tgt_ws['transformer.embeddings.cls_token'] = ref_ws['cls']\n tgt_ws['transformer.embeddings.patch_embeddings.weight'] = np.transpose(ref_ws['embedding/kernel'], (3, 2, 0, 1))\n tgt_ws['transformer.embeddings.patch_embeddings.bias'] = ref_ws['embedding/bias']\n\n return tgt_ws\n\n\ndef prepare_weights(weights_data):\n \"\"\"prepare weights from every encoder layer\"\"\"\n new_weights = {}\n\n # Extract encoder data\n for i in range(12):\n new_weights.update(extract_encoder_weights(weights_data, index=i))\n\n # Extract something\n new_weights['transformer.encoder.encoder_norm.gamma'] = weights_data['Transformer/encoder_norm/scale']\n 
new_weights['transformer.encoder.encoder_norm.beta'] = weights_data['Transformer/encoder_norm/bias']\n\n # Extract embeddings\n new_weights.update(extract_embeddings(weights_data))\n\n # Extract head\n new_weights['head.weight'] = weights_data['head/kernel'].T\n new_weights['head.bias'] = weights_data['head/bias']\n\n # Take first ten head weights\n head_indexes = np.arange(0, 10, 1, dtype=int)\n new_weights.update(\n {\n 'head.weight': new_weights['head.weight'][head_indexes],\n 'head.bias': new_weights['head.bias'][head_indexes]\n }\n )\n\n # Turn numpy data into parameters\n new_weights = {\n k: ms.Parameter(v.astype(np.float32))\n for k, v in new_weights.items()\n }\n\n return new_weights\n\n\ndef npz2ckpt(npz_path):\n \"\"\"\n Takes weights from .npz format.\n If necessary prepare it's shape to mindspore format\n and create dictionary ready to load into mindspore net\n\n Note:\n Supports ViT-base-16 only.\n\n Returns:\n weight dict of mindspore format\n \"\"\"\n\n ref_weights = np.load(npz_path)\n prepared_weights = prepare_weights(ref_weights)\n\n return prepared_weights\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"eval model\"\"\"\nfrom __future__ import print_function\nimport argparse\nimport os\nimport random\nimport math\nimport numpy as np\nimport mindspore\nfrom mindspore import load_checkpoint, load_param_into_net, context\nimport mindspore.dataset as ds\nimport mindspore.ops as ops\nfrom mindspore.communication.management import init, get_rank\nfrom src.dataset import ShapeNetDataset\nfrom src.network import PointNetDenseCls\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser(description='MindSpore Pointnet Segmentation')\nparser.add_argument(\n '--batchSize', type=int, default=32, help='input batch size')\nparser.add_argument(\n '--nepoch', type=int, default=100, help='number of epochs to train for')\nparser.add_argument('--device_id', type=int, default=0, help='device id')\nparser.add_argument('--device_target', default='Ascend', help='device id')\nparser.add_argument('--data_path', type=str, default='/home/pointnet/shapenetcore_partanno_segmentation_benchmark_v0'\n , help=\"dataset path\")\nparser.add_argument('--model_path', type=str, default=''\n , help=\"dataset path\")\nparser.add_argument('--ckpt_dir', type=str, default='./ckpts'\n , help=\"ckpts path\")\nparser.add_argument('--class_choice', type=str, default='Chair', help=\"class_choice\")\nparser.add_argument('--feature_transform', action='store_true', help=\"use feature transform\")\nparser.add_argument('--enable_modelarts', default=False, help=\"use feature transform\")\n\nargs = parser.parse_args()\nprint(args)\n\ndef test_net(test_dataset, network, data_path, class_choice, model=None):\n \"\"\"test model\"\"\"\n print(\"============== Starting Testing ==============\")\n if model:\n param_dict = load_checkpoint(model)\n load_param_into_net(network, param_dict)\n print('successfully load model')\n\n print(type(test_dataset))\n\n print('batchSize', test_dataset.get_batch_size())\n print('num_batch', test_dataset.get_dataset_size())\n print('shapes2', test_dataset.output_shapes())\n\n print('test_dataset_size', test_dataset.get_dataset_size())\n network.set_train(False)\n shape_ious = []\n for _, data in tqdm(enumerate(test_dataset.create_dict_iterator(), 0)):\n points, target = data['point'], data['label']\n pred = network(points) # pred.shape=[80000,4]\n pred_choice = ops.ArgMaxWithValue(axis=2)(pred)[0]\n pred_np = pred_choice.asnumpy()\n target_np = target.asnumpy() - 1\n\n for shape_idx in range(target_np.shape[0]):\n parts = range(num_classes)\n part_ious = []\n for part in parts:\n I = np.sum(np.logical_and(pred_np[shape_idx] == part, target_np[shape_idx] == part))\n U = np.sum(np.logical_or(pred_np[shape_idx] == part, target_np[shape_idx] == part))\n if U == 0:\n iou = 1\n else:\n iou = I / float(U)\n part_ious.append(iou)\n shape_ious.append(np.mean(part_ious))\n print(np.mean(part_ious))\n\n print(\"mIOU for class {}: 
{}\".format(args.class_choice, np.mean(shape_ious)))\n\n\nif __name__ == \"__main__\":\n blue = lambda x: '\\033[94m' + x + '\\033[0m'\n local_data_url = args.data_path\n local_train_url = args.ckpt_dir\n device_num = int(os.getenv(\"RANK_SIZE\", \"1\"))\n if args.enable_modelarts:\n device_id = int(os.getenv(\"DEVICE_ID\"))\n import moxing as mox\n\n local_data_url = './cache/data'\n local_train_url = './cache/ckpt'\n device_target = args.device_target\n context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)\n context.set_context(save_graphs=False)\n if device_target == \"Ascend\":\n context.set_context(device_id=device_id)\n if device_num > 1:\n cfg.episode = int(cfg.episode / 2)\n cfg.learning_rate = cfg.learning_rate * 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(device_num=device_num,\n parallel_mode=context.ParallelMode.DATA_PARALLEL, gradients_mean=True)\n init()\n local_data_url = os.path.join(local_data_url, str(device_id))\n local_train_url = os.path.join(local_train_url, \"_\" + str(get_rank()))\n else:\n raise ValueError(\"Unsupported platform.\")\n import moxing as mox\n\n mox.file.copy_parallel(src_url=args.data_url, dst_url=local_data_url)\n else:\n context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, device_id=args.device_id)\n context.set_context(save_graphs=False)\n if device_num > 1:\n cfg.episode = int(cfg.episode / 2)\n cfg.learning_rate = cfg.learning_rate * 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,\n gradients_mean=True)\n init()\n\n if not os.path.exists(local_train_url):\n os.makedirs(local_train_url)\n\n args.manualSeed = random.randint(1, 10000)\n print(\"Random Seed: \", args.manualSeed)\n random.seed(args.manualSeed)\n mindspore.set_seed(args.manualSeed)\n dataset_sink_mode = False\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", device_id=args.device_id)\n\n dataset_generator = ShapeNetDataset(\n root=local_data_url,\n classification=False,\n class_choice=[args.class_choice])\n test_dataset_generator = ShapeNetDataset(\n root=local_data_url,\n classification=False,\n class_choice=[args.class_choice],\n split='test',\n data_augmentation=False)\n\n test_dataloader = ds.GeneratorDataset(test_dataset_generator, [\"point\", \"label\"], shuffle=True)\n test_dataset1 = test_dataloader.batch(args.batchSize)\n num_classes = dataset_generator.num_seg_classes\n classifier = PointNetDenseCls(k=num_classes, feature_transform=args.feature_transform)\n classifier.set_train(False)\n num_batch = math.ceil(len(dataset_generator) / args.batchSize)\n\n test_net(test_dataset1, classifier, args.data_path, args.class_choice, args.model_path)\n",
"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Rfcn anchor generator.\"\"\"\n\nimport numpy as np\n\nclass AnchorGenerator():\n \"\"\"Anchor generator for Rfcn.\"\"\"\n def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):\n \"\"\"Anchor generator init method.\"\"\"\n self.base_size = base_size\n self.scales = np.array(scales)\n self.ratios = np.array(ratios)\n self.scale_major = scale_major\n self.ctr = ctr\n self.base_anchors = self.gen_base_anchors()\n\n def gen_base_anchors(self):\n \"\"\"Generate a single anchor.\"\"\"\n w = self.base_size\n h = self.base_size\n if self.ctr is None:\n x_ctr = 0.5 * (w - 1)\n y_ctr = 0.5 * (h - 1)\n else:\n x_ctr, y_ctr = self.ctr\n\n h_ratios = np.sqrt(self.ratios)\n w_ratios = 1 / h_ratios\n if self.scale_major:\n ws = (w * w_ratios[:, None] * self.scales[None, :]).reshape(-1)\n hs = (h * h_ratios[:, None] * self.scales[None, :]).reshape(-1)\n else:\n ws = (w * self.scales[:, None] * w_ratios[None, :]).reshape(-1)\n hs = (h * self.scales[:, None] * h_ratios[None, :]).reshape(-1)\n\n base_anchors = np.stack(\n [\n x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)\n ],\n axis=-1).round()\n\n return base_anchors\n\n def _meshgrid(self, x, y, row_major=True):\n \"\"\"Generate grid.\"\"\"\n xx = np.repeat(x.reshape(1, len(x)), len(y), axis=0).reshape(-1)\n yy = np.repeat(y, len(x))\n if row_major:\n return xx, yy\n\n return yy, xx\n\n def grid_anchors(self, featmap_size, stride=16):\n \"\"\"Generate anchor list.\"\"\"\n base_anchors = self.base_anchors\n\n feat_h, feat_w = featmap_size\n shift_x = np.arange(0, feat_w) * stride\n shift_y = np.arange(0, feat_h) * stride\n shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n shifts = np.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)\n shifts = shifts.astype(base_anchors.dtype)\n # first feat_w elements correspond to the first row of shifts\n # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get\n # shifted anchors (K, A, 4), reshape to (K*A, 4)\n\n all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\n all_anchors = all_anchors.reshape(-1, 4)\n\n return all_anchors\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''post process for 310 inference'''\nimport os\nimport argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='postprocess for googlenet')\nparser.add_argument(\"--result_path\", type=str, required=True, help=\"result file path\")\nparser.add_argument(\"--label_file\", type=str, required=True, help=\"label file\")\nargs = parser.parse_args()\n\n\ndef cal_acc_imagenet(result_path, label_file):\n \"\"\"cal_acc_imagenet\"\"\"\n img_tot = 0\n top1_correct = 0\n\n files = os.listdir(result_path)\n for file in files:\n full_file_path = os.path.join(result_path, file)\n if os.path.isfile(full_file_path):\n result = np.fromfile(full_file_path, dtype=np.float32).argmax()\n idx_num = file.split(\"_\")[0].split(\"s\")[1]\n gt_classes = np.fromfile(os.path.join(label_file, 'label{}.bin'.format(idx_num)), dtype=np.int32)\n\n if result == gt_classes:\n top1_correct = top1_correct + 1\n img_tot += 1\n\n acc1 = 100.0 * top1_correct / img_tot\n print('after allreduce eval: top1_correct={}, tot={}, acc={:.2f}%'.format(top1_correct, img_tot, acc1))\n\nif __name__ == \"__main__\":\n cal_acc_imagenet(args.result_path, args.label_file)\n",
"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"export checkpoint file into air, onnx, mindir models\"\"\"\nimport os\nimport numpy as np\n\nimport mindspore.nn as nn\nimport mindspore.ops as ops\nfrom mindspore import Tensor, context, load_checkpoint, load_param_into_net, export\nfrom src.nets import net_factory\n\nfrom model_utils.config import config\nfrom model_utils.moxing_adapter import moxing_wrapper\n\nclass BuildEvalNetwork(nn.Cell):\n def __init__(self, net, input_format=\"NCHW\"):\n super(BuildEvalNetwork, self).__init__()\n self.network = net\n self.softmax = nn.Softmax(axis=1)\n self.transpose = ops.Transpose()\n self.format = input_format\n\n def construct(self, x):\n if self.format == \"NHWC\":\n x = self.transpose(x, (0, 3, 1, 2))\n output = self.network(x)\n output = self.softmax(output)\n return output\n\n\ndef modelarts_pre_process():\n '''modelarts pre process function.'''\n config.file_name = os.path.join(config.output_path, config.file_name)\n\n\n@moxing_wrapper(pre_process=modelarts_pre_process)\ndef run_export():\n '''run export.'''\n context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)\n if config.device_target == \"Ascend\":\n context.set_context(device_id=config.device_id)\n\n if config.export_model == 'deeplab_v3_s16':\n network = net_factory.nets_map['deeplab_v3_s16']('eval', config.num_classes, 16, config.freeze_bn)\n else:\n network = net_factory.nets_map['deeplab_v3_s8']('eval', config.num_classes, 8, config.freeze_bn)\n network = BuildEvalNetwork(network, config.input_format)\n param_dict = load_checkpoint(config.ckpt_file)\n\n # load the parameter into net\n load_param_into_net(network, param_dict)\n if config.input_format == \"NHWC\":\n input_data = Tensor(\n np.ones([config.export_batch_size, config.input_size, config.input_size, 3]).astype(np.float32))\n else:\n input_data = Tensor(\n np.ones([config.export_batch_size, 3, config.input_size, config.input_size]).astype(np.float32))\n export(network, input_data, file_name=config.file_name, file_format=config.file_format)\n\n\nif __name__ == '__main__':\n run_export()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nEval.\n\"\"\"\nimport time\nimport random\nimport numpy as np\n\nimport mindspore.nn as nn\nfrom mindspore import context\nfrom mindspore import dataset as de\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom mindspore.common import set_seed\nfrom mindspore.train.model import Model\n\nfrom src.config import config as args_opt\nfrom src.dataset import create_eval_dataset\nfrom src.ResNet3D import generate_model\nfrom src.inference import (Inference, load_ground_truth, load_result,\n remove_nonexistent_ground_truth, calculate_clip_acc)\n\n\nrandom.seed(1)\nnp.random.seed(1)\nde.config.set_seed(1)\nset_seed(1)\n\n\nclass NetWithSoftmax(nn.Cell):\n \"\"\"\n Add Softmax module to network.\n \"\"\"\n\n def __init__(self, network):\n super(NetWithSoftmax, self).__init__()\n self.softmax = nn.Softmax()\n self.net = network\n\n def construct(self, x):\n out = self.net(x)\n out = self.softmax(out)\n return out\n\n\nif __name__ == '__main__':\n t1 = time.time()\n cfg = args_opt\n print(cfg)\n target = args_opt.device_target\n # init context\n device_id = args_opt.device_id\n context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False,\n device_id=device_id)\n\n net = generate_model(n_classes=cfg.n_classes, no_max_pool=False)\n param_dict = load_checkpoint(cfg.inference_ckpt_path)\n load_param_into_net(net, param_dict)\n\n net = NetWithSoftmax(net)\n net.set_train(False)\n\n model = Model(net)\n\n predict_data = create_eval_dataset(\n cfg.video_path, cfg.annotation_path, cfg)\n inference = Inference()\n inference_results, clip_inference_results = inference(\n predict_data, model, cfg.annotation_path)\n\n print('load ground truth')\n ground_truth, class_labels_map = load_ground_truth(\n cfg.annotation_path, \"validation\")\n print('number of ground truth: {}'.format(len(ground_truth)))\n\n n_ground_truth_top_1 = len(ground_truth)\n n_ground_truth_top_5 = len(ground_truth)\n\n result_top1, result_top5 = load_result(\n clip_inference_results, class_labels_map)\n\n # print(\"==================result_top1===========\\n\", result_top1)\n\n ground_truth_top1 = remove_nonexistent_ground_truth(\n ground_truth, result_top1)\n ground_truth_top5 = remove_nonexistent_ground_truth(\n ground_truth, result_top5)\n\n if cfg.ignore:\n n_ground_truth_top_1 = len(ground_truth_top1)\n n_ground_truth_top_5 = len(ground_truth_top5)\n\n correct_top1 = [1 if line[1] in result_top1[line[0]]\n else 0 for line in ground_truth_top1]\n correct_top5 = [1 if line[1] in result_top5[line[0]]\n else 0 for line in ground_truth_top5]\n\n clip_acc = calculate_clip_acc(\n inference_results, ground_truth, class_labels_map)\n accuracy_top1 = float(sum(correct_top1)) / float(n_ground_truth_top_1)\n accuracy_top5 = float(sum(correct_top5)) / float(n_ground_truth_top_5)\n 
print('==================Accuracy=================\\n'\n ' clip-acc : {} \\ttop-1 : {} \\ttop-5: {}'.format(clip_acc, accuracy_top1, accuracy_top5))\n t2 = time.time()\n print(\"Total time : {} s\".format(t2 - t1))\n",
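The 3D-ResNet evaluation above counts a hit when the ground-truth label appears in the per-video top-k result list. A small self-contained sketch of that counting rule on hypothetical data (names and values are illustrative, not from the entry):

def topk_accuracy(ground_truth, results, k=5):
    """ground_truth: iterable of (video_id, label); results: dict video_id -> ranked label list."""
    hits = [1 if label in results[video_id][:k] else 0
            for video_id, label in ground_truth if video_id in results]
    return float(sum(hits)) / max(len(hits), 1)

# e.g. topk_accuracy([("v1", "run"), ("v2", "jump")], {"v1": ["walk", "run"], "v2": ["jump"]}, k=2) -> 1.0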
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"export model for YOLO\"\"\"\nimport os\n\nimport numpy as np\nfrom mindspore import Tensor\nfrom mindspore import context\nfrom mindspore import dtype as mstype\nfrom mindspore.train.serialization import export\nfrom mindspore.train.serialization import load_checkpoint\nfrom mindspore.train.serialization import load_param_into_net\n\nfrom model_utils.config import config\nfrom model_utils.moxing_adapter import moxing_wrapper\nfrom src.yolo import YOLOv3Inference\n\n\ndef modelarts_pre_process():\n \"\"\"modelarts pre process function.\"\"\"\n config.file_name = os.path.join(config.output_path, config.file_name)\n\n\n@moxing_wrapper(pre_process=modelarts_pre_process)\ndef run_export():\n \"\"\"export model to ir file\"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)\n if config.device_target == \"Ascend\":\n context.set_context(device_id=config.device_id)\n network = YOLOv3Inference(config.test_img_shape)\n param_dict = load_checkpoint(config.ckpt_file)\n load_param_into_net(network, param_dict)\n shape = [config.batch_size, 3] + config.test_img_shape\n input_data = Tensor(np.zeros(shape), mstype.float32)\n export(network, input_data, file_name=config.file_name, file_format=config.file_format)\n\n\nif __name__ == \"__main__\":\n run_export()\n",
"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Generate data in mindrecord format.\"\"\"\nimport os\n\nimport pandas as pd\nimport numpy as np\n\nfrom mindspore.mindrecord import FileWriter\n\nfrom model_utils.config import config\n\n\ndef generate_npy(data_path, do_train):\n \"\"\"create npy file\"\"\"\n column_names = ['age', 'class_worker', 'det_ind_code', 'det_occ_code', 'education', 'wage_per_hour',\n 'hs_college', 'marital_stat', 'major_ind_code', 'major_occ_code', 'race', 'hisp_origin',\n 'sex', 'union_member', 'unemp_reason', 'full_or_part_emp', 'capital_gains',\n 'capital_losses', 'stock_dividends', 'tax_filer_stat', 'region_prev_res', 'state_prev_res',\n 'det_hh_fam_stat', 'det_hh_summ', 'instance_weight', 'mig_chg_msa', 'mig_chg_reg',\n 'mig_move_reg', 'mig_same', 'mig_prev_sunbelt', 'num_emp', 'fam_under_18',\n 'country_father', 'country_mother', 'country_self', 'citizenship', 'own_or_self',\n 'vet_question', 'vet_benefits', 'weeks_worked', 'year', 'income_50k']\n label_columns = ['income_50k', 'marital_stat']\n categorical_columns = ['class_worker', 'det_ind_code', 'det_occ_code', 'education', 'hs_college',\n 'major_ind_code', 'major_occ_code', 'race', 'hisp_origin', 'sex', 'union_member',\n 'unemp_reason', 'full_or_part_emp', 'tax_filer_stat', 'region_prev_res',\n 'state_prev_res', 'det_hh_fam_stat', 'det_hh_summ', 'mig_chg_msa', 'mig_chg_reg',\n 'mig_move_reg', 'mig_same', 'mig_prev_sunbelt', 'fam_under_18', 'country_father',\n 'country_mother', 'country_self', 'citizenship', 'vet_question']\n if do_train:\n ds = pd.read_csv(\n data_path + '/census-income.data.gz',\n delimiter=',',\n index_col=None,\n names=column_names\n )\n else:\n ds = pd.read_csv(\n data_path + '/census-income.test.gz',\n delimiter=',',\n index_col=None,\n names=column_names\n )\n ds_transformed = pd.get_dummies(\n ds.drop(label_columns, axis=1), columns=categorical_columns)\n if not do_train:\n ds_transformed['det_hh_fam_stat_ Grandchild <18 ever marr not in subfamily'] = 0\n data = ds_transformed\n np.save(data_path + '/data.npy', np.array(data), allow_pickle=False)\n\n ds_raw_labels = ds[label_columns]\n ds_raw_labels['marital_stat'] = ds_raw_labels['marital_stat'].apply(\n lambda x: 'never married' if x == ' Never married' else 'married')\n\n income_labels = pd.get_dummies(ds_raw_labels['income_50k'])\n np.save(data_path + '/income_labels.npy',\n np.array(income_labels), allow_pickle=False)\n\n married_labels = pd.get_dummies(ds_raw_labels['marital_stat'])\n np.save(data_path + '/married_labels.npy',\n np.array(married_labels), allow_pickle=False)\n\n data = np.load(data_path + '/data.npy').astype(np.float32)\n income = np.load(data_path + '/income_labels.npy').astype(np.float32)\n married = np.load(data_path + '/married_labels.npy').astype(np.float32)\n\n mindrecord_path = data_path + \"/mindrecord\"\n\n if not os.path.exists(mindrecord_path):\n 
os.mkdir(mindrecord_path)\n\n if do_train:\n MINDRECORD_FILE = mindrecord_path + \"/train.mindrecord\"\n writer = FileWriter(file_name=MINDRECORD_FILE, shard_num=1)\n\n nlp_schema = {\"data\": {\"type\": \"float32\", \"shape\": [-1]},\n \"income_labels\": {\"type\": \"float32\", \"shape\": [-1]},\n \"married_labels\": {\"type\": \"float32\", \"shape\": [-1]}}\n writer.add_schema(nlp_schema, \"it is a preprocessed nlp dataset\")\n for i in range(len(data)):\n sample = {\"data\": data[i],\n \"income_labels\": income[i],\n \"married_labels\": married[i]}\n\n if i % 10000 == 0:\n print(f'write {i} lines.')\n\n writer.write_raw_data([sample])\n writer.commit()\n else:\n MINDRECORD_FILE = mindrecord_path + \"/eval.mindrecord\"\n writer = FileWriter(file_name=MINDRECORD_FILE, shard_num=1)\n\n nlp_schema = {\"data\": {\"type\": \"float32\", \"shape\": [-1]},\n \"income_labels\": {\"type\": \"float32\", \"shape\": [-1]},\n \"married_labels\": {\"type\": \"float32\", \"shape\": [-1]}}\n writer.add_schema(nlp_schema, \"it is a preprocessed nlp dataset\")\n for i in range(len(data)):\n sample = {\"data\": data[i],\n \"income_labels\": income[i],\n \"married_labels\": married[i]}\n\n if i % 10000 == 0:\n print(f'write {i} lines.')\n\n writer.write_raw_data([sample])\n writer.commit()\n\n\nif __name__ == '__main__':\n generate_npy(data_path=config.local_data_path, do_train=True)\n generate_npy(data_path=config.local_data_path, do_train=False)\n",
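In the writer above, the train and eval branches differ only in the output file name, so the same reader works for both. Once train.mindrecord / eval.mindrecord exist, they would typically be read back with MindDataset; a minimal sketch under the assumption that the column names match the schema in the entry:

import mindspore.dataset as ds

def load_census_mindrecord(mindrecord_file, batch_size=32, shuffle=True):
    """Read the preprocessed census MindRecord back into a batched dataset."""
    dataset = ds.MindDataset(mindrecord_file,
                             columns_list=["data", "income_labels", "married_labels"],
                             shuffle=shuffle)
    return dataset.batch(batch_size, drop_remainder=True)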
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"trainingcell.py\"\"\"\nimport os\nimport psutil\nimport numpy as np\nimport mindspore as ms\nimport mindspore.ops as P\n\nfrom mindspore import nn\nfrom mindspore import ParameterTuple, Tensor, Parameter\n\n\ndef show_memory_info(hint=\"\"):\n pid = os.getpid()\n\n p = psutil.Process(pid)\n info = p.memory_full_info()\n memory = info.uss/1024./1024\n print(f\"{hint} memory used: {memory} MB \")\n\n\nclass CriterionWithNet(nn.Cell):\n \"\"\"\n class of criterion with network\n \"\"\"\n def __init__(self, backbone, ce_loss, tri_loss, loss_func='id'):\n super(CriterionWithNet, self).__init__()\n self._backbone = backbone\n self.loss_func = loss_func\n self._ce_loss = ce_loss\n self._tri_loss = tri_loss\n self.wg = Parameter(Tensor(np.array([0.0]), dtype=ms.float32),\\\n name=\"wg\", requires_grad=False)\n\n # self.total_loss = 0.0\n # self.wg = 0.0\n\n self.cat = P.Concat()\n self.cast = P.Cast()\n self.sum = P.ReduceSum()\n self.max = P.ArgMaxWithValue(axis=1)\n self.eq = P.Equal()\n\n def construct(self, img1, img2, label1, label2, adj, modal=0):\n \"\"\"\n function of constructing\n \"\"\"\n out_graph = None\n\n if self._backbone.nheads > 0:\n feat, _, out, out_att, out_graph = self._backbone(\n img1, x2=img2, adj=adj, modal=modal)\n else:\n feat, _, out, out_att = self._backbone(\n img1, x2=img2, modal=modal)\n\n label = self.cat((label1, label2))\n label_ = self.cast(label, ms.int32)\n\n loss_id = self._ce_loss(out, label_)\n loss_tri = self._tri_loss(feat, label)\n\n if self.loss_func == 'tri':\n loss_total = loss_tri\n elif self.loss_func == 'id+tri':\n loss_total = loss_id + loss_tri\n else:\n loss_total = loss_id\n\n if self._backbone.part > 0:\n loss_p = self._ce_loss(out_att, label_)\n loss_total = loss_total + loss_p\n\n if self._backbone.nheads > 0:\n loss_g = P.NLLLoss(\"mean\")(out_graph, label_,\n P.Ones()((out_graph.shape[1]), ms.float32))\n loss_total = loss_total + self.wg * loss_g[0]\n\n return loss_total\n\n @property\n def backbone_network(self):\n return self._backbone\n\n\nclass OptimizerWithNetAndCriterion(nn.Cell):\n \"\"\"\n class of optimization methods\n \"\"\"\n def __init__(self, network, optimizer):\n super(OptimizerWithNetAndCriterion, self).__init__()\n self.network = network\n self.weights = ParameterTuple(optimizer.parameters)\n self.optimizer = optimizer\n self.grad = P.GradOperation(get_by_list=True)\n\n def construct(self, x1, x2, y1, y2, adj):\n loss = self.network(x1, x2, y1, y2, adj)\n weights = self.weights\n grads = self.grad(self.network, weights)(x1, x2, y1, y2, adj)\n loss = P.Depend()(loss, self.optimizer(grads))\n return loss\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"efficientnetv2 model define\"\"\"\nimport os\n\nimport numpy as np\nfrom mindspore import Tensor, dtype, ops\nfrom mindspore import nn\nfrom mindspore.common import initializer as weight_init\n\nfrom src.models.var_init import RandomNormal, RandomUniform\n\n__all__ = ['effnetv2_s', 'effnetv2_m', 'effnetv2_l', 'effnetv2_xl']\n\n\nclass DropPath(nn.Cell):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). \"\"\"\n\n def __init__(self, drop_prob, ndim):\n super(DropPath, self).__init__()\n self.drop = nn.Dropout(keep_prob=1 - drop_prob)\n shape = (1,) + (1,) * (ndim + 1)\n self.ndim = ndim\n self.mask = Tensor(np.ones(shape), dtype=dtype.float32)\n\n def construct(self, x):\n if not self.training:\n return x\n mask = ops.Tile()(self.mask, (x.shape[0],) + (1,) * (self.ndim + 1))\n out = self.drop(mask)\n out = out * x\n return out\n\n\nclass DropPath2D(DropPath):\n \"\"\"DropPath2D\"\"\"\n\n def __init__(self, drop_prob):\n super(DropPath2D, self).__init__(drop_prob=drop_prob, ndim=2)\n\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n return new_v\n\n\nclass SiLU(nn.Cell):\n \"\"\"SiLU\"\"\"\n\n def __init__(self):\n super(SiLU, self).__init__()\n self.ops_sigmoid = nn.Sigmoid()\n\n def construct(self, x):\n return x * self.ops_sigmoid(x)\n\n def __repr__(self):\n return \"SiLU<x * Sigmoid(x)>\"\n\n\nclass SELayer(nn.Cell):\n \"\"\"SELayer\"\"\"\n\n def __init__(self, inp, oup, reduction=4):\n super(SELayer, self).__init__()\n self.avg_pool = ops.ReduceMean(keep_dims=True)\n self.fc = nn.SequentialCell([\n nn.Conv2d(in_channels=oup, out_channels=inp // reduction,\n kernel_size=1, has_bias=True),\n SiLU(),\n nn.Conv2d(in_channels=inp // reduction, out_channels=oup,\n kernel_size=1, has_bias=True),\n nn.Sigmoid()\n ])\n\n def construct(self, x):\n y = self.avg_pool(x, [2, 3])\n y = self.fc(y)\n return y * x\n\n\ndef conv_3x3_bn(inp, oup, stride, norm_type):\n \"\"\"conv_3x3_bn\"\"\"\n return nn.SequentialCell([\n nn.Conv2d(in_channels=inp, out_channels=oup, kernel_size=3, stride=stride, pad_mode='same', has_bias=False),\n norm_type(num_features=oup, momentum=0.9, eps=1e-3),\n SiLU()\n ])\n\n\ndef conv_1x1_bn(inp, oup, norm_type):\n \"\"\"conv_1x1_bn\"\"\"\n return nn.SequentialCell([\n nn.Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, stride=1, has_bias=False),\n norm_type(num_features=oup, momentum=0.9, eps=1e-3),\n SiLU()\n 
])\n\n\nclass MBConv(nn.Cell):\n \"\"\"MBConv\"\"\"\n\n def __init__(self, inp, oup, stride, expand_ratio, use_se, norm_type, drop_path_rate=0.):\n super(MBConv, self).__init__()\n assert stride in [1, 2]\n\n hidden_dim = round(inp * expand_ratio)\n self.identity = stride == 1 and inp == oup\n if self.identity:\n self.drop_path = DropPath2D(drop_path_rate)\n if use_se:\n self.conv = nn.SequentialCell([\n # pw\n nn.Conv2d(in_channels=inp, out_channels=hidden_dim, kernel_size=1, stride=1, pad_mode='pad',\n padding=0, has_bias=False),\n norm_type(num_features=hidden_dim, momentum=0.9, eps=1e-3),\n SiLU(),\n # dw\n nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, stride=stride,\n pad_mode='same', group=hidden_dim, has_bias=False),\n norm_type(num_features=hidden_dim, momentum=0.9, eps=1e-3),\n SiLU(),\n SELayer(inp, hidden_dim),\n # pw-linear\n nn.Conv2d(in_channels=hidden_dim, out_channels=oup, kernel_size=1, stride=1, has_bias=False),\n norm_type(num_features=oup, momentum=0.9, eps=1e-3),\n ])\n else:\n # fused branch\n if expand_ratio == 1:\n # when expand_ratio == 1: apply dw\n self.conv = nn.SequentialCell([\n # pw-linear\n nn.Conv2d(in_channels=hidden_dim, out_channels=oup, kernel_size=3, stride=1, pad_mode='same',\n has_bias=False),\n norm_type(num_features=oup, momentum=0.9, eps=1e-3),\n SiLU(),\n ])\n else:\n self.conv = nn.SequentialCell([\n # fused\n nn.Conv2d(in_channels=inp, out_channels=hidden_dim, kernel_size=3, stride=stride, pad_mode='same',\n has_bias=False),\n norm_type(num_features=hidden_dim, momentum=0.9, eps=1e-3),\n SiLU(),\n # pw-linear\n nn.Conv2d(in_channels=hidden_dim, out_channels=oup, kernel_size=1, stride=1, pad_mode='pad',\n padding=0, has_bias=False),\n norm_type(num_features=oup, momentum=0.9, eps=1e-3),\n ])\n\n def construct(self, x):\n if self.identity:\n return x + self.drop_path(self.conv(x))\n return self.conv(x)\n\n\nclass EffNetV2(nn.Cell):\n \"\"\"EffNetV2\"\"\"\n\n def __init__(self, cfgs, args, num_classes=1000, width_mult=1., drop_out_rate=0., drop_path_rate=0.):\n super(EffNetV2, self).__init__()\n if args.device_target == \"Ascend\" and int(os.getenv(\"DEVICE_NUM\", args.device_num)) > 1:\n norm_type = nn.SyncBatchNorm\n else:\n norm_type = nn.BatchNorm2d\n self.cfgs = cfgs\n # building first layer\n input_channel = _make_divisible(cfgs[0][1] * width_mult, 8)\n layers = [conv_3x3_bn(3, input_channel, 2, norm_type=norm_type)]\n # building inverted residual blocks\n block = MBConv\n layers_num = 0\n for _, _, n, _, _ in self.cfgs:\n layers_num += n\n drop_path_rates = np.linspace(0, drop_path_rate, int(layers_num) + 1)\n index = 0\n for t, c, n, s, use_se in self.cfgs:\n output_channel = _make_divisible(c * width_mult, 8)\n for i in range(n):\n layers.append(\n block(input_channel, output_channel, s if i == 0 else 1, t,\n use_se, norm_type, drop_path_rates[index]))\n input_channel = output_channel\n index += 1\n self.features = nn.CellList(layers)\n # building last several layers\n output_channel = _make_divisible(1280 * width_mult, 8) if width_mult > 1.0 else 1280\n self.conv = conv_1x1_bn(input_channel, output_channel, norm_type=norm_type)\n self.avgpool = ops.ReduceMean(keep_dims=False)\n self.dropout = nn.Dropout(keep_prob=1 - drop_out_rate)\n self.classifier = nn.Dense(in_channels=output_channel, out_channels=num_classes)\n for _, cell in self.cells_and_names():\n if isinstance(cell, nn.Dense):\n init_range = 1.0 / np.sqrt(cell.weight.shape[0])\n cell.weight.set_data(weight_init.initializer(RandomUniform(init_range),\n 
cell.weight.shape,\n cell.weight.dtype))\n if cell.bias is not None:\n cell.bias.set_data(weight_init.initializer(weight_init.Zero(),\n cell.bias.shape,\n cell.bias.dtype))\n if isinstance(cell, nn.Conv2d):\n out_channel, _, kernel_size_h, kernel_size_w = cell.weight.shape\n stddev = np.sqrt(2 / int(out_channel * kernel_size_h * kernel_size_w))\n cell.weight.set_data(weight_init.initializer(RandomNormal(std=stddev),\n cell.weight.shape,\n cell.weight.dtype))\n if cell.bias is not None:\n cell.bias.set_data(weight_init.initializer(weight_init.Zero(),\n cell.bias.shape,\n cell.bias.dtype))\n\n def construct(self, x):\n for feature in self.features:\n x = feature(x)\n x = self.conv(x)\n x = self.avgpool(x, [2, 3])\n x = self.dropout(x)\n x = self.classifier(x)\n return x\n\n\ndef effnetv2_s(args):\n \"\"\"\n Constructs a EfficientNetV2-S model\n \"\"\"\n cfgs = [\n # t, c, n, s, SE\n [1, 24, 2, 1, 0],\n [4, 48, 4, 2, 0],\n [4, 64, 4, 2, 0],\n [4, 128, 6, 2, 1],\n [6, 160, 9, 1, 1],\n [6, 256, 15, 2, 1],\n ]\n return EffNetV2(cfgs, args=args, num_classes=args.num_classes, drop_out_rate=args.drop_out_rate,\n drop_path_rate=args.drop_path_rate)\n\n\ndef effnetv2_m(args):\n \"\"\"\n Constructs a EfficientNetV2-M model\n \"\"\"\n cfgs = [\n # t, c, n, s, SE\n [1, 24, 3, 1, 0],\n [4, 48, 5, 2, 0],\n [4, 80, 5, 2, 0],\n [4, 160, 7, 2, 1],\n [6, 176, 14, 1, 1],\n [6, 304, 18, 2, 1],\n [6, 512, 5, 1, 1],\n ]\n return EffNetV2(cfgs, args=args, num_classes=args.num_classes, drop_out_rate=args.drop_out_rate,\n drop_path_rate=args.drop_path_rate)\n\n\ndef effnetv2_l(args):\n \"\"\"\n Constructs a EfficientNetV2-L model\n \"\"\"\n cfgs = [\n # t, c, n, s, SE\n [1, 32, 4, 1, 0],\n [4, 64, 7, 2, 0],\n [4, 96, 7, 2, 0],\n [4, 192, 10, 2, 1],\n [6, 224, 19, 1, 1],\n [6, 384, 25, 2, 1],\n [6, 640, 7, 1, 1],\n ]\n return EffNetV2(cfgs, args=args, num_classes=args.num_classes, drop_out_rate=args.drop_out_rate,\n drop_path_rate=args.drop_path_rate)\n\n\ndef effnetv2_xl(args):\n \"\"\"\n Constructs a EfficientNetV2-XL model\n \"\"\"\n cfgs = [\n # t, c, n, s, SE\n [1, 32, 4, 1, 0],\n [4, 64, 8, 2, 0],\n [4, 96, 8, 2, 0],\n [4, 192, 16, 2, 1],\n [6, 256, 24, 1, 1],\n [6, 512, 32, 2, 1],\n [6, 640, 8, 1, 1],\n ]\n return EffNetV2(cfgs, args=args, num_classes=args.num_classes, drop_out_rate=args.drop_out_rate,\n drop_path_rate=args.drop_path_rate)\n",
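One detail of the entry above: its _make_divisible rounds v to the nearest multiple of divisor but omits the "new_v < 0.9 * v" correction present in the referenced TensorFlow implementation, so for small channel counts the rounded value can fall noticeably below the input. A worked check of the rounding rule itself (plain Python, no framework required):

def make_divisible(v, divisor=8, min_value=None):
    """Same rounding as in the entry: nearest multiple of divisor, floored at min_value."""
    if min_value is None:
        min_value = divisor
    return max(min_value, int(v + divisor / 2) // divisor * divisor)

assert make_divisible(24) == 24
assert make_divisible(90) == 88   # rounds down to the nearest multiple of 8
assert make_divisible(3) == 8     # floored at min_value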
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n''' infer module '''\nimport datetime\nimport os\nimport numpy as np\nimport MxpiDataType_pb2 as MxpiDataType\n\nfrom utils.transforms import image_proc, gene_mt_input, flip_back, flip_pairs, get_posenet_preds\nfrom StreamManagerApi import StreamManagerApi, MxDataInput, StringVector, InProtobufVector, MxProtobufIn\n\n\nclass SdkInfer:\n ''' sdk infer '''\n def __init__(self, pipeline_path):\n self.pipline_path = pipeline_path\n self._stream_api = None\n self._data_input = None\n self._device_id = None\n self.stream_name = None\n\n def init_stream(self):\n ''' init stream '''\n stream_manager_api = StreamManagerApi()\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(f\"Failed to init stream manager, ret={ret}.\")\n return False\n # with open('../pipline/posenet.pipline', 'rb') as pl:\n with open(self.pipline_path, 'rb') as pl:\n pipline_stream = pl.read()\n ret = stream_manager_api.CreateMultipleStreams(pipline_stream)\n if ret != 0:\n print(f\"Failed to create stream, ret={ret}.\")\n return False\n self._stream_api = stream_manager_api\n return True\n\n def send_vision_buf(self, stream_name, data, in_plug_id):\n ''' send visionbuf to model '''\n if self._stream_api is None:\n print('stream_api is None')\n return False\n input_data = data.tobytes()\n vision_list = MxpiDataType.MxpiVisionList()\n vision_vec = vision_list.visionVec.add()\n vision_vec.visionInfo.format = 13\n vision_vec.visionInfo.width = 256\n vision_vec.visionInfo.height = 256\n vision_vec.visionInfo.widthAligned = 256\n vision_vec.visionInfo.heightAligned = 256\n vision_vec.visionData.memType = 0\n vision_vec.visionData.dataStr = input_data\n vision_vec.visionData.dataSize = len(input_data)\n\n key = \"appsrc{}\".format(0).encode('utf-8')\n protobuf_vec = InProtobufVector()\n protobuf = MxProtobufIn()\n protobuf.key = key\n protobuf.type = b\"MxTools.MxpiVisionList\"\n protobuf.protobuf = vision_list.SerializeToString()\n protobuf_vec.push_back(protobuf)\n unique_id = self._stream_api.SendProtobuf(\n stream_name, in_plug_id, protobuf_vec)\n if unique_id < 0:\n print(\"Failed to send data to stream.\")\n return False\n return unique_id\n\n def send_package_buf(self, stream_name, data, appsrc_pos):\n ''' send packagebuf to model '''\n # create MxpiTensorPackageList\n tensor_package_list = MxpiDataType.MxpiTensorPackageList()\n tensor_package = tensor_package_list.tensorPackageVec.add()\n if isinstance(data, list):\n data = np.array(data)\n array_bytes = data.tobytes()\n data_input = MxDataInput()\n data_input.data = array_bytes\n tensor_vec = tensor_package.tensorVec.add()\n tensor_vec.deviceId = 0\n tensor_vec.memType = 0\n for i in data.shape:\n tensor_vec.tensorShape.append(i)\n tensor_vec.dataStr = data_input.data\n tensor_vec.tensorDataSize = len(array_bytes)\n\n key = \"appsrc{}\".format(appsrc_pos).encode('utf-8')\n protobuf_vec = 
InProtobufVector()\n protobuf = MxProtobufIn()\n protobuf.key = key\n protobuf.type = b\"MxTools.MxpiTensorPackageList\"\n protobuf.protobuf = tensor_package_list.SerializeToString()\n protobuf_vec.push_back(protobuf)\n unique_id = self._stream_api.SendProtobuf(\n stream_name, appsrc_pos, protobuf_vec)\n return unique_id\n\n def send_mxdata(self, stream_name, data, appsrc_pos):\n ''' send_mxdata '''\n data_input = MxDataInput()\n data_input.data = data.tobytes()\n unique_id = self._stream_api.SendData(\n stream_name, appsrc_pos, data_input)\n if unique_id < 0:\n print(\"Failed to send data to stream.\")\n return False\n return True\n\n def get_result(self, stream_name, appsrc_pos=0):\n ''' get result bytes and convert to array '''\n key_vec = StringVector()\n key_vec.push_back(b'mxpi_tensorinfer0')\n infer_result = self._stream_api.GetProtobuf(\n stream_name, appsrc_pos, key_vec)\n if infer_result[0].errorCode != 0:\n print(\"GetResultWithUniqueId error. errorCode=%d, errorMsg=%s\" % (\n infer_result.errorCode, infer_result.data.decode()))\n return False\n if stream_name == b'PoseEstNet0':\n return self._convert_posenet_result(infer_result)\n if stream_name == b'MultiTaskNet0':\n return self._convert_multitask_result(infer_result)\n return None\n\n def destroy(self):\n ''' destroy stream '''\n self._stream_api.DestroyAllStreams()\n\n @staticmethod\n def _convert_posenet_result(infer_result):\n ''' convert bytes to array '''\n result = MxpiDataType.MxpiTensorPackageList()\n result.ParseFromString(infer_result[0].messageBuf)\n tensor_vec = result.tensorPackageVec[0].tensorVec[0]\n data_str = tensor_vec.dataStr\n tensor_shape = tensor_vec.tensorShape\n infer_array = np.frombuffer(data_str, dtype=np.float32)\n infer_array.shape = tensor_shape\n return infer_array\n\n @staticmethod\n def _convert_multitask_result(infer_result):\n ''' convert bytes to array '''\n result = MxpiDataType.MxpiTensorPackageList()\n result.ParseFromString(infer_result[0].messageBuf)\n package_vec = result.tensorPackageVec[0]\n\n tensor_vec_ids = package_vec.tensorVec[0]\n data_str_ids = tensor_vec_ids.dataStr\n tensor_shape_ids = tensor_vec_ids.tensorShape\n infer_array_ids = np.frombuffer(data_str_ids, dtype=np.float32)\n infer_array_ids.shape = tensor_shape_ids\n\n tensor_vec_cls = package_vec.tensorVec[1]\n data_str_cls = tensor_vec_cls.dataStr\n tensor_shape_cls = tensor_vec_cls.tensorShape\n infer_array_cls = np.frombuffer(data_str_cls, dtype=np.float32)\n infer_array_cls.shape = tensor_shape_cls\n\n tensor_vec_tps = package_vec.tensorVec[2]\n data_str_tps = tensor_vec_tps.dataStr\n tensor_shape_tps = tensor_vec_tps.tensorShape\n infer_array_tps = np.frombuffer(data_str_tps, dtype=np.float32)\n infer_array_tps.shape = tensor_shape_tps\n\n tensor_vec_fts = package_vec.tensorVec[3]\n data_str_fts = tensor_vec_fts.dataStr\n tensor_shape_fts = tensor_vec_fts.tensorShape\n infer_array_fts = np.frombuffer(data_str_fts, dtype=np.float32)\n infer_array_fts.shape = tensor_shape_fts\n return infer_array_ids, infer_array_cls, infer_array_tps, infer_array_fts\n\n\ndef infer(img_dir, result_path, pipline_path, heatmapaware=True,\n segmentaware=True, FLIP_TEST=True, SHIFT_HEATMAP=True, BatchSize=1):\n ''' start infer '''\n stream = SdkInfer(pipline_path)\n stream.init_stream()\n file_list = os.listdir(img_dir)\n file_list.sort()\n\n pn_inputs = []\n imgs = []\n centers = []\n scales = []\n batch_cout = 0\n infer_id = 0\n cost_mils = 0.0\n with open(result_path, 'w') as f_write:\n for _, file_name in enumerate(file_list):\n if 
not file_name.lower().endswith((\".jpg\", \"jpeg\")):\n continue\n img_path = os.path.join(img_dir, file_name)\n start_time = datetime.datetime.now()\n img_hwc, pn_input, center, scale = image_proc(img_path)\n imgs.append(img_hwc)\n pn_inputs.append(pn_input)\n batch_cout += 1\n if batch_cout < BatchSize:\n continue\n infer_id = stream.send_package_buf(b'PoseEstNet0', pn_inputs, 0)\n posenet_result = stream.get_result(b'PoseEstNet0', infer_id)\n centers.append(center)\n scales.append(scale)\n if FLIP_TEST:\n input_flipped = np.flip(pn_inputs, 3)\n infer_id = stream.send_package_buf(b'PoseEstNet0', input_flipped, 0)\n outputs_flipped = stream.get_result(b'PoseEstNet0', infer_id)\n if isinstance(outputs_flipped, list):\n output_flipped = outputs_flipped[-1]\n else:\n output_flipped = outputs_flipped\n output_flipped = flip_back(\n np.array(output_flipped), flip_pairs)\n # feature is not aligned, shift flipped heatmap for higher accuracy\n if SHIFT_HEATMAP: # true\n output_flipped_copy = output_flipped\n output_flipped[:, :, :,\n 1:] = output_flipped_copy[:, :, :, 0:-1]\n posenet_result = (posenet_result + output_flipped) * 0.5\n\n pn_preds = get_posenet_preds(\n posenet_result, center=centers, scale=scales)\n mt_input, vkpt = gene_mt_input(np.array(\n imgs, np.float32), posenet_result, pn_preds, heatmapaware, segmentaware)\n\n infer_id = stream.send_package_buf(b'MultiTaskNet0', mt_input, 0)\n infer_id = stream.send_package_buf(b'MultiTaskNet0', vkpt, 1)\n _, cls, tps, _ = stream.get_result(b'MultiTaskNet0', infer_id)\n end_time = datetime.datetime.now()\n cost_mils += (end_time - start_time).microseconds/1000\n pn_inputs = []\n imgs = []\n centers = []\n scales = []\n batch_cout = 0\n for i in range(BatchSize):\n res_list = file_name + ' color:' + '{:d}'.format(np.argmax(cls[i])+1) + \\\n ' type:' + '{:d}'.format(np.argmax(tps[i])+1) + '\\n'\n f_write.writelines(res_list)\n print(f'sdk run time: {cost_mils:8.2f} ms; fps: {(1000.0*len(file_list)/cost_mils):8.2f} f/s')\n stream.destroy()\n\n\ndef infer_test(stream, img_path, vkeypt=None, heatmapaware=True, segmentaware=True, FLIP_TEST=True, SHIFT_HEATMAP=True):\n ''' infer single img '''\n img_hwc, pe_input, center, scale = image_proc(img_path)\n pe_input = np.expand_dims(pe_input, axis=0)\n infer_id = stream.send_package_buf(b'PoseEstNet0', pe_input, 0)\n posenet_result = stream.get_result(b'PoseEstNet0', infer_id)\n\n if FLIP_TEST:\n input_flipped = np.flip(pe_input, 3)\n infer_id = stream.send_package_buf(b'PoseEstNet0', input_flipped, 0)\n outputs_flipped = stream.get_result(b'PoseEstNet0', infer_id)\n if isinstance(outputs_flipped, list):\n output_flipped = outputs_flipped[-1]\n else:\n output_flipped = outputs_flipped\n output_flipped = flip_back(np.array(output_flipped), flip_pairs)\n # output_flipped = np.array(output_flipped)\n # feature is not aligned, shift flipped heatmap for higher accuracy\n if SHIFT_HEATMAP: # true\n output_flipped_copy = output_flipped\n output_flipped[:, :, :, 1:] = output_flipped_copy[:, :, :, 0:-1]\n posenet_result = (posenet_result + output_flipped) * 0.5\n\n pn_preds = get_posenet_preds(posenet_result, center=[center], scale=[scale])\n\n mt_input, vkpts = gene_mt_input(\n np.array([img_hwc]), posenet_result, pn_preds, heatmapaware, segmentaware)\n\n infer_id = stream.send_package_buf(b'MultiTaskNet0', mt_input, 0)\n if vkeypt is None:\n infer_id = stream.send_package_buf(b'MultiTaskNet0', vkpts, 1)\n else:\n infer_id = stream.send_package_buf(b'MultiTaskNet0', vkeypt, 1)\n return 
stream.get_result(b'MultiTaskNet0', infer_id)\n",
"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport random\nimport os\nimport pickle\nimport itertools\nimport numpy as np\n\nimport mindspore\nimport mindspore.ops as ops\nfrom mindspore import Tensor\n\nclass Data_Utils:\n \"\"\"docstring for Data_Utils:(参数解析器,配置数据)\"\"\"\n def __init__(self, train, seed, way, shot,\n data_path, dataset_name, embedding_crop,\n batchsize, val_batch_size, test_batch_size,\n meta_val_steps, embedding_size, verbose):\n self.train = train\n self.seed = seed\n self.way = way\n self.shot = shot\n self.data_path = data_path\n self.dataset_name = dataset_name\n self.embedding_crop = embedding_crop\n self.batch_size = batchsize\n self.val_batch_size = val_batch_size\n self.test_batch_size = test_batch_size\n self.meta_val_steps = meta_val_steps\n self.embedding_size = embedding_size\n self.verbose = verbose\n\n if self.train:\n self.metasplit = ['train', 'val']\n else:\n self.metasplit = ['test']\n\n random.seed(self.seed)\n self.construct_data()\n\n def construct_data(self):\n # loading embeddings\n self.embedding_path = os.path.join(self.data_path, self.dataset_name, self.embedding_crop)\n\n self.embeddings = {}\n for d in self.metasplit:\n if self.verbose:\n print('Loading data from ' + os.path.join(self.embedding_path, d+'_embeddings.pkl') + '...')\n self.embeddings[d] = pickle.load(open(os.path.join(self.embedding_path, d+'_embeddings.pkl'), 'rb'),\n encoding='iso-8859-1')\n\n # sort images by class\n self.image_by_class = {}\n self.embed_by_name = {}\n self.class_list = {}\n for d in self.metasplit:\n self.image_by_class[d] = {}\n self.embed_by_name[d] = {}\n self.class_list[d] = set()\n keys = self.embeddings[d][\"keys\"]\n for i, k in enumerate(keys):\n _, class_name, img_name = k.split('-')\n if class_name not in self.image_by_class[d]:\n self.image_by_class[d][class_name] = []\n self.image_by_class[d][class_name].append(img_name)\n self.embed_by_name[d][img_name] = self.embeddings[d][\"embeddings\"][i]\n self.class_list[d].add(class_name)\n\n self.class_list[d] = list(self.class_list[d])\n if self.verbose:\n print('Finish constructing ' + d + ' data, total %d classes.' 
% len(self.class_list[d]))\n\n def get_batch(self, metasplit):\n \"\"\"N-way K-shot\"\"\"\n if metasplit == 'train':\n b_size = self.batch_size\n elif metasplit == 'val':\n b_size = self.val_batch_size\n else:\n b_size = self.test_batch_size\n K = self.shot\n N = self.way\n val_steps = self.meta_val_steps\n\n datasplit = ['train', 'val']\n batch = {}\n for d in datasplit:\n batch[d] = {'input': [], 'target': [], 'name': []}\n\n for _ in range(b_size):\n shuffled_classes = self.class_list[metasplit].copy()\n random.shuffle(shuffled_classes)\n\n shuffled_classes = shuffled_classes[:N]\n\n inp = {'train': [[] for i in range(N)], 'val': [[] for i in range(N)]}\n tgt = {'train': [[] for i in range(N)], 'val': [[] for i in range(N)]}\n\n for c, class_name in enumerate(shuffled_classes):\n images = np.random.choice(self.image_by_class[metasplit][class_name], K + val_steps)\n image_names = {'train': images[:K], 'val': images[K:]}\n\n for d in datasplit:\n num_images = K if d == 'train' else val_steps\n assert len(image_names[d]) == num_images\n for i in range(num_images):\n embed = self.embed_by_name[metasplit][image_names[d][i]]\n inp[d][c].append(embed)\n tgt[d][c].append(c)\n\n for d in datasplit:\n num_images = K if d == 'train' else val_steps\n\n assert len(inp['train']) == N\n assert len(inp['val']) == N\n\n permutations = list(itertools.permutations(range(N)))\n order = random.choice(permutations)\n inputs = [inp[d][i] for i in order]\n target = [tgt[d][i] for i in order]\n\n batch[d]['input'].append(np.asarray(inputs).reshape(N, num_images, -1))\n batch[d]['target'].append(np.asarray(target).reshape(N, num_images, -1))\n\n # convert to tensor\n for d in datasplit:\n num_images = K if d == 'train' else val_steps\n normalized_input = Tensor(np.array(batch[d]['input']), mindspore.float32)\n batch[d]['input'] = ops.L2Normalize(axis=-1)(normalized_input)\n batch[d]['target'] = Tensor.from_numpy(np.array(batch[d]['target']))\n\n assert batch[d]['input'].shape == (b_size, N, num_images, self.embedding_size)\n assert batch[d]['target'].shape == (b_size, N, num_images, 1)\n return batch\n",
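The batch produced by get_batch above is organised as (batch, way, shot-or-val-steps, embedding), which the entry also asserts. A tiny sketch that mirrors those shape invariants with random data, useful as a stand-in when the pickled embeddings are not available; every default value here is hypothetical:

import numpy as np

def fake_episode(batch_size=4, way=5, shot=1, val_steps=15, embedding_size=640):
    """Return arrays shaped like one N-way K-shot episode batch from Data_Utils.get_batch."""
    batch = {}
    for split, n_img in (("train", shot), ("val", val_steps)):
        batch[split] = {
            "input": np.random.randn(batch_size, way, n_img, embedding_size).astype(np.float32),
            "target": np.tile(np.arange(way).reshape(1, way, 1, 1), (batch_size, 1, n_img, 1)),
        }
    return batch

ep = fake_episode()
assert ep["train"]["input"].shape == (4, 5, 1, 640)
assert ep["val"]["target"].shape == (4, 5, 15, 1)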
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"train WGAN\"\"\"\nimport os\nimport random\nimport json\nimport time\nfrom mindspore import Tensor\nimport mindspore.nn as nn\nimport mindspore.dataset as ds\nimport mindspore.ops as ops\nfrom mindspore.common import initializer as init\nimport mindspore.common.dtype as mstype\nfrom mindspore import context\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\nfrom PIL import Image\nimport numpy as np\n\nfrom src.dataset import create_dataset\nfrom src.dcgan_model import DcganG, DcganD\nfrom src.dcgannobn_model import DcgannobnG\nfrom src.cell import GenTrainOneStepCell, DisTrainOneStepCell\nfrom src.args import get_args\n\nif __name__ == '__main__':\n t_begin = time.time()\n args_opt = get_args('train')\n print(args_opt)\n\n # init context\n target = args_opt.device_target\n context.set_context(mode=context.GRAPH_MODE, device_target=target)\n\n # whether train on modelarts or local server\n if not args_opt.is_modelarts:\n if args_opt.experiment is None:\n args_opt.experiment = 'samples'\n os.system('mkdir {0}'.format(args_opt.experiment))\n context.set_context(device_id=int(args_opt.device_id))\n dataset = create_dataset(args_opt.dataroot, args_opt.dataset, args_opt.batchSize, args_opt.imageSize, 1,\n args_opt.workers, target)\n\n else:\n import moxing as mox\n if args_opt.experiment is None:\n args_opt.experiment = '/cache/train_output'\n os.system('mkdir {0}'.format(args_opt.experiment))\n context.set_context(device_id=int(os.getenv('DEVICE_ID')))\n data_name = 'LSUN-bedroom.zip'\n local_data_url = '/cache/data_path/'\n mox.file.copy_parallel(src_url=args_opt.data_url, dst_url=local_data_url)\n zip_command = \"unzip -o -q %s -d %s\" % (local_data_url + data_name, local_data_url)\n os.system(zip_command)\n print(\"Unzip success!\")\n\n dataset = create_dataset(local_data_url, args_opt.dataset, args_opt.batchSize, args_opt.imageSize, 1,\n args_opt.workers, target)\n\n\n # fix seed\n args_opt.manualSeed = random.randint(1, 10000)\n print(\"Random Seed: \", args_opt.manualSeed)\n random.seed(args_opt.manualSeed)\n ds.config.set_seed(args_opt.manualSeed)\n\n\n # initialize hyperparameters\n nz = int(args_opt.nz)\n ngf = int(args_opt.ngf)\n ndf = int(args_opt.ndf)\n nc = int(args_opt.nc)\n n_extra_layers = int(args_opt.n_extra_layers)\n\n # write out generator config to generate images together wth training checkpoints\n generator_config = {\"imageSize\": args_opt.imageSize, \"nz\": nz, \"nc\": nc, \"ngf\": ngf,\n \"n_extra_layers\": n_extra_layers, \"noBN\": args_opt.noBN}\n\n with open(os.path.join(args_opt.experiment, \"generator_config.json\"), 'w') as gcfg:\n gcfg.write(json.dumps(generator_config) + \"\\n\")\n\n def init_weight(net):\n \"\"\"initial net weight\"\"\"\n for _, cell in net.cells_and_names():\n if isinstance(cell, (nn.Conv2d, nn.Conv2dTranspose)):\n 
cell.weight.set_data(init.initializer(init.Normal(0.02), cell.weight.shape))\n elif isinstance(cell, nn.BatchNorm2d):\n cell.gamma.set_data(init.initializer(Tensor(np.random.normal(1, 0.02, cell.gamma.shape), \\\n mstype.float32), cell.gamma.shape))\n cell.beta.set_data(init.initializer('zeros', cell.beta.shape))\n\n\n def save_image(img, img_path):\n \"\"\"save image\"\"\"\n mul = ops.Mul()\n add = ops.Add()\n if isinstance(img, Tensor):\n img = mul(img, 255 * 0.5)\n img = add(img, 255 * 0.5)\n\n img = img.asnumpy().astype(np.uint8).transpose((0, 2, 3, 1))\n\n elif not isinstance(img, np.ndarray):\n raise ValueError(\"img should be Tensor or numpy array, but get {}\".format(type(img)))\n\n IMAGE_SIZE = 64 # Image size\n IMAGE_ROW = 8 # Row num\n IMAGE_COLUMN = 8 # Column num\n PADDING = 2 # Interval of small pictures\n to_image = Image.new('RGB', (IMAGE_COLUMN * IMAGE_SIZE + PADDING * (IMAGE_COLUMN + 1),\n IMAGE_ROW * IMAGE_SIZE + PADDING * (IMAGE_ROW + 1))) # create a new picture\n # cycle\n ii = 0\n for y in range(1, IMAGE_ROW + 1):\n for x in range(1, IMAGE_COLUMN + 1):\n from_image = Image.fromarray(img[ii])\n to_image.paste(from_image, ((x - 1) * IMAGE_SIZE + PADDING * x, (y - 1) * IMAGE_SIZE + PADDING * y))\n ii = ii + 1\n\n to_image.save(img_path) # save\n\n\n # define net----------------------------------------------------------------------------------------------\n # Generator\n if args_opt.noBN:\n netG = DcgannobnG(args_opt.imageSize, nz, nc, ngf, n_extra_layers)\n else:\n netG = DcganG(args_opt.imageSize, nz, nc, ngf, n_extra_layers)\n\n # write out generator config to generate images together wth training checkpoints\n generator_config = {\"imageSize\": args_opt.imageSize, \"nz\": nz, \"nc\": nc, \"ngf\": ngf,\n \"n_extra_layers\": n_extra_layers, \"noBN\": args_opt.noBN}\n with open(os.path.join(args_opt.experiment, \"generator_config.json\"), 'w') as gcfg:\n gcfg.write(json.dumps(generator_config) + \"\\n\")\n\n init_weight(netG)\n\n if args_opt.netG != '': # load checkpoint if needed\n load_param_into_net(netG, load_checkpoint(args_opt.netG))\n print(netG)\n\n netD = DcganD(args_opt.imageSize, nz, nc, ndf, n_extra_layers)\n init_weight(netD)\n\n if args_opt.netD != '':\n load_param_into_net(netD, load_checkpoint(args_opt.netD))\n print(netD)\n\n input1 = Tensor(np.zeros([args_opt.batchSize, 3, args_opt.imageSize, args_opt.imageSize]), dtype=mstype.float32)\n noise = Tensor(np.zeros([args_opt.batchSize, nz, 1, 1]), dtype=mstype.float32)\n fixed_noise = Tensor(np.random.normal(0, 1, size=[args_opt.batchSize, nz, 1, 1]), dtype=mstype.float32)\n\n # setup optimizer\n if args_opt.adam:\n optimizerD = nn.Adam(netD.trainable_params(), learning_rate=args_opt.lrD, beta1=args_opt.beta1, beta2=.999)\n optimizerG = nn.Adam(netG.trainable_params(), learning_rate=args_opt.lrG, beta1=args_opt.beta1, beta2=.999)\n else:\n optimizerD = nn.RMSProp(netD.trainable_params(), learning_rate=args_opt.lrD, decay=0.99)\n optimizerG = nn.RMSProp(netG.trainable_params(), learning_rate=args_opt.lrG, decay=0.99)\n\n netG_train = GenTrainOneStepCell(netG, netD, optimizerG)\n netD_train = DisTrainOneStepCell(netG, netD, optimizerD, args_opt.clamp_lower, args_opt.clamp_upper)\n\n netG_train.set_train()\n netD_train.set_train()\n\n gen_iterations = 0\n\n t0 = time.time()\n # Train\n for epoch in range(args_opt.niter): # niter: the num of epoch\n data_iter = dataset.create_dict_iterator()\n length = dataset.get_dataset_size()\n i = 0\n while i < length:\n ############################\n # (1) Update D 
network\n ###########################\n for p in netD.trainable_params(): # reset requires_grad\n p.requires_grad = True # they are set to False below in netG update\n\n # train the discriminator Diters times\n if gen_iterations < 25 or gen_iterations % 500 == 0:\n Diters = 100\n else:\n Diters = args_opt.Diters\n j = 0\n while j < Diters and i < length:\n j += 1\n\n data = data_iter.__next__()\n i += 1\n\n # train with real and fake\n real = data['image']\n noise = Tensor(np.random.normal(0, 1, size=[args_opt.batchSize, nz, 1, 1]), dtype=mstype.float32)\n loss_D = netD_train(real, noise)\n\n ############################\n # (2) Update G network\n ###########################\n for p in netD.trainable_params():\n p.requires_grad = False # to avoid computation\n\n noise = Tensor(np.random.normal(0, 1, size=[args_opt.batchSize, nz, 1, 1]), dtype=mstype.float32)\n loss_G = netG_train(noise)\n gen_iterations += 1\n\n t1 = time.time()\n print('[%d/%d][%d/%d][%d] Loss_D: %f Loss_G: %f'\n % (epoch, args_opt.niter, i, length, gen_iterations,\n loss_D.asnumpy(), loss_G.asnumpy()))\n\n print('step_cost: %.4f seconds' % (float(t1 - t0)))\n t0 = t1\n\n if gen_iterations % 500 == 0:\n fake = netG(fixed_noise)\n save_image(real, '{0}/real_samples.png'.format(args_opt.experiment))\n save_image(fake, '{0}/fake_samples_{1}.png'.format(args_opt.experiment, gen_iterations))\n\n save_checkpoint(netD, '{0}/netD_epoch_{1}.ckpt'.format(args_opt.experiment, epoch))\n save_checkpoint(netG, '{0}/netG_epoch_{1}.ckpt'.format(args_opt.experiment, epoch))\n\n if args_opt.is_modelarts:\n mox.file.copy_parallel(src_url='/cache/train_output', dst_url=args_opt.train_url)\n\n t_end = time.time()\n print('total_cost: %.4f seconds' % (float(t_end - t_begin)))\n\n print(\"Train success!\")\n",
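The training loop above uses the usual WGAN critic schedule: 100 discriminator updates per generator update for the first 25 generator steps and on every 500th step afterwards, otherwise Diters updates. A one-line restatement of that schedule, handy for testing the loop logic separately (the default of 5 is illustrative; the entry takes it from args_opt.Diters):

def critic_updates(gen_iterations, diters=5):
    """Number of discriminator steps before the next generator step (mirrors the loop above)."""
    return 100 if gen_iterations < 25 or gen_iterations % 500 == 0 else diters

assert critic_updates(0) == 100
assert critic_updates(30) == 5
assert critic_updates(1000) == 100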
"\"\"\"\n# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nimport mindspore as ms\nimport mindspore.ops as ops\nfrom mindspore import load_checkpoint, load_param_into_net\n\nfrom src.model import BoneModel\nfrom src.dataset_test import TrainDataLoader\n\nsys.path.append(\"../\")\n\n\n# data_url is the directory where the data set is located,\n# and there must be two folders, images and gts, under data_url;\n\n# If inferring on modelarts, there are two zip compressed files named after images and gts under data_url,\n# and there are only these two files\n\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--is_modelarts', type=str, default=\"NO\")\nparser.add_argument('--device_target', type=str, default=\"Ascend\", help=\"Ascend, GPU, CPU\")\nparser.add_argument('--device_id', type=int, default=5, help='Number of device')\nparser.add_argument('--data_url', type=str)\nparser.add_argument('--train_url', type=str)\nparser.add_argument('--model_path', type=str)\nparser.add_argument('--pre_model', type=str)\n\npar = parser.parse_args()\n\n\ndevice_target = par.device_target\nif par.is_modelarts == \"YES\":\n device_id = int(os.getenv(\"DEVICE_ID\"))\nelse:\n device_id = int(par.device_id)\n\nms.context.set_context(device_target=device_target, device_id=device_id)\n\ndef image_loader(imagename):\n image = Image.open(imagename).convert(\"L\")\n return np.array(image)\n\ndef Fmeasure(predict_, groundtruth):\n \"\"\"\n\n Args:\n predict: predict image\n gt: ground truth\n\n Returns:\n Calculate F-measure\n \"\"\"\n sumLabel = 2 * np.mean(predict_)\n if sumLabel > 1:\n sumLabel = 1\n Label3 = predict_ >= sumLabel\n NumRec = np.sum(Label3)\n #LabelAnd = (Label3 is True)\n LabelAnd = Label3\n #NumAnd = np.sum(np.logical_and(LabelAnd, groundtruth))\n gt_t = gt > 0.5\n NumAnd = np.sum(LabelAnd * gt_t)\n num_obj = np.sum(groundtruth)\n if NumAnd == 0:\n p = 0\n r = 0\n FmeasureF = 0\n else:\n p = NumAnd / NumRec\n r = NumAnd / num_obj\n FmeasureF = (1.3 * p * r) / (0.3 * p + r)\n return FmeasureF\n\n\n\n\nif __name__ == \"__main__\":\n if par.is_modelarts == \"YES\":\n data_true_path = par.data_url\n pre_model_true_path = par.pre_model\n result_path = par.train_url\n model_true_path = par.model_path\n import moxing as mox\n\n test_out = '/cache/test_output/'\n local_data_path = '/cache/test/'\n os.system(\"mkdir {0}\".format(test_out))\n os.system(\"mkdir {0}\".format(local_data_path))\n image_name = \"images.zip\"\n gt_name = \"gts.zip\"\n mox.file.copy_parallel(src_url=data_true_path, dst_url=local_data_path)\n mox.file.copy_parallel(src_url=pre_model_true_path, dst_url=local_data_path)\n mox.file.copy_parallel(src_url=model_true_path, dst_url=local_data_path)\n zip_command1 = \"unzip -o -q %s -d %s\" % (local_data_path + image_name, local_data_path)\n 
zip_command2 = \"unzip -o -q %s -d %s\" % (local_data_path + gt_name, local_data_path)\n os.system(zip_command1)\n os.system(zip_command2)\n print(\"unzip success\")\n\n filename = os.path.join(local_data_path, \"images/\")\n gtname = os.path.join(local_data_path, 'gts/')\n pre_model_path = os.path.join(local_data_path, pre_model_true_path.split(\"/\")[-1])\n trained_model_path = os.path.join(local_data_path, model_true_path.split(\"/\")[-1])\n else:\n filename = os.path.join(par.data_url, 'images/')\n gtname = os.path.join(par.data_url, 'gts/')\n pre_model_path = par.pre_model\n trained_model_path = par.model_path\n save_path = par.train_url\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n testdataloader = TrainDataLoader(filename)\n\n model = BoneModel(device_target, pre_model_path)\n param_dict = load_checkpoint(trained_model_path)\n load_param_into_net(model, param_dict)\n\n Names = []\n for data in os.listdir(filename):\n name = data.split('.')[0]\n Names.append(name)\n Names = sorted(Names)\n i = 0\n sigmoid = ops.Sigmoid()\n for data in testdataloader.dataset.create_dict_iterator():\n data, data_org = data[\"data\"], data[\"data_org\"]\n img, _, _, _, _ = model(data)\n upsample = ops.ResizeBilinear((data_org.shape[1], data_org.shape[2]), align_corners=False)\n img = upsample(img)\n img = sigmoid(img)\n img = img.asnumpy().squeeze()\n img = (img - img.min()) / (img.max() - img.min() + 1e-8)\n img = img * 255\n data_name = Names[i]\n if par.is_modelarts == \"NO\":\n save_path_end = os.path.join(save_path, data_name + '.png')\n else:\n save_path_end = os.path.join(test_out, data_name + '.png')\n cv2.imwrite(save_path_end, img)\n print(\"--------------- %d OK ----------------\" % i)\n i += 1\n print(\"-------------- EVALUATION END --------------------\")\n if par.is_modelarts == \"YES\":\n predictpath = test_out\n mox.file.copy_parallel(src_url=test_out, dst_url=result_path)\n else:\n predictpath = par.train_url\n\n #calculate F-measure\n gtfiles = sorted([gtname + gt_file for gt_file in os.listdir(gtname)])\n predictfiles = sorted([os.path.join(predictpath, predictfile) for predictfile in os.listdir(predictpath)])\n\n Fs = []\n for i in range(len(gtfiles)):\n gt = image_loader(gtfiles[i]) / 255\n predict = image_loader(predictfiles[i]) / 255\n fmea = Fmeasure(predict, gt)\n Fs.append(fmea)\n\n print(\"Fmeasure is %.3f\" % np.mean(Fs))\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Evaluation for nasfpn\"\"\"\n\nimport os\nimport argparse\nimport numpy as np\nfrom PIL import Image\nfrom src.coco_eval import metrics\n\nparser = argparse.ArgumentParser(description='nasfpn postprocess')\nparser.add_argument(\"--result_path\", type=str, required=True, help=\"result files path.\")\nparser.add_argument(\"--dataset_path\", type=str, required=True, help=\"dataset path.\")\nparser.add_argument(\"--anno_path\", type=str, required=True, help=\"annotation json path.\")\nargs = parser.parse_args()\n\ndef get_pred(result_path, img_id):\n \"\"\"get prediction output\"\"\"\n boxes_file = os.path.join(result_path, img_id + '_0.bin')\n scores_file = os.path.join(result_path, img_id + '_1.bin')\n\n boxes = np.fromfile(boxes_file, dtype=np.float32).reshape(76725, 4)\n scores = np.fromfile(scores_file, dtype=np.float32).reshape(76725, 81)\n return boxes, scores\n\ndef get_img_size(file_name):\n \"\"\"get image size\"\"\"\n img = Image.open(file_name)\n return img.size\n\ndef get_img_set(anno_json_path):\n \"\"\"Get image path and annotation from COCO.\"\"\"\n from pycocotools.coco import COCO\n need_img_ids = []\n coco = COCO(anno_json_path)\n image_ids = coco.getImgIds()\n print(\"first dataset is {}\".format(len(image_ids)))\n for img_id in image_ids:\n iscrowd = False\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n for label in anno:\n iscrowd = iscrowd or label[\"iscrowd\"]\n if iscrowd:\n continue\n need_img_ids.append(img_id)\n return need_img_ids\n\ndef cal_acc(result_path, img_path, anno_path):\n \"\"\"calculate accuracy\"\"\"\n need_img_ids = get_img_set(anno_path)\n\n imgs = os.listdir(img_path)\n pred_data = []\n\n for img in imgs:\n img_id = img.split('.')[0]\n if int(img_id) not in need_img_ids:\n continue\n boxes, box_scores = get_pred(result_path, img_id)\n\n w, h = get_img_size(os.path.join(img_path, img))\n img_shape = np.array((h, w), dtype=np.float32)\n pred_data.append({\"boxes\": boxes,\n \"box_scores\": box_scores,\n \"img_id\": int(img_id),\n \"image_shape\": img_shape})\n\n mAP = metrics(pred_data, anno_path)\n print(f\"mAP: {mAP}\")\n\nif __name__ == '__main__':\n cal_acc(args.result_path, args.dataset_path, args.anno_path)\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"JointsDataset\"\"\"\nimport copy\nimport random\nimport cv2\nimport numpy as np\n\nfrom .transforms import get_affine_transform\nfrom .transforms import affine_transform\nfrom .transforms import fliplr_joints\n\nclass JointsDataset():\n \"\"\"JointsDataset\"\"\"\n def __init__(self, cfg, root, is_train):\n self.num_joints = 0\n self.pixel_std = 200\n self.flip_pairs = []\n self.parent_ids = []\n\n self.is_train = is_train\n self.root = root\n if self.is_train:\n self.image_set = 'train'\n else:\n self.image_set = 'test'\n\n self.data_format = cfg.DATASET.DATA_FORMAT\n\n self.scale_factor = cfg.DATASET.SCALE_FACTOR\n self.rotation_factor = cfg.DATASET.ROT_FACTOR\n self.flip = cfg.DATASET.FLIP\n self.num_joints_half_body = cfg.DATASET.NUM_JOINTS_HALF_BODY\n self.prob_half_body = cfg.DATASET.PROB_HALF_BODY\n self.color_rgb = cfg.DATASET.COLOR_RGB\n\n self.target_type = cfg.MODEL.TARGET_TYPE\n self.image_size = np.array(cfg.MODEL.IMAGE_SIZE)\n self.heatmap_size = np.array(cfg.MODEL.HEATMAP_SIZE)\n self.sigma = cfg.MODEL.SIGMA\n self.use_different_joints_weight = cfg.LOSS.USE_DIFFERENT_JOINTS_WEIGHT\n self.joints_weight = 1\n\n self.db = []\n\n def _get_db(self):\n raise NotImplementedError\n\n def evaluate(self, preds, output_dir, *args, **kwargs):\n raise NotImplementedError\n\n def __len__(self,):\n return len(self.db)\n\n def __getitem__(self, idx):\n db_rec = copy.deepcopy(self.db[idx])\n\n image_file = db_rec['image']\n filename = db_rec['filename'] if 'filename' in db_rec else ''\n imgnum = db_rec['imgnum'] if 'imgnum' in db_rec else ''\n\n if self.data_format == 'zip':\n from .zipreader import imread\n data_numpy = imread(\n image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION\n )\n else:\n data_numpy = cv2.imread(\n image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION\n )\n\n if self.color_rgb:\n data_numpy = cv2.cvtColor(data_numpy, cv2.COLOR_BGR2RGB)\n\n if data_numpy is None:\n raise ValueError('Fail to read {}'.format(image_file))\n\n joints = db_rec['joints_3d']\n joints_vis = db_rec['joints_3d_vis']\n\n c = db_rec['center']\n s = db_rec['scale']\n score = db_rec['score'] if 'score' in db_rec else 1\n r = 0\n\n if self.is_train:\n sf = self.scale_factor\n rf = self.rotation_factor\n s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)\n r = np.clip(np.random.randn()*rf, -rf*2, rf*2) \\\n if random.random() <= 0.6 else 0\n\n if self.flip and random.random() <= 0.5:\n data_numpy = data_numpy[:, ::-1, :]\n joints, joints_vis = fliplr_joints(\n joints, joints_vis, data_numpy.shape[1], self.flip_pairs)\n c[0] = data_numpy.shape[1] - c[0] - 1\n\n trans = get_affine_transform(c, s, r, self.image_size)\n\n _input = cv2.warpAffine(\n data_numpy,\n trans,\n (int(self.image_size[0]), int(self.image_size[1])),\n flags=cv2.INTER_LINEAR)\n\n for i in range(self.num_joints):\n if joints_vis[i, 0] 
> 0.0:\n joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)\n\n target, target_weight = self.generate_target(joints, joints_vis)\n\n meta = {\n 'image': image_file,\n 'filename': filename,\n 'imgnum': imgnum,\n 'joints': joints,\n 'joints_vis': joints_vis,\n 'center': c,\n 'scale': s,\n 'rotation': r,\n 'score': score\n }\n\n if self.is_train:\n return _input, target, target_weight\n\n return _input, target, target_weight, meta['center'], \\\n meta['scale'], meta['score'], idx, meta['joints'], meta['joints_vis']\n\n def select_data(self, db):\n \"\"\"select_data\"\"\"\n db_selected = []\n for rec in db:\n num_vis = 0\n joints_x = 0.0\n joints_y = 0.0\n for joint, joint_vis in zip(\n rec['joints_3d'], rec['joints_3d_vis']):\n if joint_vis[0] <= 0:\n continue\n num_vis += 1\n\n joints_x += joint[0]\n joints_y += joint[1]\n if num_vis == 0:\n continue\n\n joints_x, joints_y = joints_x / num_vis, joints_y / num_vis\n\n area = rec['scale'][0] * rec['scale'][1] * (self.pixel_std**2)\n joints_center = np.array([joints_x, joints_y])\n bbox_center = np.array(rec['center'])\n diff_norm2 = np.linalg.norm((joints_center-bbox_center), 2)\n ks = np.exp(-1.0*(diff_norm2**2) / ((0.2)**2*2.0*area))\n\n metric = (0.2 / 16) * num_vis + 0.45 - 0.2 / 16\n if ks > metric:\n db_selected.append(rec)\n\n return db_selected\n\n def generate_target(self, joints, joints_vis):\n '''\n :param joints: [num_joints, 3]\n :param joints_vis: [num_joints, 3]\n :return: target, target_weight(1: visible, 0: invisible)\n '''\n target_weight = np.ones((self.num_joints, 1), dtype=np.float32)\n target_weight[:, 0] = joints_vis[:, 0]\n\n target = np.zeros((self.num_joints, self.heatmap_size[1], self.heatmap_size[0]), dtype=np.float32)\n\n assert self.target_type == 'gaussian', \\\n 'Only support gaussian map now!'\n\n if self.target_type == 'gaussian':\n tmp_size = self.sigma * 3\n\n for joint_id in range(self.num_joints):\n feat_stride = self.image_size / self.heatmap_size\n mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)\n mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)\n # Check that any part of the gaussian is in-bounds\n ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]\n br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\n if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \\\n or br[0] < 0 or br[1] < 0:\n # If not, just return the image as is\n target_weight[joint_id] = 0\n continue\n\n # # Generate gaussian\n size = 2 * tmp_size + 1\n x = np.arange(0, size, 1, np.float32)\n y = x[:, np.newaxis]\n x0 = y0 = size // 2\n # The gaussian is not normalized, we want the center value to equal 1\n g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))\n\n # Usable gaussian range\n g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]\n g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]\n # Image range\n img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])\n img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])\n\n v = target_weight[joint_id]\n if v > 0.5:\n target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]\n\n if self.use_different_joints_weight:\n target_weight = np.multiply(target_weight, self.joints_weight)\n\n return target, target_weight\n",
"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n##############export checkpoint file into air, onnx or mindir model#################\npython export.py\n\"\"\"\nimport argparse\nimport numpy as np\n\nfrom mindspore import Tensor, export, context\n\nfrom src.models import FaceNetModelwithLoss\n\nparser = argparse.ArgumentParser(description='FaceNet export')\nparser.add_argument(\"--device_id\", type=int, default=0, help=\"Device id\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"batch size\")\nparser.add_argument(\"--ckpt_file\", type=str, required=True, help=\"Checkpoint file path.\")\nparser.add_argument(\"--net_name\", type=str, default=\"facenet\", help=\"network name.\")\nparser.add_argument('--width', type=int, default=224, help='input width')\nparser.add_argument('--height', type=int, default=224, help='input height')\nparser.add_argument(\"--file_format\", type=str, choices=[\"AIR\", \"ONNX\", \"MINDIR\"], default=\"MINDIR\", help=\"file format\")\nargs = parser.parse_args()\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\ncontext.set_context(device_id=args.device_id)\n\nif __name__ == '__main__':\n\n assert args.ckpt_file is not None, \"checkpoint_path is None.\"\n\n net = FaceNetModelwithLoss(num_classes=500, margin=0.5)\n\n input_arr = Tensor(np.zeros([args.batch_size, 3, args.height, args.width], np.float32))\n export(net, input_arr, file_name=args.net_name, file_format=args.file_format)\n",
"\"\"\"\nget dataset lst\n\"\"\"\n\nimport argparse\nimport os\n\nimport numpy as np\nimport scipy.io\nfrom PIL import Image\n\nparser = argparse.ArgumentParser('dataset list generator')\nparser.add_argument(\"--data_dir\", type=str, default='./', help='where dataset stored.')\n\nargs, _ = parser.parse_known_args()\n\ndata_dir = args.data_dir\nprint(\"Data dir is:\", data_dir)\n\n#\nVOC_IMG_DIR = os.path.join(data_dir, 'VOCdevkit/VOC2012/JPEGImages')\nVOC_ANNO_DIR = os.path.join(data_dir, 'VOCdevkit/VOC2012/SegmentationClass')\nVOC_ANNO_GRAY_DIR = os.path.join(data_dir, 'VOCdevkit/VOC2012/SegmentationClassGray')\nVOC_TRAIN_TXT = os.path.join(data_dir, 'VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt')\nVOC_VAL_TXT = os.path.join(data_dir, 'VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt')\n\nSBD_ANNO_DIR = os.path.join(data_dir, 'benchmark_RELEASE/dataset/cls')\nSBD_IMG_DIR = os.path.join(data_dir, 'benchmark_RELEASE/dataset/img')\nSBD_ANNO_PNG_DIR = os.path.join(data_dir, 'benchmark_RELEASE/dataset/cls_png')\nSBD_ANNO_GRAY_DIR = os.path.join(data_dir, 'benchmark_RELEASE/dataset/cls_png_gray')\nSBD_TRAIN_TXT = os.path.join(data_dir, 'benchmark_RELEASE/dataset/train.txt')\nSBD_VAL_TXT = os.path.join(data_dir, 'benchmark_RELEASE/dataset/val.txt')\n\nVOC_TRAIN_LST_TXT = os.path.join(data_dir, 'voc_train_lst.txt')\nVOC_VAL_LST_TXT = os.path.join(data_dir, 'voc_val_lst.txt')\nVOC_AUG_TRAIN_LST_TXT = os.path.join(data_dir, 'vocaug_train_lst.txt')\n\n\ndef __get_data_list(data_list_file):\n with open(data_list_file, mode='r') as f:\n return f.readlines()\n\n\ndef conv_voc_colorpng_to_graypng():\n if not os.path.exists(VOC_ANNO_GRAY_DIR):\n os.makedirs(VOC_ANNO_GRAY_DIR)\n\n for ann in os.listdir(VOC_ANNO_DIR):\n ann_im = Image.open(os.path.join(VOC_ANNO_DIR, ann))\n ann_im = Image.fromarray(np.array(ann_im))\n ann_im.save(os.path.join(VOC_ANNO_GRAY_DIR, ann))\n\n\ndef __gen_palette(cls_nums=256):\n \"\"\"__gen_palette\"\"\"\n palette = np.zeros((cls_nums, 3), dtype=np.uint8)\n for i in range(cls_nums):\n lbl = i\n j = 0\n while lbl:\n palette[i, 0] |= (((lbl >> 0) & 1) << (7 - j))\n palette[i, 1] |= (((lbl >> 1) & 1) << (7 - j))\n palette[i, 2] |= (((lbl >> 2) & 1) << (7 - j))\n lbl >>= 3\n j += 1\n return palette.flatten()\n\n\ndef conv_sbd_mat_to_png():\n \"\"\"conv_sbd_mat_to_png\"\"\"\n if not os.path.exists(SBD_ANNO_PNG_DIR):\n os.makedirs(SBD_ANNO_PNG_DIR)\n if not os.path.exists(SBD_ANNO_GRAY_DIR):\n os.makedirs(SBD_ANNO_GRAY_DIR)\n\n palette = __gen_palette()\n for an in os.listdir(SBD_ANNO_DIR):\n img_id = an[:-4]\n mat = scipy.io.loadmat(os.path.join(SBD_ANNO_DIR, an))\n anno = mat['GTcls'][0]['Segmentation'][0].astype(np.uint8)\n anno_png = Image.fromarray(anno)\n # save to gray png\n anno_png.save(os.path.join(SBD_ANNO_GRAY_DIR, img_id + '.png'))\n # save to color png use palette\n anno_png.putpalette(palette)\n anno_png.save(os.path.join(SBD_ANNO_PNG_DIR, img_id + '.png'))\n\n\ndef create_voc_train_lst_txt():\n voc_train_data_lst = __get_data_list(VOC_TRAIN_TXT)\n with open(VOC_TRAIN_LST_TXT, mode='w') as f:\n for id_ in voc_train_data_lst:\n id_ = id_.strip()\n img_ = os.path.join(VOC_IMG_DIR, id_ + '.jpg')\n anno_ = os.path.join(VOC_ANNO_GRAY_DIR, id_ + '.png')\n f.write(img_ + ' ' + anno_ + '\\n')\n\n\ndef create_voc_val_lst_txt():\n voc_val_data_lst = __get_data_list(VOC_VAL_TXT)\n with open(VOC_VAL_LST_TXT, mode='w') as f:\n for id_ in voc_val_data_lst:\n id_ = id_.strip()\n img_ = os.path.join(VOC_IMG_DIR, id_ + '.jpg')\n anno_ = os.path.join(VOC_ANNO_GRAY_DIR, id_ + 
'.png')\n f.write(img_ + ' ' + anno_ + '\\n')\n\n\ndef create_voc_train_aug_lst_txt():\n \"\"\"create_voc_train_aug_lst_txt\"\"\"\n voc_train_data_lst = __get_data_list(VOC_TRAIN_TXT)\n voc_val_data_lst = __get_data_list(VOC_VAL_TXT)\n\n sbd_train_data_lst = __get_data_list(SBD_TRAIN_TXT)\n sbd_val_data_lst = __get_data_list(SBD_VAL_TXT)\n\n with open(VOC_AUG_TRAIN_LST_TXT, mode='w') as f:\n for id_ in sbd_train_data_lst + sbd_val_data_lst:\n if id_ in voc_train_data_lst + voc_val_data_lst:\n continue\n id_ = id_.strip()\n img_ = os.path.join(SBD_IMG_DIR, id_ + '.jpg')\n anno_ = os.path.join(SBD_ANNO_GRAY_DIR, id_ + '.png')\n f.write(img_ + ' ' + anno_ + '\\n')\n\n for id_ in voc_train_data_lst:\n id_ = id_.strip()\n img_ = os.path.join(VOC_IMG_DIR, id_ + '.jpg')\n anno_ = os.path.join(VOC_ANNO_GRAY_DIR, id_ + '.png')\n f.write(img_ + ' ' + anno_ + '\\n')\n\n\nif __name__ == '__main__':\n print('converting voc color png to gray png ...')\n conv_voc_colorpng_to_graypng()\n print('converting done.')\n\n create_voc_train_lst_txt()\n print('generating voc train list success.')\n\n create_voc_val_lst_txt()\n print('generating voc val list success.')\n\n print('converting sbd annotations to png ...')\n conv_sbd_mat_to_png()\n print('converting done')\n\n create_voc_train_aug_lst_txt()\n print('generating voc train aug list success.')\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n######################## export mindir ########################\nexport net as mindir\n\"\"\"\nimport numpy as np\nimport mindspore\nfrom mindspore import Tensor, context, load_checkpoint, load_param_into_net, export\n\nfrom src.model import TCN\nfrom src.model_utils.config import config\nfrom src.model_utils.device_adapter import get_device_id\nfrom src.model_utils.moxing_adapter import moxing_wrapper\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)\n\nif config.device_target == \"Ascend\":\n context.set_context(device_id=get_device_id())\n\n\n@moxing_wrapper()\ndef export_tcn():\n \"\"\"export net as mindir\"\"\"\n # define fusion network\n net = TCN(config.channel_size, config.num_classes, [config.nhid] * config.level, config.kernel_size, config.dropout\n , config.dataset_name)\n # load network checkpoint\n param_dict = load_checkpoint(config.ckpt_file)\n load_param_into_net(net, param_dict)\n\n # export network\n if config.dataset_name == 'permuted_mnist':\n inputs = Tensor(np.ones([config.batch_size, config.channel_size, config.image_height * config.image_width]),\n mindspore.float32)\n elif config.dataset_name == 'adding_problem':\n inputs = Tensor(np.ones([config.batch_test, config.channel_size, config.seq_length]), mindspore.float32)\n export(net, inputs, file_name=config.file_name, file_format=config.file_format)\n\n\nif __name__ == '__main__':\n export_tcn()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''\nThis file evaluates the model used.\n'''\nfrom __future__ import division\n\nimport argparse\nimport os\nimport numpy as np\n\nfrom src.config import config\nfrom src.utils.transforms import flip_back\nfrom src.utils.coco import evaluate\nfrom src.utils.inference import get_final_preds\n\nflip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8],\n [9, 10], [11, 12], [13, 14], [15, 16]]\n\n\ndef parse_args():\n '''\n parse_args\n '''\n parser = argparse.ArgumentParser(description='get_acc')\n parser.add_argument('--result_path', required=True,\n default=None, help='Location of result.')\n parser.add_argument('--data_path', required=True,\n default=None, help='Location of .npy file.')\n opt_args = parser.parse_args()\n return opt_args\n\n\ndef get_acc(cfg, result_path, npy_path):\n '''\n get_acc\n '''\n centers = np.load(os.path.join(npy_path, \"centers.npy\"))\n scales = np.load(os.path.join(npy_path, \"scales.npy\"))\n scores = np.load(os.path.join(npy_path, \"scores.npy\"))\n ids = np.load(os.path.join(npy_path, \"ids.npy\"))\n num_samples = len(os.listdir(result_path)) // 2\n all_preds = np.zeros((num_samples, 17, 3),\n dtype=np.float32)\n all_boxes = np.zeros((num_samples, 2))\n image_id = []\n idx = 0\n print(num_samples)\n out_shape = [1, 17, 64, 48]\n for i in range(num_samples):\n f1 = os.path.join(result_path, str(i) + \"_0.bin\")\n output = np.fromfile(f1, np.float32).reshape(out_shape)\n if cfg.TEST.FLIP_TEST:\n f2 = os.path.join(result_path, \"flipped\" + str(i) + \"_0.bin\")\n output_flipped = np.fromfile(f2, np.float32).reshape(out_shape)\n output_flipped = flip_back(output_flipped, flip_pairs)\n if cfg.TEST.SHIFT_HEATMAP:\n output_flipped[:, :, :, 1:] = \\\n output_flipped.copy()[:, :, :, 0:-1]\n\n output = (output + output_flipped) * 0.5\n\n c = centers[i]\n s = scales[i]\n score = scores[i]\n file_id = list(ids[i])\n\n preds, maxvals = get_final_preds(output.copy(), c, s)\n num_images, _ = preds.shape[:2]\n all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]\n all_preds[idx:idx + num_images, :, 2:3] = maxvals\n all_boxes[idx:idx + num_images, 0] = np.prod(s * 200, 1)\n all_boxes[idx:idx + num_images, 1] = score\n image_id.extend(file_id)\n idx += num_images\n\n output_dir = \"result/\"\n ann_path = config.DATASET.ROOT + config.DATASET.TEST_JSON\n print(all_preds[:idx].shape, all_boxes[:idx].shape, len(image_id))\n _, perf_indicator = evaluate(\n cfg, all_preds[:idx], output_dir, all_boxes[:idx], image_id, ann_path)\n print(\"AP:\", perf_indicator)\n return perf_indicator\n\n\nif __name__ == '__main__':\n args = parse_args()\n get_acc(config, args.result_path, args.data_path)\n",
"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"einsum\"\"\"\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom mindspore import ops\n\nVALID_LABELS = set(list(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"))\n\n\ndef parse_format(f):\n \"\"\"parse format\"\"\"\n if '->' not in f:\n raise ValueError('incorrect format received')\n\n f_inputs, f_output = f.split('->')\n\n if not f_inputs:\n raise ValueError\n\n f_inputs = [list(f) for f in f_inputs.split(',')]\n f_output = list(f_output)\n\n if len(set(f_output)) != len(f_output):\n raise ValueError(f'duplicate label in f_output: {f_output}')\n\n for f_input in f_inputs:\n if set(f_input) > VALID_LABELS:\n raise ValueError\n if len(set(f_input)) < len(f_input):\n raise ValueError(f\"duplicate label {f_input}\")\n\n return f_inputs, f_output\n\n\ndef validate_args(f_inputs, tensors):\n \"\"\"validate args\"\"\"\n assert len(tensors) == len(f_inputs)\n\n dimensions = OrderedDict()\n for t in range(len(tensors)):\n fmt = f_inputs[t]\n assert tensors[t].ndim == len(fmt)\n\n for i in range(len(fmt)):\n if fmt[i] in dimensions:\n assert dimensions[fmt[i]] == tensors[t].shape[i]\n else:\n dimensions[fmt[i]] = tensors[t].shape[i]\n\n return dimensions\n\n\ndef transpose(tensor, permutation):\n \"\"\"transpose\"\"\"\n if isinstance(tensor, np.ndarray):\n return np.transpose(tensor, permutation)\n return tensor.transpose(permutation)\n\n\ndef outer_product(f_inputs, dimensions, tensors):\n \"\"\"outer product\"\"\"\n tensors = list(tensors)\n assert len(f_inputs) == len(tensors)\n f_output = list(dimensions.keys())\n\n normalized = []\n\n while tensors:\n tensor = tensors.pop()\n labels = f_inputs.pop()\n\n if labels == f_output:\n normalized.append(tensor)\n continue\n\n source = dict(zip(labels, range(len(labels))))\n permutation = [source[l] for l in f_output if l in labels]\n labels = [labels[axis] for axis in permutation]\n tensor = ops.Transpose()(tensor, tuple(permutation))\n\n i = 0\n while i < len(dimensions):\n if i == len(labels) or labels[i] != f_output[i]:\n tensor = ops.ExpandDims()(tensor, i)\n labels.insert(i, f_output[i])\n else:\n i += 1\n\n normalized.append(tensor)\n\n op = normalized.pop()\n while normalized:\n tensor = normalized.pop()\n op = op * tensor\n\n return op\n\n\ndef contract(op, dimensions, f_output):\n \"\"\"contract\"\"\"\n if not f_output:\n return op.sum()\n\n f_input = list(dimensions.keys())\n axis = 0\n while op.ndim > len(f_output):\n assert len(f_input) == op.ndim\n if f_input[axis] not in f_output:\n op = op.sum(axis)\n del f_input[axis]\n else:\n axis += 1\n\n if f_input == f_output:\n return op\n source = dict(zip(f_input, range(len(f_input))))\n permutation = [source[l] for l in f_output]\n return ops.Transpose()(op, tuple(permutation))\n\n\ndef einsum(f, *tensors):\n \"\"\"einsum\"\"\"\n f_inputs, f_output = parse_format(f)\n dimensions = 
validate_args(f_inputs, tensors)\n\n op = outer_product(f_inputs, dimensions, tensors)\n assert op.shape == tuple(dimensions.values())\n contraction = contract(op, dimensions, f_output)\n return contraction\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"preprocess\"\"\"\nfrom __future__ import print_function\nimport argparse\nimport os\nimport numpy as np\nimport cv2\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Process file')\n parser.add_argument('--val_dataset_folder', type=str, default='/home/dataset/widerface/val',\n help='val dataset folder.')\n args = parser.parse_args()\n\n # testing dataset\n test_dataset = []\n with open(os.path.join(args.val_dataset_folder, 'val_img_list.txt'), 'r') as f:\n lines = f.readlines()\n for line in lines:\n test_dataset.append(line.rstrip())\n\n # transform data to bin_file\n img_path = \"./bin_file\"\n if os.path.exists(img_path):\n os.system('rm -rf ' + img_path)\n os.makedirs(img_path)\n h_max, w_max = 1024, 1024\n for i, img_name in enumerate(test_dataset):\n image_path = os.path.join(args.val_dataset_folder, 'images', img_name)\n\n img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)\n img = np.float32(img_raw)\n\n # testing scale\n img = cv2.resize(img, (1024, 1024))\n\n img -= (104, 117, 123)\n img = img.transpose(2, 0, 1)\n img = np.expand_dims(img, 0) # [1, c, h, w]\n\n # save bin file\n file_name = \"widerface_test\" + \"_\" + str(i) + \".bin\"\n file_path = os.path.join(img_path, file_name)\n img.tofile(file_path)\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"eval\"\"\"\n\nimport os\nimport mindspore\nfrom mindspore import load_checkpoint, load_param_into_net\nfrom mindspore import context, Tensor\nfrom PIL import Image\nimport numpy as np\nfrom src.models.APDrawingGAN_G import Generator\nfrom src.data import create_dataset\nfrom src.data.single_dataloader import single_dataloader\nfrom src.option.options_test import TestOptions\n\ncontext.set_context(mode=context.GRAPH_MODE)\n\nclass Eval:\n \"\"\" Eval \"\"\"\n @staticmethod\n def save_image(pic_tensor, pic_path=\"test.png\"):\n \"\"\" save image \"\"\"\n pic_np = pic_tensor.asnumpy()[0]\n if pic_np.shape[0] == 1:\n pic_np = (pic_np[0] + 1) / 2.0 * 255.0\n elif pic_np.shape[0] == 3:\n pic_np = (np.transpose(pic_np, (1, 2, 0)) + 1) / 2.0 * 255.0\n pic = Image.fromarray(pic_np)\n pic = pic.convert('RGB')\n pic.save(pic_path)\n print(pic_path + ' is saved.')\n\n @staticmethod\n def infer_one_image(net, all_data):\n \"\"\" infer one image \"\"\"\n real_A = all_data['A']\n real_A_bg = all_data['bg_A']\n real_A_eyel = all_data['eyel_A']\n real_A_eyer = all_data['eyer_A']\n real_A_nose = all_data['nose_A']\n real_A_mouth = all_data['mouth_A']\n real_A_hair = all_data['hair_A']\n mask = all_data['mask']\n mask2 = all_data['mask2']\n center = all_data['center']\n net.set_pad(center[0])\n result = net.construct(real_A, real_A_bg, real_A_eyel, real_A_eyer, real_A_nose, real_A_mouth, real_A_hair,\n mask, mask2)\n Eval.save_image(result[0], all_data['out_path'])\n\n @staticmethod\n def expand_tensor_data(data_tensor):\n \"\"\"expand_tensor_data\"\"\"\n tmp = np.expand_dims(data_tensor, axis=0)\n data_out = Tensor(tmp, mindspore.float32)\n return data_out\n\n @staticmethod\n def process_input(all_data, result_dir):\n \"\"\"process_input\"\"\"\n all_data['A'] = Eval.expand_tensor_data(all_data['A'])\n all_data['bg_A'] = Eval.expand_tensor_data(all_data['bg_A'])\n all_data['eyel_A'] = Eval.expand_tensor_data(all_data['eyel_A'])\n all_data['eyer_A'] = Eval.expand_tensor_data(all_data['eyer_A'])\n all_data['nose_A'] = Eval.expand_tensor_data(all_data['nose_A'])\n all_data['mouth_A'] = Eval.expand_tensor_data(all_data['mouth_A'])\n all_data['hair_A'] = Eval.expand_tensor_data(all_data['hair_A'])\n all_data['mask'] = Eval.expand_tensor_data(all_data['mask'])\n all_data['mask2'] = Eval.expand_tensor_data(all_data['mask2'])\n all_data['center'] = np.expand_dims(all_data['center'], axis=0)\n pic_path = all_data['A_path']\n pic_path = pic_path[pic_path.rfind('/') + 1:-1]\n all_data['out_path'] = os.path.join(result_dir, pic_path)\n return all_data\n\nif __name__ == \"__main__\":\n opt = TestOptions().get_settings()\n opt.rank = 0\n opt.group_size = 1\n context.set_context(device_id=opt.device_id, device_target=opt.device_target)\n if not os.path.exists(opt.results_dir):\n os.mkdir(opt.results_dir)\n\n models = Generator(opt)\n param_dict 
= load_checkpoint(opt.model_path)\n load_param_into_net(models, param_dict)\n\n dataset = create_dataset(opt)\n for data in dataset.create_dict_iterator(output_numpy=True):\n input_data = {}\n item = single_dataloader(data, opt)\n for d, v in item.items():\n if d in ('A_paths', 'B_paths'):\n input_data[d] = v\n else:\n input_data[d] = v[0]\n Eval.infer_one_image(models, Eval.process_input(input_data, opt.results_dir))\n if opt.isModelarts:\n from src.utils.tools import modelarts_result2obs\n modelarts_result2obs(opt)\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Helper functions\"\"\"\nimport math\nfrom collections import Counter\nimport numpy as np\nfrom mindspore import load_checkpoint\n\ndef Resume(model, root):\n \"\"\"Restore the trained model\"\"\"\n print(\"Loading the trained models from step {}...\".format(root))\n\n checkpoint = load_checkpoint(root, model)\n return checkpoint\n\ndef get_param_groups(network):\n \"\"\"get param groups\"\"\"\n decay_params = []\n no_decay_params = []\n for x in network.trainable_params():\n parameter_name = x.name\n if parameter_name.endswith('.bias'):\n no_decay_params.append(x)\n elif parameter_name.endswith('.gamma'):\n no_decay_params.append(x)\n elif parameter_name.endswith('.beta'):\n no_decay_params.append(x)\n else:\n decay_params.append(x)\n\n return [{'params': no_decay_params, 'weight_decay': 0.0}, {'params': decay_params}]\n\ndef linear_warmup_lr(current_step, warmup_steps, base_lr, init_lr):\n \"\"\"\n Applies liner Increasing to generate learning rate array in warmup stage.\n\n Args:\n current_step(int): current step in warmup stage.\n warmup_steps(int): all steps in warmup stage.\n base_lr(float): init learning rate.\n init_lr(float): end learning rate\n\n Returns:\n float, learning rate.\n \"\"\"\n lr_inc = (float(base_lr) - float(init_lr)) / float(warmup_steps)\n lr = float(init_lr) + lr_inc * current_step\n return lr\n\ndef warmup_cosine_annealing_lr(lr, steps_per_epoch, warmup_epochs, max_epoch, T_max, eta_min=0):\n \"\"\"\n Applies cosine decay to generate learning rate array with warmup.\n\n Args:\n lr(float): init learning rate\n steps_per_epoch(int): steps of one epoch\n warmup_epochs(int): number of warmup epochs\n max_epoch(int): total epoch of training\n T_max(int): max epoch in decay.\n eta_min(float): end learning rate\n\n Returns:\n np.array, learning rate array.\n \"\"\"\n base_lr = lr\n warmup_init_lr = 0\n total_steps = int(max_epoch * steps_per_epoch)\n warmup_steps = int(warmup_epochs * steps_per_epoch)\n\n lr_each_step = []\n for i in range(total_steps):\n last_epoch = i // steps_per_epoch\n if i < warmup_steps:\n lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr)\n else:\n lr = eta_min + (base_lr - eta_min) * (1. 
+ math.cos(math.pi * last_epoch / T_max)) / 2\n lr_each_step.append(lr)\n\n return np.array(lr_each_step).astype(np.float32)\n\ndef warmup_step_lr(lr, lr_epochs, steps_per_epoch, warmup_epochs, max_epoch, gamma=0.1):\n \"\"\"\n Applies step decay to generate learning rate array with warmup.\n\n Args:\n lr(float): init learning rate\n lr_epochs(list): learning rate decay epoches list\n steps_per_epoch(int): steps of one epoch\n warmup_epochs(int): number of warmup epochs\n max_epoch(int): total epoch of training\n gamma(float): attenuation constants.\n\n Returns:\n np.array, learning rate array.\n \"\"\"\n base_lr = lr\n warmup_init_lr = 0\n total_steps = int(max_epoch * steps_per_epoch)\n warmup_steps = int(warmup_epochs * steps_per_epoch)\n milestones = lr_epochs\n milestones_steps = []\n for milestone in milestones:\n milestones_step = milestone * steps_per_epoch\n milestones_steps.append(milestones_step)\n\n lr_each_step = []\n lr = base_lr\n milestones_steps_counter = Counter(milestones_steps)\n for i in range(total_steps):\n if i < warmup_steps:\n lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr)\n else:\n lr = lr * gamma**milestones_steps_counter[i]\n lr_each_step.append(lr)\n\n return np.array(lr_each_step).astype(np.float32)\n\ndef multi_step_lr(lr, milestones, steps_per_epoch, max_epoch, gamma=0.1):\n return warmup_step_lr(lr, milestones, steps_per_epoch, 0, max_epoch, gamma=gamma)\n\ndef step_lr(lr, epoch_size, steps_per_epoch, max_epoch, gamma=0.1):\n lr_epochs = []\n for i in range(1, max_epoch):\n if i % epoch_size == 0:\n lr_epochs.append(i)\n return multi_step_lr(lr, lr_epochs, steps_per_epoch, max_epoch, gamma=gamma)\n\ndef get_lr(args):\n \"\"\"generate learning rate array.\"\"\"\n if args.lr_scheduler == 'exponential':\n lr = warmup_step_lr(args.lr,\n args.lr_epochs,\n args.steps_per_epoch,\n args.warmup_epochs,\n args.max_epoch,\n gamma=args.lr_gamma,\n )\n elif args.lr_scheduler == 'cosine_annealing':\n lr = warmup_cosine_annealing_lr(args.lr,\n args.steps_per_epoch,\n args.warmup_epochs,\n args.max_epoch,\n args.T_max,\n args.eta_min)\n else:\n raise NotImplementedError(args.lr_scheduler)\n return lr\n",
"# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Resnet backbone.\"\"\"\n\nimport numpy as np\nimport mindspore.ops as ops\nimport mindspore.nn as nn\nfrom mindspore.common.tensor import Tensor\n\n\ndef weight_init_ones(shape):\n \"\"\"Weight init.\"\"\"\n return Tensor(np.full(shape, 0.01).astype(np.float32))\n\n\ndef _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad'):\n \"\"\"Conv2D wrapper.\"\"\"\n shape = (out_channels, in_channels, kernel_size, kernel_size)\n weights = weight_init_ones(shape)\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n pad_mode=pad_mode, weight_init=weights, has_bias=False)\n\n\ndef _BatchNorm2dInit(out_chls, momentum=0.1, affine=True, use_batch_statistics=True):\n \"\"\"Batchnorm2D wrapper.\"\"\"\n dtype = np.float32\n gamma_init = Tensor(np.array(np.ones(out_chls)).astype(dtype))\n beta_init = Tensor(np.array(np.ones(out_chls) * 0).astype(dtype))\n moving_mean_init = Tensor(np.array(np.ones(out_chls) * 0).astype(dtype))\n moving_var_init = Tensor(np.array(np.ones(out_chls)).astype(dtype))\n return nn.BatchNorm2d(out_chls, momentum=momentum, affine=affine, gamma_init=gamma_init,\n beta_init=beta_init, moving_mean_init=moving_mean_init,\n moving_var_init=moving_var_init, use_batch_statistics=use_batch_statistics)\n\n\nclass ResNetFea(nn.Cell):\n \"\"\"\n ResNet architecture.\n\n Args:\n block (Cell): Block for network.\n layer_nums (list): Numbers of block in different layers.\n in_channels (list): Input channel in each layer.\n out_channels (list): Output channel in each layer.\n weights_update (bool): Weight update flag.\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ResNet(ResidualBlock,\n >>> [3, 4, 6, 3],\n >>> [64, 256, 512, 1024],\n >>> [256, 512, 1024, 2048],\n >>> False)\n \"\"\"\n def __init__(self,\n block,\n layer_nums,\n in_channels,\n out_channels,\n weights_update=False):\n super(ResNetFea, self).__init__()\n\n if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:\n raise ValueError(\"the length of \"\n \"layer_num, inchannel, outchannel list must be 4!\")\n\n bn_training = False\n self.conv1 = _conv(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')\n self.bn1 = _BatchNorm2dInit(64, affine=bn_training, use_batch_statistics=bn_training)\n self.relu = ops.ReLU()\n self.maxpool = ops.MaxPool(kernel_size=3, strides=2, pad_mode=\"SAME\")\n self.weights_update = weights_update\n\n if not self.weights_update:\n self.conv1.weight.requires_grad = False\n\n self.layer1 = self._make_layer(block,\n layer_nums[0],\n in_channel=in_channels[0],\n out_channel=out_channels[0],\n stride=1,\n training=bn_training,\n weights_update=self.weights_update)\n self.layer2 = self._make_layer(block,\n layer_nums[1],\n in_channel=in_channels[1],\n out_channel=out_channels[1],\n stride=2,\n training=bn_training,\n 
weights_update=True)\n self.layer3 = self._make_layer(block,\n layer_nums[2],\n in_channel=in_channels[2],\n out_channel=out_channels[2],\n stride=2,\n training=bn_training,\n weights_update=True)\n self.layer4 = self._make_layer(block,\n layer_nums[3],\n in_channel=in_channels[3],\n out_channel=out_channels[3],\n stride=2,\n training=bn_training,\n weights_update=True)\n\n def _make_layer(self, block, layer_num, in_channel, out_channel, stride, training=False, weights_update=False):\n \"\"\"Make block layer.\"\"\"\n layers = []\n down_sample = False\n if stride != 1 or in_channel != out_channel:\n down_sample = True\n resblk = block(in_channel,\n out_channel,\n stride=stride,\n down_sample=down_sample,\n training=training,\n weights_update=weights_update)\n layers.append(resblk)\n\n for _ in range(1, layer_num):\n resblk = block(out_channel, out_channel, stride=1, training=training, weights_update=weights_update)\n layers.append(resblk)\n\n return nn.SequentialCell(layers)\n\n def construct(self, x):\n \"\"\"\n construct the ResNet Network\n\n Args:\n x: input feature data.\n\n Returns:\n Tensor, output tensor.\n \"\"\"\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n c1 = self.maxpool(x)\n\n c2 = self.layer1(c1)\n identity = c2\n if not self.weights_update:\n identity = ops.stop_gradient(c2)\n c3 = self.layer2(identity)\n c4 = self.layer3(c3)\n c5 = self.layer4(c4)\n\n return identity, c3, c4, c5\n\n\nclass ResidualBlockUsing(nn.Cell):\n \"\"\"\n ResNet V1 residual block definition.\n\n Args:\n in_channels (int) - Input channel.\n out_channels (int) - Output channel.\n stride (int) - Stride size for the initial convolutional layer. Default: 1.\n down_sample (bool) - If to do the downsample in block. Default: False.\n momentum (float) - Momentum for batchnorm layer. Default: 0.1.\n training (bool) - Training flag. Default: False.\n weights_updata (bool) - Weights update flag. 
Default: False.\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n ResidualBlock(3,256,stride=2,down_sample=True)\n \"\"\"\n expansion = 4\n\n def __init__(self,\n in_channels,\n out_channels,\n stride=1,\n down_sample=False,\n momentum=0.1,\n training=False,\n weights_update=False):\n super(ResidualBlockUsing, self).__init__()\n\n self.affine = weights_update\n\n out_chls = out_channels // self.expansion\n self.conv1 = _conv(in_channels, out_chls, kernel_size=1, stride=1, padding=0)\n self.bn1 = _BatchNorm2dInit(out_chls, momentum=momentum, affine=self.affine, use_batch_statistics=training)\n\n self.conv2 = _conv(out_chls, out_chls, kernel_size=3, stride=stride, padding=1)\n self.bn2 = _BatchNorm2dInit(out_chls, momentum=momentum, affine=self.affine, use_batch_statistics=training)\n\n self.conv3 = _conv(out_chls, out_channels, kernel_size=1, stride=1, padding=0)\n self.bn3 = _BatchNorm2dInit(out_channels, momentum=momentum, affine=self.affine, use_batch_statistics=training)\n\n if training:\n self.bn1 = self.bn1.set_train()\n self.bn2 = self.bn2.set_train()\n self.bn3 = self.bn3.set_train()\n\n if not weights_update:\n self.conv1.weight.requires_grad = False\n self.conv2.weight.requires_grad = False\n self.conv3.weight.requires_grad = False\n\n self.relu = ops.ReLU()\n self.downsample = down_sample\n if self.downsample:\n self.conv_down_sample = _conv(in_channels, out_channels, kernel_size=1, stride=stride, padding=0)\n self.bn_down_sample = _BatchNorm2dInit(out_channels, momentum=momentum, affine=self.affine,\n use_batch_statistics=training)\n if training:\n self.bn_down_sample = self.bn_down_sample.set_train()\n if not weights_update:\n self.conv_down_sample.weight.requires_grad = False\n self.add = ops.Add()\n\n def construct(self, x):\n \"\"\"\n construct the ResNet V1 residual block\n\n Args:\n x: input feature data.\n\n Returns:\n Tensor, output tensor.\n \"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample:\n identity = self.conv_down_sample(identity)\n identity = self.bn_down_sample(identity)\n\n out = self.add(out, identity)\n out = self.relu(out)\n\n return out\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"\nsample script of autodis infer using SDK run in docker\n\"\"\"\n\nimport argparse\nimport os\nimport time\n\nimport MxpiDataType_pb2 as MxpiDataType\nimport numpy as np\nfrom StreamManagerApi import StreamManagerApi, MxDataInput, InProtobufVector, \\\n MxProtobufIn, StringVector\n\ndef parse_args():\n \"\"\"set and check parameters.\"\"\"\n parser = argparse.ArgumentParser(description='autodis process')\n parser.add_argument('--data_dir', type=str, default='../data/input', help='Data path')\n parser.add_argument('--ids_file', type=str, default='ids')\n parser.add_argument('--wts_file', type=str, default='wts')\n parser.add_argument('--label_file', type=str, default='label')\n parser.add_argument('--input_format', type=str, default='bin')\n parser.add_argument('--output_dir', type=str, default='./output', help='Data path')\n parser.add_argument('--pipeline', type=str, default='../data/config/autodis.pipeline', help='SDK infer pipeline')\n parser.add_argument('--dense_dim', type=int, default=13)\n parser.add_argument('--slot_dim', type=int, default=26)\n args_opt = parser.parse_args()\n return args_opt\n\nargs = parse_args()\n\ndef send_source_data(appsrc_id, file_name, file_data, stream_name, stream_manager, shape, tp):\n \"\"\"\n Construct the input of the stream,\n send inputs data to a specified stream based on streamName.\n\n Returns:\n bool: send data success or not\n \"\"\"\n tensors = np.array(file_data, dtype=tp).reshape(shape)\n tensor_package_list = MxpiDataType.MxpiTensorPackageList()\n tensor_package = tensor_package_list.tensorPackageVec.add()\n data_input = MxDataInput()\n tensor_vec = tensor_package.tensorVec.add()\n tensor_vec.deviceId = 0\n tensor_vec.memType = 0\n for i in tensors.shape:\n tensor_vec.tensorShape.append(i)\n array_bytes = tensors.tobytes()\n data_input.data = array_bytes\n tensor_vec.dataStr = data_input.data\n tensor_vec.tensorDataSize = len(array_bytes)\n key = \"appsrc{}\".format(appsrc_id).encode('utf-8')\n protobuf_vec = InProtobufVector()\n protobuf = MxProtobufIn()\n protobuf.key = key\n protobuf.type = b'MxTools.MxpiTensorPackageList'\n protobuf.protobuf = tensor_package_list.SerializeToString()\n protobuf_vec.push_back(protobuf)\n ret = stream_manager.SendProtobuf(stream_name, appsrc_id, protobuf_vec)\n if ret < 0:\n print(\"Failed to send data to stream.\")\n return False\n print(\"Send successfully!\")\n return True\n\ndef send_appsrc_data(appsrc_id, file_name, file_data, stream_name, stream_manager, shape, tp):\n \"\"\"\n send three stream to infer model, include input ids, input mask and token type_id.\n\n Returns:\n bool: send data success or not\n \"\"\"\n if not send_source_data(appsrc_id, file_name, file_data, stream_name, stream_manager, shape, tp):\n return False\n return True\n\ndef get_acc(labels, preds):\n \"\"\"Accuracy\"\"\"\n accuracy = np.sum(labels 
== preds) / len(labels)\n return accuracy\n\ndef post_process(infer_result):\n \"\"\"\n process the result of infer tensor to Visualization results.\n Args:\n infer_result: get logit from infer result\n \"\"\"\n result = MxpiDataType.MxpiTensorPackageList()\n result.ParseFromString(infer_result[0].messageBuf)\n res = np.frombuffer(result.tensorPackageVec[0].tensorVec[1].dataStr, dtype=np.float32)\n res = res.reshape((-1,))\n label = np.frombuffer(result.tensorPackageVec[0].tensorVec[2].dataStr, dtype=np.float32)\n label = res.reshape((-1,))\n pred_label = np.round(res)\n return int(label[0]), res[0], int(pred_label[0])\n\ndef get_auc(labels, preds, n_bins=10000):\n \"\"\"ROC_AUC\"\"\"\n postive_len = sum(labels)\n negative_len = len(labels) - postive_len\n total_case = postive_len * negative_len\n if total_case == 0:\n return 0\n pos_histogram = [0 for _ in range(n_bins+1)]\n neg_histogram = [0 for _ in range(n_bins+1)]\n bin_width = 1.0 / n_bins\n for i in range(len(labels)):\n nth_bin = int(preds[i]/bin_width)\n if labels[i] == 1:\n pos_histogram[nth_bin] += 1\n else:\n neg_histogram[nth_bin] += 1\n accumulated_neg = 0\n satisfied_pair = 0\n for i in range(n_bins+1):\n satisfied_pair += (pos_histogram[i] * accumulated_neg + pos_histogram[i] * neg_histogram[i] * 0.5)\n accumulated_neg += neg_histogram[i]\n return satisfied_pair / float(total_case)\n\ndef run():\n \"\"\"\n read pipeline and do infer\n \"\"\"\n # init stream manager\n stream_manager_api = StreamManagerApi()\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(os.path.realpath(args.pipeline), 'rb') as f:\n pipeline_str = f.read()\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # preprocess data\n if args.input_format == 'txt':\n ids_data = np.loadtxt(os.path.join(args.data_dir, args.ids_file+\".\"+args.input_format), delimiter=\"\\t\")\n wts_data = np.loadtxt(os.path.join(args.data_dir, args.wts_file+\".\"+args.input_format), delimiter=\"\\t\")\n label_data = np.loadtxt(os.path.join(args.data_dir, args.label_file+\".\"+args.input_format), delimiter=\"\\t\")\n else:\n ids_data = np.fromfile(os.path.join(args.data_dir, args.ids_file+\".\"+args.input_format), dtype=np.int32)\n ids_data.shape = -1, 39\n wts_data = np.fromfile(os.path.join(args.data_dir, args.wts_file+\".\"+args.input_format), dtype=np.float32)\n wts_data.shape = -1, 39\n label_data = np.fromfile(os.path.join(args.data_dir, args.label_file+\".\"+args.input_format), dtype=np.float32)\n label_data.shape = -1, 1\n\n if(ids_data.shape[0] != wts_data.shape[0] or wts_data.shape[0] != label_data.shape[0]):\n print(\"number of input data not completely equal\")\n exit()\n rows = label_data.shape[0]\n\n # statistical variable\n labels = []\n probs = []\n preds = []\n infer_total_time = 0\n\n # write predict label\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n fo = open(os.path.join(args.output_dir, \"result.txt\"), \"w\")\n fo.write(\"label\\tprob\\tpred\\n\")\n for i in range(rows):\n # fetch data\n ids = ids_data[i]\n wts = wts_data[i]\n label = label_data[i]\n\n # data shape\n ids_shape = (-1, args.dense_dim+args.slot_dim)\n wts_shape = (-1, args.dense_dim+args.slot_dim)\n label_shape = (-1, 1)\n\n # data type\n ids_type = np.int32\n wts_type = np.float32\n label_type = np.float32\n\n # send data\n stream_name = 
b'autodis'\n if not send_appsrc_data(0, \"ids\", ids, stream_name, stream_manager_api, ids_shape, ids_type):\n return\n if not send_appsrc_data(1, \"wts\", wts, stream_name, stream_manager_api, wts_shape, wts_type):\n return\n if not send_appsrc_data(2, \"label\", label, stream_name, stream_manager_api, label_shape, label_type):\n return\n\n # Obtain the inference result by specifying streamName and uniqueId.\n key_vec = StringVector()\n key_vec.push_back(b'mxpi_tensorinfer0')\n start_time = time.time()\n infer_result = stream_manager_api.GetProtobuf(stream_name, 0, key_vec)\n infer_total_time += time.time() - start_time\n if infer_result.size() == 0:\n print(\"inferResult is null\")\n return\n if infer_result[0].errorCode != 0:\n print(\"GetProtobuf error. errorCode=%d\" % (infer_result[0].errorCode))\n return\n\n # updata variable\n label_, prob_, pred_ = post_process(infer_result)\n label_ = label\n labels.append(label_)\n probs.append(prob_)\n preds.append(pred_)\n\n # write predict label\n fo.write(str(label_)+\"\\t\"+str(prob_)+\"\\t\"+str(pred_)+\"\\n\")\n\n labels = np.array(labels)\n probs = np.array(probs)\n preds = np.array(preds)\n infer_acc = get_acc(labels, preds)\n infer_auc = get_auc(labels, probs)\n fo1 = open(os.path.join(args.output_dir, \"metric.txt\"), \"w\")\n fo1.write(\"Number of samples:%d\\n\"%(rows))\n fo1.write(\"Infer total time:%f\\n\"%(infer_total_time))\n fo1.write(\"Average infer time:%f\\n\"%(infer_total_time/rows))\n fo1.write(\"Infer acc:%f\\n\"%(infer_acc))\n fo1.write(\"Infer auc:%f\\n\"%(infer_auc))\n fo.close()\n fo1.close()\n print('<<======== Infer Metric ========>>')\n print(\"Number of samples:%d\"%(rows))\n print(\"Infer total time:%f\"%(infer_total_time))\n print(\"Average infer time:%f\\n\"%(infer_total_time/rows))\n print(\"Infer acc:%f\"%(infer_acc))\n print(\"infer auc:%f\"%(infer_auc))\n print('<<===============================>>')\n stream_manager_api.DestroyAllStreams()\n\nif __name__ == '__main__':\n run()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"ResNet.\"\"\"\nimport numpy as np\n\nimport mindspore\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nimport mindspore.ops as ops\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import functional as F\nfrom mindspore import Tensor\n\nfrom scipy.stats import truncnorm\n\n__all__ = ['ResNet50']\n\ndef _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):\n fan_in = in_channel * kernel_size * kernel_size\n scale = 1.0\n scale /= max(1., fan_in)\n stddev = (scale ** 0.5) / .87962566103423978\n mu, sigma = 0, stddev\n weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)\n weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))\n return Tensor(weight, dtype=mstype.float32)\n\ndef _weight_variable(shape, factor=0.01):\n init_value = np.random.randn(*shape).astype(np.float32) * factor\n return Tensor(init_value)\n\n\ndef _conv3x3(in_channel, out_channel, stride=1, use_se=False):\n if use_se:\n weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=3)\n else:\n weight_shape = (out_channel, in_channel, 3, 3)\n weight = _weight_variable(weight_shape)\n return nn.Conv2d(in_channel, out_channel,\n kernel_size=3, stride=stride, padding=0, pad_mode='same', weight_init=weight)\n\n\ndef _conv1x1(in_channel, out_channel, stride=1, use_se=False):\n if use_se:\n weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=1)\n else:\n weight_shape = (out_channel, in_channel, 1, 1)\n weight = _weight_variable(weight_shape)\n return nn.Conv2d(in_channel, out_channel,\n kernel_size=1, stride=stride, padding=0, pad_mode='same', weight_init=weight)\n\n\ndef _conv7x7(in_channel, out_channel, stride=1, use_se=False):\n if use_se:\n weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=7)\n else:\n weight_shape = (out_channel, in_channel, 7, 7)\n weight = _weight_variable(weight_shape)\n return nn.Conv2d(in_channel, out_channel,\n kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=weight)\n\n\ndef _bn(channel):\n return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,\n gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)\n\n\ndef _bn_last(channel):\n return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,\n gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1)\n\n\ndef _fc(in_channel, out_channel, use_se=False):\n if use_se:\n weight = np.random.normal(loc=0, scale=0.01, size=out_channel*in_channel)\n weight = Tensor(np.reshape(weight, (out_channel, in_channel)), dtype=mstype.float32)\n else:\n weight_shape = (out_channel, in_channel)\n weight = _weight_variable(weight_shape)\n return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)\n\n\nclass 
ResidualBlock(nn.Cell):\n \"\"\"\n ResNet V1 residual block definition.\n\n Args:\n in_channel (int): Input channel.\n out_channel (int): Output channel.\n stride (int): Stride size for the first convolutional layer. Default: 1.\n use_se (bool): enable SE-ResNet50 net. Default: False.\n se_block(bool): use se block in SE-ResNet50 net. Default: False.\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ResidualBlock(3, 256, stride=2)\n \"\"\"\n expansion = 4\n\n def __init__(self,\n in_channel,\n out_channel,\n stride=1,\n use_se=False, se_block=False):\n super(ResidualBlock, self).__init__()\n self.stride = stride\n self.use_se = use_se\n self.se_block = se_block\n channel = out_channel // self.expansion\n self.conv1 = _conv1x1(in_channel, channel, stride=1, use_se=self.use_se)\n self.bn1 = _bn(channel)\n if self.use_se and self.stride != 1:\n self.e2 = nn.SequentialCell([_conv3x3(channel, channel, stride=1, use_se=True), _bn(channel),\n nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')])\n else:\n self.conv2 = _conv3x3(channel, channel, stride=stride, use_se=self.use_se)\n self.bn2 = _bn(channel)\n\n self.conv3 = _conv1x1(channel, out_channel, stride=1, use_se=self.use_se)\n self.bn3 = _bn_last(out_channel)\n if self.se_block:\n self.se_global_pool = P.ReduceMean(keep_dims=False)\n self.se_dense_0 = _fc(out_channel, int(out_channel/4), use_se=self.use_se)\n self.se_dense_1 = _fc(int(out_channel/4), out_channel, use_se=self.use_se)\n self.se_sigmoid = nn.Sigmoid()\n self.se_mul = P.Mul()\n self.relu = nn.ReLU()\n\n self.down_sample = False\n\n if stride != 1 or in_channel != out_channel:\n self.down_sample = True\n self.down_sample_layer = None\n\n if self.down_sample:\n if self.use_se:\n if stride == 1:\n self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel,\n stride, use_se=self.use_se), _bn(out_channel)])\n else:\n self.down_sample_layer = nn.SequentialCell([nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'),\n _conv1x1(in_channel, out_channel, 1,\n use_se=self.use_se), _bn(out_channel)])\n else:\n self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,\n use_se=self.use_se), _bn(out_channel)])\n self.add = P.Add()\n\n def construct(self, x):\n \"\"\"construct ResidualBlock\"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n if self.use_se and self.stride != 1:\n out = self.e2(out)\n else:\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n if self.se_block:\n out_se = out\n out = self.se_global_pool(out, (2, 3))\n out = self.se_dense_0(out)\n out = self.relu(out)\n out = self.se_dense_1(out)\n out = self.se_sigmoid(out)\n out = F.reshape(out, F.shape(out) + (1, 1))\n out = self.se_mul(out, out_se)\n\n if self.down_sample:\n identity = self.down_sample_layer(identity)\n\n out = out + identity\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Cell):\n \"\"\"construct resnet backbone\"\"\"\n\n def __init__(self,\n block,\n layer_nums,\n in_channels,\n out_channels,\n strides,\n num_classes,\n use_se=False):\n super(ResNet, self).__init__()\n\n if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:\n raise ValueError(\"the length of layer_num, in_channels, out_channels list must be 4!\")\n self.use_se = use_se\n self.se_block = False\n if self.use_se:\n self.se_block = True\n\n if self.use_se:\n self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)\n self.bn1_0 = _bn(32)\n 
self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)\n self.bn1_1 = _bn(32)\n self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)\n else:\n self.conv1 = _conv7x7(3, 64, stride=2)\n self.bn1 = _bn(64)\n self.relu = P.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"same\")\n self.layer1 = self._make_layer(block,\n layer_nums[0],\n in_channel=in_channels[0],\n out_channel=out_channels[0],\n stride=strides[0],\n use_se=self.use_se)\n self.layer2 = self._make_layer(block,\n layer_nums[1],\n in_channel=in_channels[1],\n out_channel=out_channels[1],\n stride=strides[1],\n use_se=self.use_se)\n self.layer3 = self._make_layer(block,\n layer_nums[2],\n in_channel=in_channels[2],\n out_channel=out_channels[2],\n stride=strides[2],\n use_se=self.use_se,\n se_block=self.se_block)\n self.layer4 = self._make_layer(block,\n layer_nums[3],\n in_channel=in_channels[3],\n out_channel=out_channels[3],\n stride=strides[3],\n use_se=self.use_se,\n se_block=self.se_block)\n\n\n def _make_layer(self, block, layer_num, in_channel, out_channel, stride, use_se=False, se_block=False):\n \"\"\"construct make_layer\"\"\"\n\n layers = []\n\n resnet_block = block(in_channel, out_channel, stride=stride, use_se=use_se)\n layers.append(resnet_block)\n if se_block:\n for _ in range(1, layer_num - 1):\n resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)\n layers.append(resnet_block)\n resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se, se_block=se_block)\n layers.append(resnet_block)\n else:\n for _ in range(1, layer_num):\n resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)\n layers.append(resnet_block)\n return nn.SequentialCell(layers)\n\n\n def construct(self, x):\n \"\"\"construct resnet\"\"\"\n\n if self.use_se:\n x = self.conv1_0(x)\n x = self.bn1_0(x)\n x = self.relu(x)\n x = self.conv1_1(x)\n x = self.bn1_1(x)\n x = self.relu(x)\n x = self.conv1_2(x)\n else:\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n c1 = self.maxpool(x)\n\n c2 = self.layer1(c1)\n c3 = self.layer2(c2)\n c4 = self.layer3(c3)\n c5 = self.layer4(c4)\n\n return c5\n\nclass ResNet50(nn.Cell):\n \"\"\"construct resnet50\"\"\"\n def __init__(self, num_classes, loss='softmax and metric', aligned=True, is_train=True, **kwargs):\n super(ResNet50, self).__init__()\n self.loss = loss\n self.num_classes = num_classes\n self.base = ResNet(ResidualBlock,\n [3, 4, 6, 3],\n [64, 256, 512, 1024],\n [256, 512, 1024, 2048],\n [1, 2, 2, 2],\n self.num_classes,\n use_se=False)\n\n self.is_train = is_train\n self.aligned = aligned\n if self.is_train:\n self.horizon_pool = nn.MaxPool2d(kernel_size=(4, 1)) #train\n else:\n self.horizon_pool = nn.MaxPool2d(kernel_size=(1, 4)) #test\n\n self.transpose = P.Transpose()\n self.op_sum = ops.ReduceSum(keep_dims=True)\n self.powt = ops.Pow()\n self.min_value = Tensor(1e-12, mindspore.float32)\n self.max_value = Tensor(1e+12, mindspore.float32)\n self.op_sqrt = ops.Sqrt()\n if self.aligned:\n self.bn = nn.BatchNorm2d(2048)\n self.relu = nn.ReLU()\n self.conv1 = nn.Conv2d(2048, 128, kernel_size=1, stride=1, padding=0, has_bias=True)\n\n self.mean = P.ReduceMean(keep_dims=True)\n self.flatten = nn.Flatten()\n self.end_point = _fc(2048, num_classes, use_se=False)\n\n\n def construct(self, x):\n \"\"\"construct resnet50\"\"\"\n x = self.base(x)\n lf = x\n if not self.is_train:\n lf = self.horizon_pool(x)\n lft = self.powt(lf, 2)\n lft = self.op_sum(lft, 1)\n lft = ops.clip_by_value(lft, clip_value_min=self.min_value, 
clip_value_max=self.max_value)\n lft = self.op_sqrt(lft)\n lf = lf/lft\n\n if self.aligned and self.is_train:\n lf = self.bn(x)\n lf = self.relu(lf)\n lf = self.transpose(lf, (0, 1, 3, 2))\n lf = self.horizon_pool(lf)\n lf = self.transpose(lf, (0, 1, 3, 2))\n lf = self.conv1(lf)\n\n lft = self.powt(lf, 2)\n lft = self.op_sum(lft, 1)\n lft = ops.clip_by_value(lft, clip_value_min=self.min_value, clip_value_max=self.max_value)\n lft = self.op_sqrt(lft)\n lf = lf/lft\n\n x = self.mean(x, (2, 3))\n f = self.flatten(x)\n y = self.end_point(f)\n\n if not self.is_train:\n return f, lf\n if self.loss == 'softmax':\n return y\n if self.loss == 'metric':\n if self.aligned: return f, lf\n return f\n if self.loss == 'softmax and metric':\n if self.aligned: return y, f, lf\n return y, f\n return 0\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"export file.\"\"\"\nimport argparse\nimport numpy as np\n\nfrom mindspore import context, Tensor\nfrom mindspore.train.serialization import export, load_param_into_net, load_checkpoint\n\nfrom src.delf_model import Model as DELF\n\nparser = argparse.ArgumentParser(description='Export MINDIR')\nparser.add_argument(\"--device_id\", type=int, default=0, help=\"Device id\")\nparser.add_argument('--ckpt_path', type=str, default='')\n\nargs = parser.parse_known_args()[0]\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", device_id=args.device_id)\n\nif __name__ == '__main__':\n\n delf_net = DELF(state=\"test\")\n param_dict = load_checkpoint(args.ckpt_path)\n load_param_into_net(delf_net, param_dict)\n\n input_batch = Tensor(np.random.uniform(\n -1.0, 1.0, size=(7, 3, 2048, 2048)).astype(np.float32))\n\n export(delf_net, input_batch, file_name='DELF_MindIR', file_format='MINDIR')\n print(\"Export successfully!\")\n"
] | [
[
"numpy.ones"
],
[
"numpy.random.seed",
"numpy.save"
],
[
"numpy.zeros",
"numpy.sqrt",
"numpy.ones"
],
[
"numpy.minimum",
"numpy.unique",
"numpy.power",
"numpy.around",
"numpy.concatenate",
"numpy.max",
"numpy.append",
"numpy.intersect1d",
"numpy.zeros_like",
"numpy.mean",
"numpy.exp",
"numpy.where",
"numpy.sum",
"numpy.zeros"
],
[
"numpy.arange",
"numpy.array",
"numpy.sqrt",
"numpy.stack"
],
[
"numpy.argsort",
"numpy.fromfile"
],
[
"sklearn.metrics.roc_auc_score",
"numpy.take",
"numpy.pad",
"numpy.mean",
"numpy.argsort",
"numpy.load",
"numpy.array",
"numpy.sum"
],
[
"numpy.arange",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.fromfile",
"numpy.abs",
"numpy.multiply",
"numpy.mean",
"numpy.sum"
],
[
"numpy.arange",
"numpy.load",
"numpy.transpose"
],
[
"numpy.logical_or",
"numpy.logical_and",
"numpy.mean"
],
[
"numpy.arange",
"numpy.array",
"numpy.sqrt",
"numpy.stack"
],
[
"numpy.fromfile"
],
[
"numpy.ones"
],
[
"numpy.random.seed"
],
[
"numpy.zeros"
],
[
"numpy.load",
"numpy.array",
"pandas.read_csv",
"pandas.get_dummies"
],
[
"numpy.array"
],
[
"numpy.sqrt",
"numpy.ones"
],
[
"numpy.expand_dims",
"numpy.frombuffer",
"numpy.argmax",
"numpy.array",
"numpy.flip"
],
[
"numpy.asarray",
"numpy.array",
"numpy.random.choice"
],
[
"numpy.random.normal",
"numpy.zeros"
],
[
"numpy.array",
"numpy.mean",
"numpy.sum"
],
[
"numpy.fromfile",
"numpy.array"
],
[
"numpy.multiply",
"numpy.arange",
"numpy.linalg.norm",
"numpy.ones",
"numpy.random.randn",
"numpy.array",
"numpy.exp",
"numpy.zeros"
],
[
"numpy.zeros"
],
[
"numpy.array",
"numpy.zeros"
],
[
"numpy.ones"
],
[
"numpy.fromfile",
"numpy.zeros",
"numpy.prod"
],
[
"numpy.transpose"
],
[
"numpy.expand_dims",
"numpy.float32"
],
[
"numpy.expand_dims",
"numpy.transpose"
],
[
"numpy.array"
],
[
"numpy.ones",
"numpy.full"
],
[
"numpy.round",
"numpy.frombuffer",
"numpy.array",
"numpy.sum"
],
[
"numpy.reshape",
"numpy.random.normal",
"numpy.random.randn",
"scipy.stats.truncnorm"
],
[
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PPACI/Devoxx19-TensorflowJS | [
"4096c8ea460af8a9f8a36df01e88309568318ab8"
] | [
"python/02_train.py"
] | [
"from PIL import Image\nimport numpy\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.applications.mobilenet import MobileNet\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.layers import Dense, Dropout, BatchNormalization\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.callbacks import *\n\n# Create image data generator\nimage_generator = ImageDataGenerator(\n validation_split=0.15,\n horizontal_flip=True,\n zoom_range=0.1,\n width_shift_range=0.1,\n height_shift_range=0.1,\n rotation_range=5,\n rescale=1. / 255\n)\ntrain_generator = image_generator.flow_from_directory(\"dataset\", subset=\"training\", target_size=(224, 224),\n batch_size=8)\nvalidation_generator = image_generator.flow_from_directory(\"dataset\", subset=\"validation\", target_size=(224, 224),\n batch_size=8)\n\n# Show an image from train set\nImage.fromarray((next(train_generator)[0][0] * 255).astype(numpy.uint8)).show()\n\n# Create model\nmobile = MobileNet(\n input_shape=(224, 224, 3),\n include_top=False,\n weights='imagenet',\n pooling='avg',\n alpha=0.5\n)\noutput = Dropout(0.4)(mobile.output)\noutput = Dense(8, activation=\"relu\")(output)\noutput = Dense(3, activation=\"sigmoid\")(output)\n\nmodel = Model(inputs=mobile.input, outputs=output)\nmodel.summary()\n\n# Compile model\nmodel.compile(optimizer=Adam(amsgrad=True), loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\ncallbacks = [\n ReduceLROnPlateau(\n patience=3,\n factor=0.2,\n verbose=1,\n min_lr=1e-5\n ),\n ModelCheckpoint(\n filepath=\"croissant.hdf5\",\n verbose=1,\n save_best_only=True\n )\n]\n\n# Train\nmodel.fit_generator(\n generator=train_generator,\n steps_per_epoch=256,\n epochs=50,\n verbose=1,\n validation_data=validation_generator,\n validation_steps=40,\n callbacks=callbacks\n)\n"
] | [
[
"tensorflow.python.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.models.Model",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.python.keras.optimizers.Adam",
"tensorflow.python.keras.applications.mobilenet.MobileNet"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.4"
]
}
] |
fbrundu/scCODA | [
"5508a0419d4a46e33897a5df69ba6d4e1753fadd"
] | [
"sccoda/model/dirichlet_models.py"
] | [
"\"\"\"\nDirichlet-multinomial models for statistical analysis of compositional changes in single-cell data.\n\nFor further reference, see:\nBüttner, Ostner et al.: scCODA: A Bayesian model for compositional single-cell data analysis\n\n:authors: Johannes Ostner\n\"\"\"\nimport numpy as np\nimport time\nimport warnings\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom sccoda.util import result_classes as res\nfrom typing import Optional, Tuple, Collection, Union, List\n\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\n\nclass CompositionalModel:\n \"\"\"\n Dynamical framework for formulation and inference of Bayesian models for compositional data analysis.\n This framework is used to implement scCODA's model as a subclass of this class.\n Tensorflow probability then allows to run a multitude of inference algorithms on these models\n without the need to specify them every time.\n\n A `CompositionalModel` consists of the following parameters:\n\n - `covariate_matrix`: Numpy array that specifies the independent variables (X). Generated equivalently to the covariate matrix of a linear regression.\n\n - `data_matrix`: Dependent variable (Y). Includes the raw cell type counts for every sample.\n\n - `cell_types`, covariate_names: Names of cell types and covariates\n\n - `formula`: String that represents which covariates to include and how to transform them. Used analogously to the formula in R's lm function\n\n A specific compositional model is then implemented as a child class, with the following additional parameters\n specified in the constructor:\n\n - `target_log_prob_fn`: Log-probability function of the model. For more specific information, please refer to (tensorflow probability's API)[https://www.tensorflow.org/probability/api_docs/python/tfp]\n\n - `param_names`: Names of prior and intermediate parameters that are included in the model output. The order has to be the same as in the states_burnin output of `self.get_y_hat`\n\n - `init_params`: Initial values for the inference method\n\n Methods implemented by this class:\n\n - `sampling`: General MCMC sampling that uses a transition kernel\n\n - `get_chains_after_burnin`: Application of burn-in to MCMC sampling results\n\n - MCMC sampling methods (`sample_hmc`, `sample_hmc_da`, `sample_nuts`)\n\n Methods implemented by a child class:\n\n - `get_y_hat`: Calculation of intermediate parameters for all MCMC chain states and posterior mode of the cell count matrix\n\n \"\"\"\n\n def __init__(\n self,\n covariate_matrix: np.ndarray,\n data_matrix: np.ndarray,\n cell_types: List[str],\n covariate_names: List[str],\n formula: str,\n *args,\n **kwargs\n ):\n \"\"\"\n Generalized Constructor of Bayesian compositional model class.\n\n Parameters\n ----------\n covariate_matrix\n covariate matrix, size NxD\n data_matrix\n cell count matrix, size NxK\n cell_types\n Cell type names\n covariate_names\n Covariate names\n \"\"\"\n\n dtype = tf.float64\n self.x = tf.convert_to_tensor(covariate_matrix, dtype)\n\n # Add pseudocount if zeroes are present.\n if np.count_nonzero(data_matrix) != np.size(data_matrix):\n print(\"Zero counts encountered in data! 
Added a pseudocount of 0.5.\")\n data_matrix += 0.5\n self.y = tf.convert_to_tensor(data_matrix, dtype)\n\n sample_counts = np.sum(data_matrix, axis=1)\n self.n_total = tf.cast(sample_counts, dtype)\n self.cell_types = cell_types\n self.covariate_names = covariate_names\n self.formula = formula\n\n # Get dimensions of data\n self.N, self.D = self.x.shape\n self.K = self.y.shape[1]\n\n # Check input data\n if self.N != self.y.shape[0]:\n raise ValueError(\"Wrong input dimensions X[{},:] != y[{},:]\".format(self.x.shape[0], self.y.shape[0]))\n if self.N != len(self.n_total):\n raise ValueError(\"Wrong input dimensions X[{},:] != n_total[{}]\".format(self.x.shape[0], len(self.n_total)))\n\n def sampling(\n self,\n num_results: int,\n num_burnin: int,\n kernel,\n init_state: dict,\n trace_fn,\n ) -> Tuple[List[any], List[any], float]:\n \"\"\"\n MCMC sampling process (tensorflow 2)\n\n Parameters\n ----------\n num_results\n MCMC chain length (default 20000)\n num_burnin\n Number of burnin iterations (default 5000)\n kernel\n tensorflow MCMC kernel object\n init_state\n Starting parameters\n trace_fn\n tracing function\n\n Returns\n -------\n MCMC chain states and results\n\n states\n States of MCMC chain\n kernel_results\n sampling meta-information\n duration\n Duration of MCMC sampling process\n \"\"\"\n\n # HMC sampling function\n @tf.function\n def sample_mcmc(num_results_, num_burnin_, kernel_, current_state_, trace_fn):\n\n return tfp.mcmc.sample_chain(\n num_results=num_results_,\n num_burnin_steps=num_burnin_,\n kernel=kernel_,\n current_state=current_state_,\n trace_fn=trace_fn\n )\n\n # The actual sampling process\n start = time.time()\n states, kernel_results = sample_mcmc(num_results, num_burnin, kernel, init_state, trace_fn)\n duration = time.time() - start\n print(\"MCMC sampling finished. ({:.3f} sec)\".format(duration))\n\n return states, kernel_results, duration\n\n def get_chains_after_burnin(\n self,\n samples: List[any],\n kernel_results: List[any],\n num_burnin: int,\n is_nuts: bool = False\n ) -> Tuple[List[any], dict, float]:\n \"\"\"\n Application of burn-in after MCMC sampling.\n Cuts the first `num_burnin` samples from all inferred variables and diagnostic statistics.\n\n Parameters\n ----------\n samples\n all kernel states\n kernel_results\n Kernel meta-information. 
The tracked statistics depend on the sampling method.\n num_burnin\n number of burn-in iterations\n is_nuts\n Specifies whether NUTS sampling was used\n\n Returns\n -------\n MCMC chain without burn-in, sampling statistics, acceptance rate\n\n states_burnin\n Kernel states without burn-in samples\n stats\n sampling statistics\n p_accept\n acceptance rate of MCMC process\n \"\"\"\n\n # Samples after burn-in\n states_burnin = []\n stats = {}\n\n # Apply burn-in to MCMC results\n for s in samples:\n states_burnin.append(s[num_burnin:].numpy())\n\n # Apply burn-in to sampling statistics\n for k, v in kernel_results.items():\n stats[k] = v[num_burnin:].numpy()\n\n # Calculation of acceptance rate (different for NUTS sampling)\n if is_nuts:\n p_accept = np.mean(np.exp(kernel_results[\"log_accept_ratio\"].numpy()))\n else:\n acceptances = kernel_results[\"is_accepted\"].numpy()\n\n # Calculate acceptance rate\n p_accept = sum(acceptances) / acceptances.shape[0]\n print('Acceptance rate: %0.1f%%' % (100 * p_accept))\n\n return states_burnin, stats, p_accept\n\n def sample_hmc(\n self,\n num_results: int = int(20e3),\n num_burnin: int = int(5e3),\n num_adapt_steps: Optional[int] = None,\n num_leapfrog_steps: Optional[int] = 10,\n step_size: float = 0.01\n ) -> res.CAResult:\n\n \"\"\"\n Hamiltonian Monte Carlo (HMC) sampling in tensorflow 2.\n\n Tracked diagnostic statistics:\n\n - `target_log_prob`: Value of the model's log-probability\n\n - `diverging`: Marks samples as diverging (NOTE: Handle with care, the spike-and-slab prior of scCODA usually leads to many samples being flagged as diverging)\n\n - `is_accepted`: Whether the proposed sample was accepted in the algorithm's acceptance step\n\n - `step_size`: The step size used by the algorithm in each step\n\n Parameters\n ----------\n num_results\n MCMC chain length (default 20000)\n num_burnin\n Number of burnin iterations (default 5000)\n num_adapt_steps\n Length of step size adaptation procedure\n num_leapfrog_steps\n HMC leapfrog steps (default 10)\n step_size\n Initial step size (default 0.01)\n\n Returns\n -------\n results object\n\n result\n Compositional analysis result\n \"\"\"\n\n # bijectors (not in use atm, therefore identity)\n constraining_bijectors = [tfb.Identity() for x in range(len(self.init_params))]\n\n # HMC transition kernel\n hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=self.target_log_prob_fn,\n step_size=step_size,\n num_leapfrog_steps=num_leapfrog_steps)\n hmc_kernel = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=hmc_kernel, bijector=constraining_bijectors)\n\n # Set default value for adaptation steps if none given\n if num_adapt_steps is None:\n num_adapt_steps = int(0.8 * num_burnin)\n\n # Add step size adaptation (Andrieu, Thomas - 2008)\n hmc_kernel = tfp.mcmc.SimpleStepSizeAdaptation(\n inner_kernel=hmc_kernel, num_adaptation_steps=num_adapt_steps, target_accept_prob=0.8)\n\n # diagnostics tracing function\n def trace_fn(_, pkr):\n return {\n 'target_log_prob': pkr.inner_results.inner_results.accepted_results.target_log_prob,\n 'diverging': (pkr.inner_results.inner_results.log_accept_ratio < -1000.),\n 'is_accepted': pkr.inner_results.inner_results.is_accepted,\n 'step_size': pkr.inner_results.inner_results.accepted_results.step_size,\n }\n\n # The actual HMC sampling process\n states, kernel_results, duration = self.sampling(num_results, num_burnin,\n hmc_kernel, self.init_params, trace_fn)\n\n # apply burn-in\n states_burnin, sample_stats, acc_rate = 
self.get_chains_after_burnin(states, kernel_results, num_burnin,\n is_nuts=False)\n\n # Calculate posterior predictive\n y_hat = self.get_y_hat(states_burnin, num_results, num_burnin)\n\n params = dict(zip(self.param_names, states_burnin))\n\n # Result object generation setup\n # Get names of cell types that are not the reference\n if self.reference_cell_type is not None:\n cell_types_nb = self.cell_types[:self.reference_cell_type] + self.cell_types[self.reference_cell_type+1:]\n else:\n cell_types_nb = self.cell_types\n\n # Result object generation process. Uses arviz's data structure.\n posterior = {var_name: [var] for var_name, var in params.items() if\n \"prediction\" not in var_name}\n\n if \"prediction\" in self.param_names:\n posterior_predictive = {\"prediction\": [params[\"prediction\"]]}\n else:\n posterior_predictive = {}\n\n observed_data = {\"y\": self.y}\n dims = {\"alpha\": [\"cell_type\"],\n \"mu_b\": [\"1\"],\n \"sigma_b\": [\"1\"],\n \"b_offset\": [\"covariate\", \"cell_type_nb\"],\n \"ind_raw\": [\"covariate\", \"cell_type_nb\"],\n \"ind\": [\"covariate\", \"cell_type_nb\"],\n \"b_raw\": [\"covariate\", \"cell_type_nb\"],\n \"beta\": [\"covariate\", \"cell_type\"],\n \"concentration\": [\"sample\", \"cell_type\"],\n \"prediction\": [\"sample\", \"cell_type\"]\n }\n coords = {\"cell_type\": self.cell_types,\n \"cell_type_nb\": cell_types_nb,\n \"covariate\": self.covariate_names,\n \"sample\": range(self.y.shape[0])\n }\n\n sampling_stats = {\"chain_length\": num_results, \"num_burnin\": num_burnin,\n \"acc_rate\": acc_rate, \"duration\": duration, \"y_hat\": y_hat}\n\n model_specs = {\"reference\": self.reference_cell_type, \"formula\": self.formula}\n\n return res.CAResultConverter(posterior=posterior,\n posterior_predictive=posterior_predictive,\n observed_data=observed_data,\n dims=dims,\n sample_stats=sample_stats,\n coords=coords).to_result_data(sampling_stats=sampling_stats,\n model_specs=model_specs)\n\n def sample_hmc_da(\n self,\n num_results: int = int(20e3),\n num_burnin: int = int(5e3),\n num_adapt_steps: Optional[int] = None,\n num_leapfrog_steps: Optional[int] = 10,\n step_size: float = 0.01\n ) -> res.CAResult:\n \"\"\"\n HMC sampling with dual-averaging step size adaptation (Nesterov, 2009)\n\n Tracked diagnostic statistics:\n\n - `target_log_prob`: Value of the model's log-probability\n\n - `diverging`: Marks samples as diverging (NOTE: Handle with care, the spike-and-slab prior of scCODA usually leads to many samples being flagged as diverging)\n\n - `log_acc_ratio`: log-acceptance ratio\n\n - `is_accepted`: Whether the proposed sample was accepted in the algorithm's acceptance step\n\n - `step_size`: The step size used by the algorithm in each step\n\n Parameters\n ----------\n num_results\n MCMC chain length (default 20000)\n num_burnin\n Number of burnin iterations (default 5000)\n num_adapt_steps\n Length of step size adaptation procedure\n num_leapfrog_steps\n HMC leapfrog steps (default 10)\n step_size\n Initial step size (default 0.01)\n\n Returns\n -------\n result object\n\n result\n Compositional analysis result\n \"\"\"\n\n warnings.warn(\n \"This feature is untested and might yield different results than expected. 
Please use sample_hmc().\",\n category=UserWarning\n )\n\n # bijectors (not in use atm, therefore identity)\n constraining_bijectors = [tfb.Identity() for x in range(len(self.init_params))]\n\n # HMC transition kernel\n hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=self.target_log_prob_fn,\n step_size=step_size,\n num_leapfrog_steps=num_leapfrog_steps)\n hmc_kernel = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=hmc_kernel, bijector=constraining_bijectors)\n\n # Set default value for adaptation steps if none given\n if num_adapt_steps is None:\n num_adapt_steps = int(0.8 * num_burnin)\n\n # Add step size adaptation\n hmc_kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n inner_kernel=hmc_kernel, num_adaptation_steps=num_adapt_steps, target_accept_prob=0.8, decay_rate=0.5)\n\n # tracing function\n def trace_fn(_, pkr):\n return {\n 'target_log_prob': pkr.inner_results.inner_results.accepted_results.target_log_prob,\n 'diverging': (pkr.inner_results.inner_results.log_accept_ratio < -1000.),\n \"log_acc_ratio\": pkr.inner_results.inner_results.log_accept_ratio,\n 'is_accepted': pkr.inner_results.inner_results.is_accepted,\n 'step_size': tf.exp(pkr.log_averaging_step[0]),\n }\n\n # HMC sampling\n states, kernel_results, duration = self.sampling(num_results, num_burnin, hmc_kernel, self.init_params, trace_fn)\n states_burnin, sample_stats, acc_rate = self.get_chains_after_burnin(states, kernel_results, num_burnin,\n is_nuts=False)\n\n y_hat = self.get_y_hat(states_burnin, num_results, num_burnin)\n\n params = dict(zip(self.param_names, states_burnin))\n\n # Specification of cell types that were not used as the reference\n if self.reference_cell_type is not None:\n cell_types_nb = self.cell_types[:self.reference_cell_type] + self.cell_types[self.reference_cell_type+1:]\n else:\n cell_types_nb = self.cell_types\n\n # Result object generation process. 
Uses arviz's data structure.\n posterior = {var_name: [var] for var_name, var in params.items() if\n \"prediction\" not in var_name}\n\n if \"prediction\" in self.param_names:\n posterior_predictive = {\"prediction\": [params[\"prediction\"]]}\n else:\n posterior_predictive = {}\n\n observed_data = {\"y\": self.y}\n dims = {\"alpha\": [\"cell_type\"],\n \"mu_b\": [\"1\"],\n \"sigma_b\": [\"1\"],\n \"b_offset\": [\"covariate\", \"cell_type_nb\"],\n \"ind_raw\": [\"covariate\", \"cell_type_nb\"],\n \"ind\": [\"covariate\", \"cell_type_nb\"],\n \"b_raw\": [\"covariate\", \"cell_type_nb\"],\n \"beta\": [\"covariate\", \"cell_type\"],\n \"concentration\": [\"sample\", \"cell_type\"],\n \"prediction\": [\"sample\", \"cell_type\"]\n }\n coords = {\"cell_type\": self.cell_types,\n \"cell_type_nb\": cell_types_nb,\n \"covariate\": self.covariate_names,\n \"sample\": range(self.y.shape[0])\n }\n\n # build dictionary with sampling statistics\n sampling_stats = {\"chain_length\": num_results, \"num_burnin\": num_burnin,\n \"acc_rate\": acc_rate, \"duration\": duration, \"y_hat\": y_hat}\n\n model_specs = {\"reference\": self.reference_cell_type, \"formula\": self.formula}\n\n return res.CAResultConverter(posterior=posterior,\n posterior_predictive=posterior_predictive,\n observed_data=observed_data,\n dims=dims,\n sample_stats=sample_stats,\n coords=coords).to_result_data(sampling_stats=sampling_stats,\n model_specs=model_specs)\n\n def sample_nuts(\n self,\n num_results: int = int(10e3),\n num_burnin: int = int(5e3),\n num_adapt_steps: Optional[int] = None,\n max_tree_depth: int = 10,\n step_size: float = 0.01\n ) -> res.CAResult:\n \"\"\"\n HMC with No-U-turn (NUTS) sampling.\n This method is untested and might yield different results than expected.\n\n Tracked diagnostic statistics:\n\n - `target_log_prob`: Value of the model's log-probability\n\n - `leapfros_taken`: Number of leapfrog steps taken by hte integrator\n\n - `diverging`: Marks samples as diverging (NOTE: Handle with care, the spike-and-slab prior of scCODA usually leads to many samples being flagged as diverging)\n\n - `energy`: HMC \"Energy\" value for each step\n\n - `log_accept_ratio`: log-acceptance ratio\n\n - `step_size`: The step size used by the algorithm in each step\n\n - `reached_max_depth`: Whether the NUTS algorithm reached the maximum sampling depth in each step\n\n - `is_accepted`: Whether the proposed sample was accepted in the algorithm's acceptance step\n\n Parameters\n ----------\n num_results\n MCMC chain length (default 10000)\n num_burnin\n Number of burnin iterations (default 5000)\n num_adapt_steps\n Length of step size adaptation procedure\n max_tree_depth\n Maximum tree depth (default 10)\n step_size\n Initial step size (default 0.01)\n\n Returns\n -------\n result object\n\n result\n Compositional analysis result\n \"\"\"\n\n warnings.warn(\n \"This feature is untested and might yield different results than expected. 
Please use sample_hmc().\",\n category=UserWarning\n )\n\n # bijectors (not in use atm, therefore identity)\n constraining_bijectors = [tfb.Identity() for x in range(len(self.init_params))]\n\n # NUTS transition kernel\n nuts_kernel = tfp.mcmc.NoUTurnSampler(\n target_log_prob_fn=self.target_log_prob_fn,\n step_size=step_size,\n max_tree_depth=max_tree_depth)\n nuts_kernel = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=nuts_kernel,\n bijector=constraining_bijectors\n )\n\n # Set default value for adaptation steps\n if num_adapt_steps is None:\n num_adapt_steps = int(0.8 * num_burnin)\n\n # Step size adaptation (Nesterov, 2009)\n nuts_kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n inner_kernel=nuts_kernel, num_adaptation_steps=num_adapt_steps, target_accept_prob=0.8,\n step_size_setter_fn=lambda pkr, new_step_size: pkr._replace(\n inner_results=pkr.inner_results._replace(step_size=new_step_size)\n ),\n step_size_getter_fn=lambda pkr: pkr.inner_results.step_size,\n log_accept_prob_getter_fn=lambda pkr: pkr.inner_results.log_accept_ratio,\n )\n\n # trace function\n def trace_fn(_, pkr):\n return {\n \"target_log_prob\": pkr.inner_results.inner_results.target_log_prob,\n \"leapfrogs_taken\": pkr.inner_results.inner_results.leapfrogs_taken,\n \"diverging\": pkr.inner_results.inner_results.has_divergence,\n \"energy\": pkr.inner_results.inner_results.energy,\n \"log_accept_ratio\": pkr.inner_results.inner_results.log_accept_ratio,\n \"step_size\": pkr.inner_results.inner_results.step_size[0],\n \"reach_max_depth\": pkr.inner_results.inner_results.reach_max_depth,\n \"is_accepted\": pkr.inner_results.inner_results.is_accepted,\n }\n\n # HMC sampling\n states, kernel_results, duration = self.sampling(num_results, num_burnin, nuts_kernel, self.init_params, trace_fn)\n states_burnin, sample_stats, acc_rate = self.get_chains_after_burnin(states, kernel_results, num_burnin,\n is_nuts=True)\n\n y_hat = self.get_y_hat(states_burnin, num_results, num_burnin)\n\n params = dict(zip(self.param_names, states_burnin))\n\n # Result object generation process. 
Uses arviz's data structure.\n # Get names of cell types that are not the reference\n if self.reference_cell_type is not None:\n cell_types_nb = self.cell_types[:self.reference_cell_type] + self.cell_types[self.reference_cell_type + 1:]\n else:\n cell_types_nb = self.cell_types\n\n posterior = {var_name: [var] for var_name, var in params.items() if\n \"prediction\" not in var_name}\n\n if \"prediction\" in self.param_names:\n posterior_predictive = {\"prediction\": [params[\"prediction\"]]}\n else:\n posterior_predictive = {}\n\n observed_data = {\"y\": self.y}\n dims = {\"alpha\": [\"cell_type\"],\n \"mu_b\": [\"1\"],\n \"sigma_b\": [\"1\"],\n \"b_offset\": [\"covariate\", \"cell_type_nb\"],\n \"ind_raw\": [\"covariate\", \"cell_type_nb\"],\n \"ind\": [\"covariate\", \"cell_type_nb\"],\n \"b_raw\": [\"covariate\", \"cell_type_nb\"],\n \"beta\": [\"covariate\", \"cell_type\"],\n \"concentration\": [\"sample\", \"cell_type\"],\n \"prediction\": [\"sample\", \"cell_type\"]\n }\n coords = {\"cell_type\": self.cell_types,\n \"cell_type_nb\": cell_types_nb,\n \"covariate\": self.covariate_names,\n \"sample\": range(self.y.shape[0])\n }\n\n sampling_stats = {\"chain_length\": num_results, \"num_burnin\": num_burnin,\n \"acc_rate\": acc_rate, \"duration\": duration, \"y_hat\": y_hat}\n\n model_specs = {\"reference\": self.reference_cell_type, \"formula\": self.formula}\n\n return res.CAResultConverter(posterior=posterior,\n posterior_predictive=posterior_predictive,\n observed_data=observed_data,\n dims=dims,\n sample_stats=sample_stats,\n coords=coords).to_result_data(sampling_stats=sampling_stats,\n model_specs=model_specs)\n\n\nclass ReferenceModel(CompositionalModel):\n \"\"\"\n Statistical model for single-cell differential composition analysis with specification of a reference cell type.\n This is the standard scCODA model and recommenced for all uses.\n\n The hierarchical formulation of the model for one sample is:\n\n .. math::\n y|x &\\\\sim DirMult(a(x), \\\\bar{y}) \\\\\\\\\n \\\\log(a(x)) &= \\\\alpha + x \\\\beta \\\\\\\\\n \\\\alpha_k &\\\\sim N(0, 5) \\\\quad &\\\\forall k \\\\in [K] \\\\\\\\\n \\\\beta_{d, \\\\hat{k}} &= 0 &\\\\forall d \\\\in [D]\\\\\\\\\n \\\\beta_{d, k} &= \\\\tau_{d, k} \\\\tilde{\\\\beta}_{d, k} \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n \\\\tau_{d, k} &= \\\\frac{\\\\exp(t_{d, k})}{1+ \\\\exp(t_{d, k})} \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n \\\\frac{t_{d, k}}{50} &\\\\sim N(0, 1) \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n \\\\tilde{\\\\beta}_{d, k} &= (\\\\tilde{\\\\mu} + \\\\tilde{\\\\sigma}^2) \\\\cdot \\\\tilde{\\\\gamma}_{d, k} \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n \\\\tilde{\\\\mu} &\\\\sim N(0, 1) \\\\\\\\\n \\\\tilde{\\\\sigma}^2 &\\\\sim HC(0, 1) \\\\\\\\\n \\\\tilde{\\\\gamma}_{d, k} &\\\\sim N(0,1) \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n\n with y being the cell counts and x the covariates.\n\n For further information, see `scCODA: A Bayesian model for compositional single-cell data analysis`\n (Büttner, Ostner et al., 2020)\n\n \"\"\"\n\n def __init__(\n self,\n reference_cell_type: int,\n *args,\n **kwargs):\n \"\"\"\n Constructor of model class. 
Defines model structure, log-probability function, parameter names,\n and MCMC starting values.\n\n Parameters\n ----------\n reference_cell_type\n Index of reference cell type (column in count data matrix)\n args\n arguments passed to top-level class\n kwargs\n arguments passed to top-level class\n \"\"\"\n\n super(self.__class__, self).__init__(*args, **kwargs)\n\n self.reference_cell_type = reference_cell_type\n dtype = tf.float64\n\n # All parameters that are returned for analysis\n self.param_names = [\"mu_b\", \"sigma_b\", \"b_offset\", \"ind_raw\", \"alpha\",\n \"ind\", \"b_raw\", \"beta\", \"concentration\", \"prediction\"]\n\n alpha_size = [self.K]\n beta_size = [self.D, self.K]\n beta_nobl_size = [self.D, self.K-1]\n\n Root = tfd.JointDistributionCoroutine.Root\n\n def model():\n mu_b = yield Root(tfd.Independent(\n tfd.Normal(loc=tf.zeros(1, dtype=dtype),\n scale=tf.ones(1, dtype=dtype),\n name=\"mu_b\"),\n reinterpreted_batch_ndims=1))\n\n sigma_b = yield Root(tfd.Independent(\n tfd.HalfCauchy(tf.zeros(1, dtype=dtype),\n tf.ones(1, dtype=dtype),\n name=\"sigma_b\"),\n reinterpreted_batch_ndims=1))\n\n b_offset = yield Root(tfd.Independent(\n tfd.Normal(\n loc=tf.zeros(beta_nobl_size, dtype=dtype),\n scale=tf.ones(beta_nobl_size, dtype=dtype),\n name=\"b_offset\"),\n reinterpreted_batch_ndims=2))\n\n # Spike-and-slab\n ind_raw = yield Root(tfd.Independent(\n tfd.Normal(\n loc=tf.zeros(shape=beta_nobl_size, dtype=dtype),\n scale=tf.ones(shape=beta_nobl_size, dtype=dtype),\n name='ind_raw'),\n reinterpreted_batch_ndims=2))\n\n ind_scaled = ind_raw * 50\n ind = tf.exp(ind_scaled) / (1 + tf.exp(ind_scaled))\n\n b_raw = mu_b + sigma_b * b_offset\n\n beta = ind * b_raw\n\n # Include slope 0 for reference cell type\n beta = tf.concat(axis=1, values=[beta[:, :reference_cell_type],\n tf.zeros(shape=[self.D, 1], dtype=dtype),\n beta[:, reference_cell_type:]])\n\n alpha = yield Root(tfd.Independent(\n tfd.Normal(\n loc=tf.zeros(alpha_size, dtype=dtype),\n scale=tf.ones(alpha_size, dtype=dtype) * 5,\n name=\"alpha\"),\n reinterpreted_batch_ndims=1))\n\n concentrations = tf.exp(alpha + tf.matmul(self.x, beta))\n\n # Cell count prediction via DirMult\n predictions = yield Root(tfd.Independent(\n tfd.DirichletMultinomial(\n total_count=tf.cast(self.n_total, dtype),\n concentration=concentrations,\n name=\"predictions\"),\n reinterpreted_batch_ndims=1))\n\n self.model_struct = tfd.JointDistributionCoroutine(model)\n\n # Joint posterior distribution\n self.target_log_prob_fn = lambda *args:\\\n self.model_struct.log_prob(list(args) + [tf.cast(self.y, dtype)])\n\n # MCMC starting values\n self.init_params = [\n tf.zeros(1, name=\"init_mu_b\", dtype=dtype),\n tf.ones(1, name=\"init_sigma_b\", dtype=dtype),\n tf.random.normal(beta_nobl_size, 0, 1, name='init_b_offset', dtype=dtype),\n tf.zeros(beta_nobl_size, name='init_ind_raw', dtype=dtype),\n tf.random.normal(alpha_size, 0, 1, name='init_alpha', dtype=dtype)\n ]\n\n # Calculate predicted cell counts (for analysis purposes)\n def get_y_hat(\n self,\n states_burnin: List[any],\n num_results: int,\n num_burnin: int\n ) -> np.ndarray:\n \"\"\"\n Calculate posterior mode of cell counts (for analysis purposes) and add intermediate parameters\n that are no priors to MCMC results.\n\n Parameters\n ----------\n states_burnin\n MCMC chain without burn-in samples\n num_results\n Chain length (with burn-in)\n num_burnin\n Number of burn-in samples\n\n Returns\n -------\n posterior mode\n\n y_mean\n posterior mode of cell counts\n \"\"\"\n\n chain_size_y = 
[num_results - num_burnin, self.N, self.K]\n chain_size_beta = [num_results - num_burnin, self.D, self.K]\n\n alphas = states_burnin[4]\n alphas_final = alphas.mean(axis=0)\n\n ind_raw = states_burnin[3] * 50\n mu_b = states_burnin[0]\n sigma_b = states_burnin[1]\n b_offset = states_burnin[2]\n\n ind_ = np.exp(ind_raw) / (1 + np.exp(ind_raw))\n\n b_raw_ = mu_b.reshape((num_results - num_burnin, 1, 1)) + np.einsum(\"...jk, ...j->...jk\", b_offset, sigma_b)\n\n beta_temp = np.einsum(\"..., ...\", ind_, b_raw_)\n\n beta_ = np.zeros(chain_size_beta)\n for i in range(num_results - num_burnin):\n beta_[i] = np.concatenate([beta_temp[i, :, :self.reference_cell_type],\n np.zeros(shape=[self.D, 1], dtype=np.float64),\n beta_temp[i, :, self.reference_cell_type:]], axis=1)\n conc_ = np.exp(np.einsum(\"jk, ...kl->...jl\", self.x, beta_)\n + alphas.reshape((num_results - num_burnin, 1, self.K)))\n\n predictions_ = np.zeros(chain_size_y)\n for i in range(num_results - num_burnin):\n pred = tfd.DirichletMultinomial(self.n_total, conc_[i, :, :]).mean().numpy()\n predictions_[i, :, :] = pred\n\n betas_final = beta_.mean(axis=0)\n states_burnin.append(ind_)\n states_burnin.append(b_raw_)\n states_burnin.append(beta_)\n states_burnin.append(conc_)\n states_burnin.append(predictions_)\n\n concentration = np.exp(np.matmul(self.x, betas_final) + alphas_final).astype(np.float64)\n\n y_mean = concentration / np.sum(concentration, axis=1, keepdims=True) * self.n_total.numpy()[:, np.newaxis]\n\n return y_mean\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.matmul",
"numpy.einsum",
"tensorflow.zeros",
"tensorflow.random.normal",
"tensorflow.cast",
"numpy.matmul",
"tensorflow.ones",
"tensorflow.exp",
"numpy.size",
"numpy.count_nonzero",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
ylimit/ModelDiff | [
"f509bd2a1de20138aeb5cf105f99597a279f6f0b"
] | [
"utils.py"
] | [
"import os\nimport os.path as osp\nimport sys\nimport time\nimport argparse\nfrom pdb import set_trace as st\nimport json\nimport functools\n\nimport torch\nimport numpy as np\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torchvision import transforms\n\n\nclass MovingAverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f', momentum=0.9):\n self.name = name\n self.fmt = fmt\n self.momentum = momentum\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n\n def update(self, val, n=1):\n self.val = val\n self.avg = self.momentum*self.avg + (1-self.momentum)*val\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\", output_dir=None):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n if output_dir is not None:\n self.filepath = osp.join(output_dir, \"progress\")\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n log_str = '\\t'.join(entries)\n print(log_str)\n # if self.filepath is not None:\n # with open(self.filepath, \"a\") as f:\n # f.write(log_str+\"\\n\")\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n \nclass CrossEntropyLabelSmooth(nn.Module):\n def __init__(self, num_classes, epsilon = 0.1):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (-targets * log_probs).sum(1)\n return loss.mean()\n\n\ndef linear_l2(model, beta_lmda):\n beta_loss = 0\n for m in model.modules():\n if isinstance(m, nn.Linear):\n beta_loss += (m.weight).pow(2).sum()\n beta_loss += (m.bias).pow(2).sum()\n return 0.5*beta_loss*beta_lmda, beta_loss\n\n\ndef l2sp(model, reg):\n reg_loss = 0\n dist = 0\n for m in model.modules():\n if hasattr(m, 'weight') and hasattr(m, 'old_weight'):\n diff = (m.weight - m.old_weight).pow(2).sum()\n dist += diff\n reg_loss += diff \n\n if hasattr(m, 'bias') and hasattr(m, 'old_bias'):\n diff = (m.bias - m.old_bias).pow(2).sum()\n dist += diff\n reg_loss += diff \n\n if dist > 0:\n dist = dist.sqrt()\n \n loss = (reg * reg_loss)\n return loss, dist\n\n\ndef advtest_fast(model, loader, adversary, args):\n advDataset = torch.load(args.adv_data_dir)\n test_loader = torch.utils.data.DataLoader(\n advDataset,\n batch_size=4, shuffle=False,\n num_workers=0, pin_memory=False)\n model.eval()\n\n total_ce = 0\n total = 0\n top1 = 0\n\n total = 0\n top1_clean = 0\n top1_adv = 0\n adv_success = 0\n adv_trial = 0\n for i, (batch, label, adv_batch, adv_label) in enumerate(test_loader):\n batch, label = batch.to('cuda'), label.to('cuda')\n adv_batch = adv_batch.to('cuda')\n\n total += batch.size(0)\n out_clean = model(batch)\n\n # if 'mbnetv2' in args.network:\n # y = torch.zeros(batch.shape[0], model.classifier[1].in_features).cuda()\n # else:\n # y = 
torch.zeros(batch.shape[0], model.fc.in_features).cuda()\n \n # y[:,0] = args.m\n # advbatch = adversary.perturb(batch, y)\n\n out_adv = model(adv_batch)\n\n _, pred_clean = out_clean.max(dim=1)\n _, pred_adv = out_adv.max(dim=1)\n\n clean_correct = pred_clean.eq(label)\n adv_trial += int(clean_correct.sum().item())\n adv_success += int(pred_adv[clean_correct].eq(label[clean_correct]).sum().detach().item())\n top1_clean += int(pred_clean.eq(label).sum().detach().item())\n top1_adv += int(pred_adv.eq(label).sum().detach().item())\n\n # print('{}/{}...'.format(i+1, len(test_loader)))\n print(f\"Finish adv test fast\")\n del test_loader\n del advDataset\n return float(top1_clean)/total*100, float(top1_adv)/total*100, float(adv_trial-adv_success) / adv_trial *100\n\n\ndef lazy_property(func):\n attribute = '_lazy_' + func.__name__\n\n @property\n @functools.wraps(func)\n def wrapper(self):\n if not hasattr(self, attribute):\n setattr(self, attribute, func(self))\n return getattr(self, attribute)\n\n return wrapper\n\n\nclass Utils:\n _instance = None\n\n def __init__(self):\n self.cache = {}\n\n @staticmethod\n def _get_instance():\n if Utils._instance is None:\n Utils._instance = Utils()\n return Utils._instance\n\n @staticmethod\n def show_images(images, labels, title='examples'):\n plt.figure(figsize=(10,10))\n plt.subplots_adjust(hspace=0.2)\n for n in range(25):\n plt.subplot(5,5,n+1)\n img = images[n]\n img = img.numpy().squeeze()\n plt.imshow(img)\n plt.title(f'{labels[n]}')\n plt.axis('off')\n _ = plt.suptitle(title)\n plt.show()\n\n @staticmethod\n def copy_weights(source_model, target_model):\n # print(source_model.summary())\n # print(target_model.summary())\n for i, layer in enumerate(target_model.layers):\n if not layer.get_weights():\n continue\n source_layer = source_model.get_layer(layer.name)\n # print(layer)\n # print(source_layer)\n layer.set_weights(source_layer.get_weights())\n return target_model\n\n @staticmethod\n def normalize(v):\n norm = np.linalg.norm(v)\n if norm == 0:\n return v\n return v / norm\n\n"
] | [
[
"torch.nn.LogSoftmax",
"torch.load",
"torch.zeros_like",
"torch.utils.data.DataLoader",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TomLiu59/AI-Final-Project | [
"160cb39f7a6c2d51a5f131c70a2ef4677a6d554e"
] | [
"main.py"
] | [
"import numpy as np\nimport pprint\nimport tensorflow as tf\nimport os\nfrom datetime import datetime\n\nfrom model import AlternatingAttention\nimport data_helper\nimport train\nimport test1\nimport sys\n\nflags = tf.app.flags;\n\nflags.DEFINE_integer(\"embedding_dim\", 384, \"Dimensionality of character embedding (default: 384)\")\nflags.DEFINE_integer(\"encoding_dim\", 128, \"Dimensionality of bidirectional GRU encoding for query / document\")\nflags.DEFINE_integer(\"num_glimpses\", 8, \"Number of glimpse iterations during read (default: 8)\")\nflags.DEFINE_float(\"dropout_keep_prob\", 0.8, \"Dropout keep probability (default: 0.8)\")\nflags.DEFINE_float(\"l2_reg_lambda\", 1e-4, \"L2 regularizaion lambda (default: 0.0001)\")\nflags.DEFINE_float(\"learning_rate\", 1e-3, \"AdamOptimizer learning rate (default: 0.001)\")\nflags.DEFINE_float(\"learning_rate_decay\", 0.8, \"How much learning rate will decay after half epoch of non-decreasing loss (default: 0.8)\")\n\n# Training parameters\nflags.DEFINE_integer(\"batch_size\", 1, \"Batch Size (default: 32)\")\nflags.DEFINE_integer(\"num_epochs\", 12, \"Number of training epochs (default: 12)\")\nflags.DEFINE_integer(\"evaluate_every\", 300, \"Evaluate model on validation set after this many steps (default: 300)\")\n\nflags.DEFINE_boolean(\"trace\", False, \"Trace (load smaller dataset)\")\nflags.DEFINE_string(\"log_dir\", \"logs\", \"Directory for summary logs to be written to default (./logs/)\")\n\nflags.DEFINE_integer(\"checkpoint_every\", 1000, \"Save model after this many steps (default: 1000)\")\nflags.DEFINE_string(\"ckpt_dir\", \"./ckpts/\", \"Directory for checkpoints default (./ckpts/)\")\nflags.DEFINE_string(\"restore_file\", \"model-l3.165_a0.510.ckpt-11000\", \"Checkpoint to load\")\n\nflags.DEFINE_boolean(\"evaluate\", True, \"Whether to run evaluation epoch on a checkpoint. Must have restore_file set.\")\n\ndef main(_):\n FLAGS = tf.app.flags.FLAGS\n pp = pprint.PrettyPrinter()\n# FLAGS._parse_flags()\n FLAGS(sys.argv)\n pp.pprint(FLAGS.__flags)\n\n # Load Data\n X_train, Q_train, Y_train = data_helper.load_data('train')\n X_test, Q_test, Y_test = data_helper.load_data('valid')\n\n vocab_size = np.max(X_train) + 1\n print('[?] Vocabulary Size:', vocab_size)\n\n # Create directories\n if not os.path.exists(FLAGS.ckpt_dir):\n os.makedirs(FLAGS.ckpt_dir)\n\n timestamp = str('\\\\') + datetime.now().strftime('%Y%m%d%H%M%S') \n FLAGS.log_dir = str('log') + str(timestamp)\n print(FLAGS.log_dir)\n if not os.path.exists(FLAGS.log_dir):\n os.makedirs(FLAGS.log_dir)\n\n # Train Model\n with tf.Session(config=tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)) as sess, tf.device('/gpu:0'):\n model = AlternatingAttention(FLAGS.batch_size, vocab_size, FLAGS.encoding_dim, FLAGS.embedding_dim, FLAGS.num_glimpses, session=sess)\n\n if FLAGS.trace: # Trace model for debugging\n train.trace(FLAGS, sess, model, (X_train, Q_train, Y_train))\n return\n\n saver = tf.train.Saver()\n\n if FLAGS.restore_file is not None:\n# saver = tf.train.import_meta_graph('/tmp/model.ckpt.meta')\n saver = tf.train.import_meta_graph(str(\"./ckpts/\")+str(FLAGS.restore_file))\n print('[?] 
Loading variables from checkpoint %s' % FLAGS.restore_file)\n saver.restore(sess, \"./ckpts/model-l3.165_a0.510.ckpt-11000\")\n# saver.restore(sess, FLAGS.restore_file)\n\n # Run evaluation\n if FLAGS.evaluate:\n if not FLAGS.restore_file:\n print('Need to specify a restore_file checkpoint to evaluate')\n else:\n test_data = data_helper.load_data('test')\n word2idx, _, _ = data_helper.build_vocab()\n test1.run(FLAGS, sess, model, test_data, word2idx)\n else:\n train.run(FLAGS, sess, model,\n (X_train, Q_train, Y_train),\n (X_test, Q_test, Y_test),\n saver)\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.device",
"tensorflow.ConfigProto",
"numpy.max",
"tensorflow.train.Saver",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ShuoZ9379/Integration_SIL_and_MBL | [
"d7df6501a665d65eb791f7fd9b8e85fd660e6320",
"d7df6501a665d65eb791f7fd9b8e85fd660e6320"
] | [
"algos/mbl_copos2_sil/run.py",
"baselines/n_copos/eta_omega_dual.py"
] | [
"import multiprocessing\nimport os.path as osp\nimport gym,sys\nfrom collections import defaultdict\nimport tensorflow as tf\nimport numpy as np\nimport pickle\nfrom baselines.common.vec_env import VecFrameStack,VecEnv, VecNormalize\nfrom baselines.run import parse_cmdline_kwargs, build_env, configure_logger, get_default_network, get_env_type\nfrom baselines.common.tf_util import get_session\nfrom baselines import logger\nfrom importlib import import_module\nfrom baselines.common import set_global_seeds\nimport baselines.common.tf_util as U\nfrom baselines.common.policies import build_policy\nfrom baselines.common.input import observation_placeholder\nfrom baselines.common.vec_env.vec_video_recorder import VecVideoRecorder\nfrom baselines.common.cmd_util import common_arg_parser, parse_unknown_args,make_vec_env, make_env\ntry:\n from mpi4py import MPI\nexcept ImportError:\n MPI = None\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\n\ntry:\n import roboschool\nexcept ImportError:\n roboschool = None\n\n_game_envs = defaultdict(set)\nfor env in gym.envs.registry.all():\n # TODO: solve this with regexes\n env_type = env._entry_point.split(':')[0].split('.')[-1]\n if env.id.find('Sparse') > -1:\n _game_envs['sparse_{}'.format(env_type)].add(env.id)\n else:\n _game_envs[env_type].add(env.id)\n\n# reading benchmark names directly from retro requires\n# importing retro here, and for some reason that crashes tensorflow\n# in ubuntu\n_game_envs['retro'] = {\n 'BubbleBobble-Nes',\n 'SuperMarioBros-Nes',\n 'TwinBee3PokoPokoDaimaou-Nes',\n 'SpaceHarrier-Nes',\n 'SonicTheHedgehog-Genesis',\n 'Vectorman-Genesis',\n 'FinalFight-Snes',\n 'SpaceInvaders-Snes',\n}\n\n\ndef train(args, extra_args):\n env_type, env_id = get_env_type(args)\n print('env_type: {}'.format(env_type))\n total_timesteps = int(args.num_timesteps)\n seed = args.seed\n set_global_seeds(seed)\n \n learn = get_learn_function(args.alg)\n alg_kwargs = get_learn_function_defaults(args.alg, env_type)\n alg_kwargs.update(extra_args)\n\n env = build_env(args,normalize_ob=False)\n eval_env = build_env(args,normalize_ob=False, is_eval=True)\n if args.save_video_interval != 0:\n env = VecVideoRecorder(env, osp.join(logger.get_dir(), \"videos\"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)\n\n if args.network:\n alg_kwargs['network'] = args.network\n else:\n if alg_kwargs.get('network') is None:\n alg_kwargs['network'] = get_default_network(env_type)\n beta = -1\n if beta < 0:\n #print(alg_kwargs)\n nr_episodes = total_timesteps // alg_kwargs['timesteps_per_batch']\n # Automatically compute beta based on initial entropy and number of iterations\n policy = build_policy(env, alg_kwargs['network'], value_network='copy', normalize_observations=alg_kwargs['normalize_observations'], copos=True)\n ob = observation_placeholder(env.observation_space)\n \n sess = U.single_threaded_session()\n sess.__enter__()\n with tf.variable_scope(\"tmp_pi\"):\n tmp_pi = policy(observ_placeholder=ob)\n sess.run(tf.global_variables_initializer())\n \n tmp_ob = np.zeros((1,) + env.observation_space.shape)\n entropy = sess.run(tmp_pi.pd.entropy(), feed_dict={tmp_pi.X: tmp_ob})\n #beta = 2 * entropy / nr_episodes\n beta = 0\n print(\"Initial entropy: \" + str(entropy) + \", episodes: \" + str(nr_episodes))\n print(\"Constantly set beta: \" + str(beta))\n\n print('Training {} on {}:{} with arguments \\n{}'.format(args.alg, env_type, env_id, alg_kwargs))\n iters = 0\n for model in learn(\n 
env=env,\n env_id=env_id,\n eval_env=eval_env,\n make_eval_env=lambda: build_env(args, normalize_ob=False, is_eval=True),\n seed=seed,\n beta=beta,\n total_timesteps=total_timesteps,\n sil_update=args.sil_update,\n sil_loss=args.sil_loss, \n **alg_kwargs\n ):\n if args.store_ckpt:\n save_path = osp.join(logger.get_dir(), 'model-{}'.format(iters))\n model.save(save_path) \n if isinstance(env, VecNormalize):\n rms_path = osp.join(logger.get_dir(), 'rms-{}'.format(iters))\n with open(rms_path, 'wb') as f:\n rms = (env.ob_rms, env.ret_rms)\n pickle.dump(rms, f)\n logger.log('Save {} model'.format(iters+1))\n iters += 1\n\n return model, env\n\ndef get_alg_module(alg, submodule=None):\n submodule = submodule or alg\n print(submodule)\n try:\n # first try to import the alg module from baselines\n alg_module = import_module('.'.join([submodule]))\n \n except ImportError:\n # then from rl_algs\n alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))\n\n return alg_module\n\ndef get_learn_function(alg):\n return get_alg_module(alg).learn\n\n\ndef get_learn_function_defaults(alg, env_type):\n try:\n alg_defaults = get_alg_module(alg, 'defaults')\n kwargs = getattr(alg_defaults, env_type)()\n except (ImportError, AttributeError):\n kwargs = {}\n return kwargs\n\ndef main(args):\n # configure logger, disable logging in child MPI processes (with rank > 0)\n\n arg_parser = common_arg_parser()\n args, unknown_args = arg_parser.parse_known_args()\n extra_args = parse_cmdline_kwargs(unknown_args)\n print(args)\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n #rank = 0\n #logger.configure()\n #logger.configure(dir=extra_args['logdir'])\n rank = 0\n configure_logger(args.log_path)\n else:\n rank = MPI.COMM_WORLD.Get_rank()\n configure_logger(args.log_path, format_strs=[])\n\n model, env = train(args, extra_args)\n\n if args.save_path is not None and rank == 0:\n save_path = osp.expanduser(args.save_path)\n model.save(save_path)\n\n if args.play:\n logger.log(\"Running trained model\")\n obs = env.reset()\n\n state = model.initial_state if hasattr(model, 'initial_state') else None\n dones = np.zeros((1,))\n\n episode_rew = 0\n while True:\n if state is not None:\n actions, _, state, _ = model.step(obs,S=state, M=dones)\n else:\n actions, _, _, _ = model.step(obs)\n\n obs, rew, done, _ = env.step(actions)\n episode_rew += rew[0] if isinstance(env, VecEnv) else rew\n env.render()\n done = done.any() if isinstance(done, np.ndarray) else done\n if done:\n print('episode_rew={}'.format(episode_rew))\n episode_rew = 0\n obs = env.reset()\n env.close()\n return model\n\nif __name__ == '__main__':\n main(sys.argv)\n",
"import scipy.optimize\n\n# import numpy as np\nimport autograd.numpy as np # Thinly-wrapped numpy\nfrom autograd import grad\n\nimport tensorflow as tf\nfrom baselines import logger\nimport baselines.common.tf_util as U\n\nclass EtaOmegaOptimizer(object):\n \"\"\"\n Finds eta and omega Lagrange multipliers.\n \"\"\"\n\n def __init__(self, beta, epsilon, init_eta, init_omega):\n self.init_eta_omega(beta, epsilon, init_eta, init_omega)\n\n def optimize(self, w_theta, Waa, Wsa, wa, varphis, Kt, prec, is_valid_eta_omega, old_entropy, eta=None):\n\n # wa = w_beta * \\grad_beta \\varphi_beta(s) * K^T * Prec\n\n if False:\n f_dual = self.opt_info['f_dual']\n f_dual_grad = self.opt_info['f_dual_grad']\n\n # Set BFGS eval function\n def eval_dual(input):\n param_eta = input[0]\n param_omega = input[1]\n val = f_dual(*([varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy]))\n return val.astype(np.float64)\n\n # Set BFGS gradient eval function\n def eval_dual_grad(input):\n param_eta = input[0]\n param_omega = input[1]\n grad = f_dual_grad(*([varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy]))\n return np.asarray(grad)\n\n if eta is not None:\n param_eta = eta\n else:\n param_eta = self.param_eta\n\n if self.beta == 0:\n beta = 0\n else:\n beta = old_entropy - self.beta\n\n # eta_before = param_eta\n # omega_before = self.param_omega\n # dual_before = eval_dual([eta_before, omega_before])\n # dual_grad_before = eval_dual_grad([eta_before, omega_before])\n\n x0 = [param_eta, self.param_omega]\n\n # TEST\n # small = 0.000000001\n # f1 = [self.param_eta - small, self.param_omega]\n # f2 = [self.param_eta + small, self.param_omega]\n # fd = (eval_dual(f1) - eval_dual(f2)) / (2 * small)\n #\n # duals = self.opt_info[\"f_duals\"](*([varphis, Kt, prec, Waa, Wsa, wa] + [eta_before, omega_before, old_entropy]))\n # logger.log(\"Theano eta/omega: \" + str(eta_before) + \"/\" + str(omega_before) + \": \" + str(dual_before) +\n # \", \" + str(duals) + \", grad: \" + str(eval_dual_grad(x0)) + \", fd: \" + str(fd))\n # # END TEST\n\n # Create dual function\n def eval_dual(input):\n param_eta = input[0]\n param_omega = input[1]\n\n # ha(s): eta * (\\varphi(s)^T * K^T * \\Sigma^{-1} + W_{sa}) + wa(s))\n ha = np.dot(varphis, param_eta * np.dot(Kt, prec) + Wsa) + wa\n\n # hss(s): eta * (\\varphi(s)^T * K^T * \\Sigma^{-1} * K * \\varphi(s))\n varphisKt = np.dot(varphis, Kt)\n hss = param_eta * np.sum(np.dot(varphisKt, prec) * varphisKt, axis=1)\n\n Haa = param_eta * prec + Waa\n # Haa = 0.5 * (Haa + np.transpose(Haa))\n HaaInv = np.linalg.inv(Haa)\n\n # The two terms 'term1' and 'term2' which come from normalizers of the\n # 1. Original policy distribution\n # 2. 
The distribution after completing the square\n sigma = np.linalg.inv(prec)\n term1 = -0.5 * param_eta * np.linalg.slogdet(2 * np.pi * sigma)[1]\n if self.beta == 0:\n term2 = 0.5 * param_eta * np.linalg.slogdet(2 * np.pi * param_eta * HaaInv)[1]\n else:\n term2 = 0.5 * (param_eta + param_omega) * np.linalg.slogdet(\n 2 * np.pi * (param_eta + param_omega) * HaaInv)[1]\n\n dual = param_eta * self.epsilon - param_omega * beta + \\\n term1 + term2 + np.mean(\n 0.5 * (np.sum(np.dot(ha, HaaInv) * ha, axis=1) - hss))\n\n return dual\n\n # Automatic gradient of the dual\n eval_dual_grad = grad(eval_dual)\n\n if True:\n def fx(x):\n eta, omega = x # eta: Lagrange variable of KL constraint, omega: of the entropy constraint\n error_return_val = 1e6, np.array([0., 0.])\n if eta + omega < 0:\n return error_return_val\n if not is_valid_eta_omega(eta, omega, w_theta):\n return error_return_val\n return eval_dual(x), eval_dual_grad(x)\n else:\n def fx(x):\n eta, omega = x # eta: Lagrange variable of KL constraint, omega: of the entropy constraint\n error_return_val = 1e6, np.array([0., 0.])\n if eta + omega < 0:\n return error_return_val\n if not is_valid_eta_omega(eta, omega, w_theta):\n return error_return_val\n return eval_dual(x), eval_dual_grad(x) # L-BFGS-B expects double floats\n # return np.float64(eval_dual(x)), np.float64(eval_dual_grad(x)) # L-BFGS-B expects double floats\n\n logger.log('optimizing dual')\n\n # Make sure valid initial covariance matrices\n while (not is_valid_eta_omega(x0[0], x0[1], w_theta)):\n x0[0] *= 2\n logger.log(\"Eta increased: \" + str(x0[0]))\n\n if eta is None:\n omega_lower = -100\n if False:\n res = scipy.optimize.minimize(fx, x0, method='L-BFGS-B', jac=True,\n bounds=((1e-12, None), (omega_lower, None)), options={'ftol': 1e-12})\n else:\n res = scipy.optimize.minimize(fx, x0, method='SLSQP', jac=True,\n bounds=((1e-12, None), (omega_lower, None)), options={'ftol': 1e-12})\n\n # Make sure that eta > omega\n if res.x[1] < 0 and -res.x[1] > res.x[0]:\n res.x[1] = -res.x[0] + 1e-6\n else:\n # Fixed eta: make sure that eta > omega\n omega_lower = np.max([-(eta - 1e-3) + 1e-6, -100])\n if False:\n res = scipy.optimize.minimize(fx, x0, method='L-BFGS-B', jac=True,\n bounds=((eta - 1e-3, eta + 1e-3), (omega_lower, None)),\n options={'ftol': 1e-16})\n else:\n res = scipy.optimize.minimize(fx, x0, method='SLSQP', jac=True,\n bounds=((eta - 1e-3, eta + 1e-3), (omega_lower, None)), options={'ftol': 1e-16})\n\n if self.beta == 0:\n res.x[1] = 0\n\n logger.log(\"dual optimized, eta: \" + str(res.x[0]) + \", omega: \" + str(res.x[1]))\n return res.x[0], res.x[1]\n\n # def f(x, grad):\n # if grad.size > 0:\n # grad[:] = eval_dual_grad(x)\n #\n # return np.float64(eval_dual(x))\n\n # self.nlopt_opt.set_min_objective(f)\n # # Set parameter boundaries: eta, omega > 0\n # self.nlopt_opt.set_lower_bounds([1e-12, 1e-12])\n #\n # self.nlopt_opt.set_ftol_rel(1e-12)\n # self.nlopt_opt.set_xtol_rel(1e-12)\n # self.nlopt_opt.set_vector_storage(100)\n\n # try:\n # x = self.nlopt_opt.optimize([self.param_eta, self.param_omega])\n # except RuntimeError:\n # entropy = np.mean(self.policy.distribution.entropy_log_probs(samples_data[\"agent_infos\"]))\n # if entropy < 1e-9:\n # # ignore error since we already converged and are at the optimal policy\n # x = [eta_before, omega_before]\n # else:\n # print(\"Error during optimization of the dual...\")\n # raise\n\n # logger.log('dual optimized')\n #\n # # get optimal values\n # return x[0], x[1]\n\n def init_eta_omega(self, beta, epsilon, init_eta, 
init_omega):\n # Here we define the symbolic function for the dual and the gradient\n\n self.beta = beta\n self.epsilon = epsilon\n\n # Init dual param values\n self.param_eta = init_eta\n self.param_omega = init_omega\n\n self.param_eta_non_lin = init_eta\n self.param_omega_non_lin = init_omega\n\n param_eta = tf.placeholder(dtype=tf.float32, shape=[], name=\"param_eta\")\n param_omega = tf.placeholder(dtype=tf.float32, shape=[], name=\"param_omega\")\n old_entropy = tf.placeholder(dtype=tf.float32, shape=[], name=\"old_entropy\")\n\n varphis = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"varphis\")\n Kt = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"Kt\")\n prec = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"prec\")\n Waa = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"Waa\")\n Wsa = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"Wsa\")\n wa = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"wa\")\n\n# varphis = ext.new_tensor(\n# 'varphis',\n# ndim=2,\n# dtype=theano.config.floatX\n# )\n# Kt = ext.new_tensor(\n# 'Kt',\n# ndim=2,\n# dtype=theano.config.floatX\n# )\n# prec = ext.new_tensor(\n# 'prec',\n# ndim=2,\n# dtype=theano.config.floatX\n# )\n# Waa = ext.new_tensor(\n# 'Waa',\n# ndim=2,\n# dtype=theano.config.floatX\n# )\n# Wsa = ext.new_tensor(\n# 'Wsa',\n# ndim=2,\n# dtype=theano.config.floatX\n# )\n# wa = ext.new_tensor(\n# 'wa',\n# ndim=2,\n# dtype=theano.config.floatX\n# )\n\n if self.beta == 0:\n beta = 0\n else:\n beta = old_entropy - self.beta\n\n # beta = self.printt('beta shape: ', beta)\n # log_action_prob = self.printn('log_action_prob shape: ', log_action_prob)\n # action_prob = self.printn('action_prob shape: ', action_prob)\n # q_values = self.printn('q_values shape: ', q_values)\n # beta = self.printn('beta shape: ', beta)\n\n # ha(s): eta * (\\varphi(s)^T * K^T * \\Sigma^{-1} + W_{sa}) + wa(s))\n ha = tf.matmul(varphis, param_eta * tf.matmul(Kt, prec) + Wsa) + wa\n\n # hss(s): eta * (\\varphi(s)^T * K^T * \\Sigma^{-1} * K * \\varphi(s))\n varphisKt = tf.matmul(varphis, Kt)\n hss = param_eta * tf.reduce_sum(tf.matmul(varphisKt, prec) * varphisKt, axis=1)\n\n Haa = param_eta * prec + Waa\n # Haa = 0.5 * (Haa + TT.transpose(Haa))\n HaaInv = tf.matrix_inverse(Haa)\n\n # The two terms 'term1' and 'term2' which come from normalizers of the\n # 1. Original policy distribution\n # 2. 
The distribution after completing the square\n sigma = tf.matrix_inverse(prec)\n term1 = -0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * sigma))\n if self.beta == 0:\n term2 = 0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * param_eta * HaaInv))\n else:\n term2 = 0.5 * (param_eta + param_omega) * tf.log(tf.matrix_determinant(2 * np.pi * (param_eta + param_omega) * HaaInv))\n\n dual = param_eta * self.epsilon - param_omega * beta + \\\n term1 + term2 + tf.reduce_mean(\n 0.5 * (tf.reduce_sum(tf.matmul(ha, HaaInv) * ha, axis=1) - hss))\n\n # Symbolic dual gradient\n dual_grad = tf.gradients(xs=[param_eta, param_omega], ys=dual)\n\n # Eval functions.\n f_dual = U.function(\n inputs=[varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy],\n outputs=dual,\n# mode='DebugMode' # TEST\n )\n\n f_dual_grad = U.function(\n inputs=[varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy],\n outputs=dual_grad,\n # mode='DebugMode' # TEST\n )\n #\n # # TEST\n # d0 = param_eta * self.epsilon - param_omega * beta\n # d1 = term1\n # d2 = term2\n # d3 = TT.mean(0.5 * (TT.sum(TT.dot(ha, HaaInv) * ha, axis=1)))\n # d4 = TT.mean(hss)\n # f_duals = ext.compile_function(\n # inputs=[varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy],\n # outputs=[d0, d1, d2, d3, d4]\n # )\n # # END TEST\n\n self.opt_info = dict(\n f_dual=f_dual,\n f_dual_grad=f_dual_grad,\n # f_duals=f_duals, # TEST\n )\n\n"
] | [
[
"tensorflow.variable_scope",
"tensorflow.global_variables_initializer",
"numpy.zeros"
],
[
"tensorflow.matmul",
"tensorflow.matrix_inverse",
"tensorflow.gradients",
"tensorflow.matrix_determinant",
"tensorflow.placeholder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
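The EtaOmegaOptimizer code in the row above minimizes its dual with scipy.optimize.minimize (SLSQP, analytic jacobian supplied via jac=True, box bounds on eta and omega, gradient obtained from autograd). The following is a minimal, self-contained sketch of that same pattern only; the quadratic toy objective, the variable names, and the bound values are illustrative stand-ins and are not taken from the dataset row.

import scipy.optimize
import autograd.numpy as np   # thinly wrapped numpy, as in the row above
from autograd import grad

def dual(x):
    # toy stand-in for the eta/omega dual: smooth and bounded below on the feasible box
    eta, omega = x[0], x[1]
    return (eta - 1.0) ** 2 + 0.5 * (omega + 2.0) ** 2 + np.log(1.0 + eta)

dual_grad = grad(dual)        # automatic gradient, analogous to eval_dual_grad

def fx(x):
    # return value and gradient together so that jac=True reuses a single call
    return dual(x), dual_grad(x)

x0 = np.array([1.0, 0.0])
res = scipy.optimize.minimize(fx, x0, method='SLSQP', jac=True,
                              bounds=((1e-12, None), (-100, None)),  # eta > 0, omega bounded below
                              options={'ftol': 1e-12})
print('eta: %f, omega: %f, dual: %f' % (res.x[0], res.x[1], res.fun))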
mrtucar/keras-unet-collection | [
"38ac652f33799502df1933c805c04e366ee05c3d"
] | [
"keras_unet_collection/_model_swin_unet_2d.py"
] | [
"\nfrom __future__ import absolute_import\n\nfrom keras_unet_collection.layer_utils import *\nfrom keras_unet_collection.transformer_layers import patch_extract, patch_embedding, SwinTransformerBlock, patch_merging, patch_expanding\n\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.models import Model\n\ndef swin_transformer_stack(X, stack_num, embed_dim, num_patch, num_heads, window_size, num_mlp, shift_window=True, name=''):\n '''\n Stacked Swin Transformers that share the same token size.\n \n Alternated Window-MSA and Swin-MSA will be configured if `shift_window=True`, Window-MSA only otherwise.\n *Dropout is turned off.\n '''\n # Turn-off dropouts\n mlp_drop_rate = 0 # Droupout after each MLP layer\n attn_drop_rate = 0 # Dropout after Swin-Attention\n proj_drop_rate = 0 # Dropout at the end of each Swin-Attention block, i.e., after linear projections\n drop_path_rate = 0 # Drop-path within skip-connections\n \n qkv_bias = True # Convert embedded patches to query, key, and values with a learnable additive value\n qk_scale = None # None: Re-scale query based on embed dimensions per attention head # Float for user specified scaling factor\n \n if shift_window:\n shift_size = window_size // 2\n else:\n shift_size = 0\n \n for i in range(stack_num):\n \n if i % 2 == 0:\n shift_size_temp = 0\n else:\n shift_size_temp = shift_size\n\n X = SwinTransformerBlock(dim=embed_dim, num_patch=num_patch, num_heads=num_heads, \n window_size=window_size, shift_size=shift_size_temp, num_mlp=num_mlp, qkv_bias=qkv_bias, qk_scale=qk_scale,\n mlp_drop=mlp_drop_rate, attn_drop=attn_drop_rate, proj_drop=proj_drop_rate, drop_path_prob=drop_path_rate, \n name='name{}'.format(i))(X)\n return X\n\n\ndef swin_unet_2d_base(input_tensor, filter_num_begin, depth, stack_num_down, stack_num_up, \n patch_size, num_heads, window_size, num_mlp, shift_window=True, name='swin_unet'):\n '''\n The base of SwinUNET.\n \n ----------\n Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q. and Wang, M., 2021. \n Swin-Unet: Unet-like Pure Transformer for Medical Image Segmentation. arXiv preprint arXiv:2105.05537.\n \n Input\n ----------\n input_tensor: the input tensor of the base, e.g., `keras.layers.Inpyt((None, None, 3))`.\n filter_num_begin: number of channels in the first downsampling block; \n it is also the number of embedded dimensions.\n depth: the depth of Swin-UNET, e.g., depth=4 means three down/upsampling levels and a bottom level.\n stack_num_down: number of convolutional layers per downsampling level/block. 
\n stack_num_up: number of convolutional layers (after concatenation) per upsampling level/block.\n name: prefix of the created keras model and its layers.\n \n ---------- (keywords of Swin-Transformers) ----------\n \n patch_size: The size of extracted patches, \n e.g., patch_size=(2, 2) means 2-by-2 patches\n *Height and width of the patch must be equal.\n \n num_heads: number of attention heads per down/upsampling level,\n e.g., num_heads=[4, 8, 16, 16] means increased attention heads with increasing depth.\n *The length of num_heads must equal to `depth`.\n \n window_size: the size of attention window per down/upsampling level,\n e.g., window_size=[4, 2, 2, 2] means decreased window size with increasing depth.\n \n num_mlp: number of MLP nodes.\n \n shift_window: The indicator of window shifting;\n shift_window=True means applying Swin-MSA for every two Swin-Transformer blocks.\n shift_window=False means MSA with fixed window locations for all blocks.\n\n Output\n ----------\n output tensor.\n \n Note: This function is experimental.\n The activation functions of all Swin-Transformers are fixed to GELU.\n \n '''\n # Compute number be patches to be embeded\n input_size = input_tensor.shape.as_list()[1:]\n num_patch_x = input_size[0]//patch_size[0]\n num_patch_y = input_size[1]//patch_size[1]\n \n # Number of Embedded dimensions\n embed_dim = filter_num_begin\n \n depth_ = depth\n \n X_skip = []\n\n X = input_tensor\n \n # Patch extraction\n X = patch_extract(patch_size)(X)\n\n # Embed patches to tokens\n X = patch_embedding(num_patch_x*num_patch_y, embed_dim)(X)\n \n # The first Swin Transformer stack\n X = swin_transformer_stack(X, stack_num=stack_num_down, \n embed_dim=embed_dim, num_patch=(num_patch_x, num_patch_y), \n num_heads=num_heads[0], window_size=window_size[0], num_mlp=num_mlp, \n shift_window=shift_window, name='{}_swin_down0'.format(name))\n X_skip.append(X)\n \n # Downsampling blocks\n for i in range(depth_-1):\n \n # Patch merging\n X = patch_merging((num_patch_x, num_patch_y), embed_dim=embed_dim, name='down{}'.format(i))(X)\n \n # update token shape info\n embed_dim = embed_dim*2\n num_patch_x = num_patch_x//2\n num_patch_y = num_patch_y//2\n \n # Swin Transformer stacks\n X = swin_transformer_stack(X, stack_num=stack_num_down, \n embed_dim=embed_dim, num_patch=(num_patch_x, num_patch_y), \n num_heads=num_heads[i+1], window_size=window_size[i+1], num_mlp=num_mlp, \n shift_window=shift_window, name='{}_swin_down{}'.format(name, i+1))\n \n # Store tensors for concat\n X_skip.append(X)\n \n # reverse indexing encoded tensors and hyperparams\n X_skip = X_skip[::-1]\n num_heads = num_heads[::-1]\n window_size = window_size[::-1]\n \n # upsampling begins at the deepest available tensor\n X = X_skip[0]\n \n # other tensors are preserved for concatenation\n X_decode = X_skip[1:]\n \n depth_decode = len(X_decode)\n \n for i in range(depth_decode):\n \n # Patch expanding\n X = patch_expanding(num_patch=(num_patch_x, num_patch_y),\n embed_dim=embed_dim, upsample_rate=2, return_vector=True, name='{}_swin_up{}'.format(name, i))(X)\n \n\n # update token shape info\n embed_dim = embed_dim//2\n num_patch_x = num_patch_x*2\n num_patch_y = num_patch_y*2\n \n # Concatenation and linear projection\n X = concatenate([X, X_decode[i]], axis=-1, name='{}_concat_{}'.format(name, i))\n X = Dense(embed_dim, use_bias=False, name='{}_concat_linear_proj_{}'.format(name, i))(X)\n \n # Swin Transformer stacks\n X = swin_transformer_stack(X, stack_num=stack_num_up, \n embed_dim=embed_dim, 
num_patch=(num_patch_x, num_patch_y), \n num_heads=num_heads[i], window_size=window_size[i], num_mlp=num_mlp, \n shift_window=shift_window, name='{}_swin_up{}'.format(name, i))\n \n # The last expanding layer; it produces full-size feature maps based on the patch size\n # !!! <--- \"patch_size[0]\" is used; it assumes patch_size = (size, size)\n X = patch_expanding(num_patch=(num_patch_x, num_patch_y),\n embed_dim=embed_dim, upsample_rate=patch_size[0], return_vector=False)(X)\n \n return X\n\n\ndef swin_unet_2d(input_size, filter_num_begin, n_labels, depth, stack_num_down, stack_num_up, \n patch_size, num_heads, window_size, num_mlp, output_activation='Softmax', shift_window=True, name='swin_unet'):\n '''\n The base of SwinUNET.\n \n ----------\n Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q. and Wang, M., 2021. \n Swin-Unet: Unet-like Pure Transformer for Medical Image Segmentation. arXiv preprint arXiv:2105.05537.\n \n Input\n ----------\n input_size: the size/shape of network input, e.g., `(128, 128, 3)`.\n filter_num_begin: number of channels in the first downsampling block; \n it is also the number of embedded dimensions.\n n_labels: number of output labels.\n depth: the depth of Swin-UNET, e.g., depth=4 means three down/upsampling levels and a bottom level.\n stack_num_down: number of convolutional layers per downsampling level/block. \n stack_num_up: number of convolutional layers (after concatenation) per upsampling level/block.\n name: prefix of the created keras model and its layers.\n \n ---------- (keywords of Swin-Transformers) ----------\n \n patch_size: The size of extracted patches, \n e.g., patch_size=(2, 2) means 2-by-2 patches\n *Height and width of the patch must be equal.\n \n num_heads: number of attention heads per down/upsampling level,\n e.g., num_heads=[4, 8, 16, 16] means increased attention heads with increasing depth.\n *The length of num_heads must equal to `depth`.\n \n window_size: the size of attention window per down/upsampling level,\n e.g., window_size=[4, 2, 2, 2] means decreased window size with increasing depth.\n \n num_mlp: number of MLP nodes.\n \n shift_window: The indicator of window shifting;\n shift_window=True means applying Swin-MSA for every two Swin-Transformer blocks.\n shift_window=False means MSA with fixed window locations for all blocks.\n \n Output\n ----------\n model: a keras model.\n \n Note: This function is experimental.\n The activation functions of all Swin-Transformers are fixed to GELU.\n '''\n IN = Input(input_size)\n \n # base \n X = swin_unet_2d_base(IN, filter_num_begin=filter_num_begin, depth=depth, stack_num_down=stack_num_down, stack_num_up=stack_num_up, \n patch_size=patch_size, num_heads=num_heads, window_size=window_size, num_mlp=num_mlp, shift_window=True, name=name)\n \n # output layer\n OUT = CONV_output(X, n_labels, kernel_size=1, activation=output_activation, name='{}_output'.format(name))\n \n # functional API model\n model = Model(inputs=[IN,], outputs=[OUT,], name='{}_model'.format(name))\n \n return model\n"
] | [
[
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
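For the keras-unet-collection row above, the swin_unet_2d docstring fully specifies the builder's keyword arguments, so a usage sketch follows directly from it. This assumes keras_unet_collection and a TensorFlow 2.x backend are installed; the import path mirrors the file path listed in the row, and the hyperparameter values below are placeholders chosen to satisfy the documented constraints (num_heads and window_size have length equal to depth, patch_size is square).

from keras_unet_collection._model_swin_unet_2d import swin_unet_2d

model = swin_unet_2d(input_size=(128, 128, 3),   # spatial dims must be divisible by the patch size and by the merging factor
                     filter_num_begin=64,        # also the embedding dimension of the first stage
                     n_labels=2,
                     depth=4,                    # three down/upsampling levels plus a bottom level
                     stack_num_down=2,
                     stack_num_up=2,
                     patch_size=(2, 2),
                     num_heads=[4, 8, 16, 16],
                     window_size=[4, 2, 2, 2],
                     num_mlp=512,
                     output_activation='Softmax',
                     shift_window=True,
                     name='swin_unet')
model.summary()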
warcraft12321/Hyperfoods | [
"b995cd7afe10fcbd338158c80f53ce637bfffc0c",
"b995cd7afe10fcbd338158c80f53ce637bfffc0c",
"b995cd7afe10fcbd338158c80f53ce637bfffc0c",
"b995cd7afe10fcbd338158c80f53ce637bfffc0c",
"b995cd7afe10fcbd338158c80f53ce637bfffc0c",
"b995cd7afe10fcbd338158c80f53ce637bfffc0c"
] | [
"src/torch/nn/grad.py",
"src/torch/utils/data/distributed.py",
"src/torch/_utils.py",
"src/torch/utils/ffi/__init__.py",
"src/torch/distributions/transformed_distribution.py",
"src/torch/nn/utils/spectral_norm.py"
] | [
"\"\"\"Gradient interface\"\"\"\n\nimport torch\nfrom .modules.utils import _single, _pair, _triple\n\n\ndef _grad_input_padding(grad_output, input_size, stride, padding, kernel_size):\n input_size = list(input_size)\n k = grad_output.dim() - 2\n\n if len(input_size) == k + 2:\n input_size = input_size[-k:]\n if len(input_size) != k:\n raise ValueError(\"input_size must have {} elements (got {})\"\n .format(k + 2, len(input_size)))\n\n def dim_size(d):\n return ((grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] +\n kernel_size[d])\n\n min_sizes = [dim_size(d) for d in range(k)]\n max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]\n for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):\n if size < min_size or size > max_size:\n raise ValueError(\n (\"requested an input grad size of {}, but valid sizes range \"\n \"from {} to {} (for a grad_output of {})\").format(\n input_size, min_sizes, max_sizes,\n grad_output.size()[2:]))\n\n return tuple(input_size[d] - min_sizes[d] for d in range(k))\n\n\ndef conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv1d with respect to the input of the convolution.\n This is same as the 1D transposed convolution operator under the hood but requires\n the shape of the gradient w.r.t. input to be specified explicitly.\n\n Args:\n input_size : Shape of the input gradient tensor\n weight: weight tensor (out_channels x in_channels/groups x kW)\n grad_output : output gradient tensor (minibatch x out_channels x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(1,1,3, requires_grad=True)\n >>> weight = torch.randn(1,1,1, requires_grad=True)\n >>> output = F.conv1d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\n >>> F.grad.conv1d_input(input.shape, weight, grad_output)\n\n \"\"\"\n stride = _single(stride)\n padding = _single(padding)\n dilation = _single(dilation)\n kernel_size = [weight.shape[2]]\n\n if input_size is None:\n raise ValueError(\"grad.conv1d_input requires specifying an input_size\")\n\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\n padding, kernel_size)\n\n return torch.conv_transpose1d(\n grad_output, weight, bias, stride, padding, grad_input_padding, groups,\n dilation)\n\n\ndef conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv1d with respect to the weight of the convolution.\n\n Args:\n input: input tensor of shape (minibatch x in_channels x iW)\n weight_size : Shape of the weight gradient tensor\n grad_output : output gradient tensor (minibatch x out_channels x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(1,1,3, requires_grad=True)\n >>> weight = torch.randn(1,1,1, requires_grad=True)\n >>> output = F.conv1d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_weight = torch.autograd.grad(output, filter, grad_output)\n >>> F.grad.conv1d_weight(input, weight.shape, grad_output)\n\n \"\"\"\n stride = _single(stride)\n padding = _single(padding)\n dilation = _single(dilation)\n in_channels = input.shape[1]\n out_channels = grad_output.shape[1]\n min_batch = input.shape[0]\n\n grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1)\n grad_output = grad_output.contiguous().view(\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2])\n\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\n input.shape[2])\n\n grad_weight = torch.conv1d(input, grad_output, bias, dilation, padding,\n stride, in_channels * min_batch)\n\n grad_weight = grad_weight.contiguous().view(\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2])\n\n return grad_weight.sum(dim=0).view(\n in_channels // groups, out_channels, grad_weight.shape[2]).transpose(\n 0, 1).narrow(2, 0, weight_size[2])\n\n\ndef conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv2d with respect to the input of the convolution.\n This is same as the 2D transposed convolution operator under the hood but requires\n the shape of the gradient w.r.t. input to be specified explicitly.\n\n Args:\n input_size : Shape of the input gradient tensor\n weight: weight tensor (out_channels x in_channels/groups x kH x kW)\n grad_output : output gradient tensor (minibatch x out_channels x oH x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(1,1,3,3, requires_grad=True)\n >>> weight = torch.randn(1,1,1,2, requires_grad=True)\n >>> output = F.conv2d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\n >>> F.grad.conv2d_input(input.shape, weight, grad_output)\n\n \"\"\"\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n kernel_size = (weight.shape[2], weight.shape[3])\n\n if input_size is None:\n raise ValueError(\"grad.conv2d_input requires specifying an input_size\")\n\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\n padding, kernel_size)\n\n return torch.conv_transpose2d(\n grad_output, weight, bias, stride, padding, grad_input_padding, groups,\n dilation)\n\n\ndef conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv2d with respect to the weight of the convolution.\n\n Args:\n input: input tensor of shape (minibatch x in_channels x iH x iW)\n weight_size : Shape of the weight gradient tensor\n grad_output : output gradient tensor (minibatch x out_channels x oH x oW)\n stride (int or tuple, optional): Stride of the convolution. 
Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(1,1,3,3, requires_grad=True)\n >>> weight = torch.randn(1,1,1,2, requires_grad=True)\n >>> output = F.conv2d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_weight = torch.autograd.grad(output, filter, grad_output)\n >>> F.grad.conv2d_weight(input, weight.shape, grad_output)\n\n \"\"\"\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n in_channels = input.shape[1]\n out_channels = grad_output.shape[1]\n min_batch = input.shape[0]\n\n grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,\n 1)\n grad_output = grad_output.contiguous().view(\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],\n grad_output.shape[3])\n\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\n input.shape[2], input.shape[3])\n\n grad_weight = torch.conv2d(input, grad_output, bias, dilation, padding,\n stride, in_channels * min_batch)\n\n grad_weight = grad_weight.contiguous().view(\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],\n grad_weight.shape[3])\n\n return grad_weight.sum(dim=0).view(\n in_channels // groups, out_channels,\n grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(\n 2, 0, weight_size[2]).narrow(3, 0, weight_size[3])\n\n\ndef conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv3d with respect to the input of the convolution.\n This is same as the 3D transposed convolution operator under the hood but requires\n the shape of the gradient w.r.t. input to be specified explicitly.\n\n Args:\n input_size : Shape of the input gradient tensor\n weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)\n grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). 
Default: None\n\n Examples::\n\n >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)\n >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)\n >>> output = F.conv3d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\n >>> F.grad.conv3d_input(input.shape, weight, grad_output)\n\n \"\"\"\n stride = _triple(stride)\n padding = _triple(padding)\n dilation = _triple(dilation)\n kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])\n\n if input_size is None:\n raise ValueError(\"grad.conv3d_input requires specifying an input_size\")\n\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\n padding, kernel_size)\n\n return torch.conv_transpose3d(\n grad_output, weight, bias, stride, padding, grad_input_padding, groups,\n dilation)\n\n\ndef conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv3d with respect to the weight of the convolution.\n\n Args:\n input: input tensor of shape (minibatch x in_channels x iT x iH x iW)\n weight_size : Shape of the weight gradient tensor\n grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)\n >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)\n >>> output = F.conv3d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_weight = torch.autograd.grad(output, weight, grad_output)\n >>> F.grad.conv3d_weight(input, weight.shape, grad_output)\n\n \"\"\"\n stride = _triple(stride)\n padding = _triple(padding)\n dilation = _triple(dilation)\n in_channels = input.shape[1]\n out_channels = grad_output.shape[1]\n min_batch = input.shape[0]\n\n grad_output = grad_output.repeat(1, in_channels // groups, 1, 1, 1)\n grad_output = grad_output.contiguous().view(\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],\n grad_output.shape[3], grad_output.shape[4])\n\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\n input.shape[2], input.shape[3],\n input.shape[4])\n\n grad_weight = torch.conv3d(input, grad_output, bias, dilation, padding,\n stride, in_channels * min_batch)\n\n grad_weight = grad_weight.contiguous().view(\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],\n grad_weight.shape[3], grad_weight.shape[4])\n\n return grad_weight.sum(dim=0).view(\n in_channels // groups, out_channels, grad_weight.shape[2],\n grad_weight.shape[3], grad_weight.shape[4]).transpose(0, 1).narrow(\n 2, 0, weight_size[2]).narrow(3, 0, weight_size[3]).narrow(\n 4, 0, weight_size[4])\n",
"import math\nimport torch\nfrom . import Sampler\nfrom torch.distributed import get_world_size, get_rank\n\n\nclass DistributedSampler(Sampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset.\n\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n process can pass a DistributedSampler instance as a DataLoader sampler,\n and load a subset of the original dataset that is exclusive to it.\n\n .. note::\n Dataset is assumed to be of constant size.\n\n Arguments:\n dataset: Dataset used for sampling.\n num_replicas (optional): Number of processes participating in\n distributed training.\n rank (optional): Rank of the current process within num_replicas.\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None):\n if num_replicas is None:\n num_replicas = get_world_size()\n if rank is None:\n rank = get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = list(torch.randperm(len(self.dataset), generator=g))\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n",
"import torch\nimport importlib\nimport warnings\nfrom collections import defaultdict\n\n\ndef _type(self, dtype=None, non_blocking=False, **kwargs):\n \"\"\"Returns the type if `dtype` is not provided, else casts this object to\n the specified type.\n\n If this is already of the correct type, no copy is performed and the\n original object is returned.\n\n Args:\n dtype (type or string): The desired type\n non_blocking (bool): If ``True``, and the source is in pinned memory\n and destination is on the GPU or vice versa, the copy is performed\n asynchronously with respect to the host. Otherwise, the argument\n has no effect.\n **kwargs: For compatibility, may contain the key ``async`` in place of\n the ``non_blocking`` argument. The ``async`` arg is deprecated.\n \"\"\"\n non_blocking = _get_async_or_non_blocking('type', non_blocking, kwargs)\n if dtype is None:\n return self.__module__ + '.' + self.__class__.__name__\n\n if isinstance(dtype, str):\n dtype = _import_dotted_name(dtype)\n if dtype == type(self):\n return self\n if self.is_sparse:\n if not dtype.is_sparse:\n raise RuntimeError(\"Cannot cast sparse tensor to dense tensor\")\n new_module_name = dtype.__module__.replace('.sparse', '')\n new_values_type_name = new_module_name + '.' + dtype.__name__\n new_values = self._values().type(new_values_type_name, non_blocking)\n new_indices_type_name = new_module_name + '.LongTensor'\n new_indices = self._indices().type(new_indices_type_name, non_blocking)\n return dtype(new_indices, new_values, self.size())\n if dtype.is_sparse:\n raise RuntimeError(\"Cannot cast dense tensor to sparse tensor\")\n return dtype(self.size()).copy_(self, non_blocking)\n\n\ndef _cuda(self, device=None, non_blocking=False, **kwargs):\n \"\"\"Returns a copy of this object in CUDA memory.\n\n If this object is already in CUDA memory and on the correct device, then\n no copy is performed and the original object is returned.\n\n Args:\n device (int): The destination GPU id. Defaults to the current device.\n non_blocking (bool): If ``True`` and the source is in pinned memory,\n the copy will be asynchronous with respect to the host. 
Otherwise,\n the argument has no effect.\n **kwargs: For compatibility, may contain the key ``async`` in place of\n the ``non_blocking`` argument.\n \"\"\"\n non_blocking = _get_async_or_non_blocking('cuda', non_blocking, kwargs)\n if self.is_cuda:\n if device is None:\n device = torch.cuda.current_device()\n if self.get_device() == device:\n return self\n else:\n if device is None:\n device = -1\n with torch.cuda.device(device):\n if self.is_sparse:\n new_type = getattr(torch.cuda.sparse, self.__class__.__name__)\n indices = self._indices().cuda(device, non_blocking)\n values = self._values().cuda(device, non_blocking)\n return new_type(indices, values, self.size())\n else:\n new_type = getattr(torch.cuda, self.__class__.__name__)\n return new_type(self.size()).copy_(self, non_blocking)\n\n\ndef _get_async_or_non_blocking(function_name, non_blocking, kwargs):\n if not kwargs:\n return non_blocking\n if len(kwargs) != 1 or 'async' not in kwargs:\n message = \"{}() got an unexpected keyword argument '{}'\"\n argument = list(kwargs.keys()).pop()\n raise TypeError(message.format(function_name, argument))\n warnings.warn(\"'async' is deprecated; use 'non_blocking'\")\n return kwargs['async']\n\n\ndef _rebuild_tensor(storage, storage_offset, size, stride):\n class_name = storage.__class__.__name__.replace('Storage', 'Tensor')\n module = importlib.import_module(storage.__module__)\n tensor_class = getattr(module, class_name)\n return tensor_class().set_(storage, storage_offset, size, stride)\n\n\ndef _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):\n tensor = _rebuild_tensor(storage, storage_offset, size, stride)\n tensor.requires_grad = requires_grad\n tensor._backward_hooks = backward_hooks\n return tensor\n\n\ndef _import_dotted_name(name):\n components = name.split('.')\n obj = __import__(components[0])\n for component in components[1:]:\n obj = getattr(obj, component)\n return obj\n\n\n# Taken from python 3.5 docs\ndef _accumulate(iterable, fn=lambda x, y: x + y):\n 'Return running totals'\n # _accumulate([1,2,3,4,5]) --> 1 3 6 10 15\n # _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120\n it = iter(iterable)\n try:\n total = next(it)\n except StopIteration:\n return\n yield total\n for element in it:\n total = fn(total, element)\n yield total\n\n\ndef _flatten_dense_tensors(tensors):\n \"\"\"Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of\n same dense type.\n\n Since inputs are dense, the resulting tensor will be a concatenated 1D\n buffer. Element-wise operation on this buffer will be equivalent to\n operating individually.\n\n Arguments:\n tensors (Iterable[Tensor]): dense tensors to flatten.\n\n Returns:\n A contiguous 1D buffer containing input tensors.\n \"\"\"\n if len(tensors) == 1:\n return tensors[0].contiguous().view(-1)\n flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)\n return flat\n\n\ndef _flatten_sparse_tensors(tensors):\n \"\"\"Flatten sparse tensors into two contiguous 1D buffers, one of indices and\n one of values. 
Assume tensors are of same sparse type.\n\n Arguments:\n tensors (Iterable[Tensor]): sparse tensors to flatten.\n\n Returns:\n A tuple of two contiguous 1D buffers, one containing input tensors'\n indices and the other containing the values.\n \"\"\"\n flat_indices = _flatten_dense_tensors([t._indices() for t in tensors])\n flat_values = _flatten_dense_tensors([t._values() for t in tensors])\n return flat_indices, flat_values\n\n\ndef _unflatten_dense_tensors(flat, tensors):\n \"\"\"View a flat buffer using the sizes of tensors. Assume that tensors are of\n same dense type, and that flat is given by _flatten_dense_tensors.\n\n Arguments:\n flat (Tensor): flattened dense tensors to unflatten.\n tensors (Iterable[Tensor]): dense tensors whose sizes will be used to\n unflatten flat.\n\n Returns:\n Unflattened dense tensors with sizes same as tensors and values from\n flat.\n \"\"\"\n outputs = []\n offset = 0\n for tensor in tensors:\n numel = tensor.numel()\n outputs.append(flat.narrow(0, offset, numel).view_as(tensor))\n offset += numel\n return tuple(outputs)\n\n\ndef _unflatten_sparse_tensors(flat, tensors):\n \"\"\"View flat buffer (containing indices and values) using the sizes of\n tensors. Assume that tensors are of same sparse type, and that flat is given\n by _flatten_sparse_tensors.\n\n Arguments:\n flat (tuple(Tensor, Tensor)): flattened indices and values of sparse\n tensors to unflatten.\n tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to\n unflatten flat.\n\n Returns:\n Unflattened sparse tensors with sizes same as tensors and values from\n flat.\n \"\"\"\n flat_indices, flat_values = flat\n indices = _unflatten_dense_tensors(flat_indices, [t._indices() for t in tensors])\n values = _unflatten_dense_tensors(flat_values, [t._values() for t in tensors])\n outputs = []\n for t, i, v in zip(tensors, indices, values):\n outputs.append(t.new(i, v, t.size()))\n return tuple(outputs)\n\n\ndef _reorder_tensors_as(tensors, ordered_tensors):\n \"\"\"Assume that tensors are of same order as ordered_tensors within their\n types, e.g., from _take_tensors. Reorder them to be of same order as\n ordered_tensors.\n\n Arguments:\n tensors (Iterable[Tensor]): tensors to be reordered. They should be of\n the same order as ordered_tensors within their own types.\n ordered_tensors (Iterable[Tensor]): tensors whose order will be the\n reference.\n\n Returns:\n Ordered tuple of tensors with contents from tensors and order of\n ordered_tensors.\n \"\"\"\n type_dict = defaultdict(list)\n for tensor in tensors:\n type_dict[tensor.type()].append(tensor)\n type_dict = {t: iter(coll) for t, coll in type_dict.items()}\n return tuple(next(type_dict[tensor.type()]) for tensor in ordered_tensors)\n\n\ndef _take_tensors(tensors, size_limit):\n \"\"\"Group tensors into chunks. This generator yields a chunk at each time,\n each containing tensors of same type up to certain byte limit in total size.\n\n Args:\n tensors (Sequence): A sequence of tensors to be separated into chunks.\n size_limit (int): The limit of each chunk in bytes.\n\n Yields:\n Blocks of tensors of same type and within size_limit. 
The yielded\n tensors are only ordered as the original sequence within its types.\n \"\"\"\n buf_dict = defaultdict(lambda: [[], 0])\n for tensor in tensors:\n t = tensor.type()\n if tensor.is_sparse:\n indices = tensor._indices()\n values = tensor._values()\n size = indices.numel() * indices.element_size() + values.numel() * values.element_size()\n else:\n size = tensor.numel() * tensor.element_size()\n buf_and_size = buf_dict[t]\n if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:\n yield buf_and_size[0]\n buf_and_size = buf_dict[t] = [[], 0]\n buf_and_size[0].append(tensor)\n buf_and_size[1] += size\n for buf, _ in buf_dict.values():\n if len(buf) > 0:\n yield buf\n",
"import os\nimport glob\nimport tempfile\nimport shutil\nfrom functools import wraps, reduce\nfrom string import Template\nimport torch\nimport torch.cuda\nfrom torch._utils import _accumulate\n\ntry:\n import cffi\nexcept ImportError:\n raise ImportError(\"torch.utils.ffi requires the cffi package\")\n\n\nif cffi.__version_info__ < (1, 4, 0):\n raise ImportError(\"torch.utils.ffi requires cffi version >= 1.4, but \"\n \"got \" + '.'.join(map(str, cffi.__version_info__)))\n\n\ndef _generate_typedefs():\n typedefs = []\n for t in ['Double', 'Float', 'Long', 'Int', 'Short', 'Char', 'Byte']:\n for lib in ['TH', 'THCuda']:\n for kind in ['Tensor', 'Storage']:\n python_name = t + kind\n if t == 'Float' and lib == 'THCuda':\n th_name = 'THCuda' + kind\n else:\n th_name = lib + t + kind\n th_struct = 'struct ' + th_name\n\n typedefs += ['typedef {} {};'.format(th_struct, th_name)]\n # We have to assemble a string here, because we're going to\n # do this lookup based on tensor.type(), which returns a\n # string (not a type object, as this code was before)\n python_module = 'torch.cuda' if lib == 'THCuda' else 'torch'\n python_class = python_module + '.' + python_name\n _cffi_to_torch[th_struct] = python_class\n _torch_to_cffi[python_class] = th_struct\n return '\\n'.join(typedefs) + '\\n'\n_cffi_to_torch = {}\n_torch_to_cffi = {}\n_typedefs = _generate_typedefs()\n\n\nPY_MODULE_TEMPLATE = Template(\"\"\"\nfrom torch.utils.ffi import _wrap_function\nfrom .$cffi_wrapper_name import lib as _lib, ffi as _ffi\n\n__all__ = []\ndef _import_symbols(locals):\n for symbol in dir(_lib):\n fn = getattr(_lib, symbol)\n if callable(fn):\n locals[symbol] = _wrap_function(fn, _ffi)\n else:\n locals[symbol] = fn\n __all__.append(symbol)\n\n_import_symbols(locals())\n\"\"\")\n\n\ndef _setup_wrapper(with_cuda):\n here = os.path.abspath(os.path.dirname(__file__))\n lib_dir = os.path.join(here, '..', '..', 'lib')\n include_dirs = [\n os.path.join(lib_dir, 'include'),\n os.path.join(lib_dir, 'include', 'TH'),\n ]\n\n wrapper_source = '#include <TH/TH.h>\\n'\n if with_cuda:\n import torch.cuda\n wrapper_source += '#include <THC/THC.h>\\n'\n if os.sys.platform == 'win32':\n cuda_include_dirs = glob.glob(os.getenv('CUDA_PATH', '') + '/include')\n cuda_include_dirs += glob.glob(os.getenv('NVTOOLSEXT_PATH', '') + '/include')\n else:\n cuda_include_dirs = glob.glob('/usr/local/cuda/include')\n cuda_include_dirs += glob.glob('/Developer/NVIDIA/CUDA-*/include')\n include_dirs.append(os.path.join(lib_dir, 'include', 'THC'))\n include_dirs.extend(cuda_include_dirs)\n return wrapper_source, include_dirs\n\n\ndef _create_module_dir(base_path, fullname):\n module, _, name = fullname.rpartition('.')\n if not module:\n target_dir = name\n else:\n target_dir = reduce(os.path.join, fullname.split('.'))\n target_dir = os.path.join(base_path, target_dir)\n try:\n os.makedirs(target_dir)\n except os.error:\n pass\n for dirname in _accumulate(fullname.split('.'), os.path.join):\n init_file = os.path.join(base_path, dirname, '__init__.py')\n open(init_file, 'a').close() # Create file if it doesn't exist yet\n return name, target_dir\n\n\ndef _build_extension(ffi, cffi_wrapper_name, target_dir, verbose):\n try:\n tmpdir = tempfile.mkdtemp()\n ext_suf = '.pyd' if os.sys.platform == 'win32' else '.so'\n libname = cffi_wrapper_name + ext_suf\n outfile = ffi.compile(tmpdir=tmpdir, verbose=verbose, target=libname)\n shutil.copy(outfile, os.path.join(target_dir, libname))\n finally:\n shutil.rmtree(tmpdir)\n\n\ndef _make_python_wrapper(name, 
cffi_wrapper_name, target_dir):\n py_source = PY_MODULE_TEMPLATE.substitute(name=name,\n cffi_wrapper_name=cffi_wrapper_name)\n with open(os.path.join(target_dir, '__init__.py'), 'w') as f:\n f.write(py_source)\n\n\ndef create_extension(name, headers, sources, verbose=True, with_cuda=False,\n package=False, relative_to='.', **kwargs):\n \"\"\"Creates and configures a cffi.FFI object, that builds PyTorch extension.\n\n Arguments:\n name (str): package name. Can be a nested module e.g. ``.ext.my_lib``.\n headers (str or List[str]): list of headers, that contain only exported\n functions\n sources (List[str]): list of sources to compile.\n verbose (bool, optional): if set to ``False``, no output will be printed\n (default: True).\n with_cuda (bool, optional): set to ``True`` to compile with CUDA headers\n (default: False)\n package (bool, optional): set to ``True`` to build in package mode (for modules\n meant to be installed as pip packages) (default: False).\n relative_to (str, optional): path of the build file. Required when\n ``package is True``. It's best to use ``__file__`` for this argument.\n kwargs: additional arguments that are passed to ffi to declare the\n extension. See `Extension API reference`_ for details.\n\n .. _`Extension API reference`: https://docs.python.org/3/distutils/apiref.html#distutils.core.Extension\n \"\"\"\n base_path = os.path.abspath(os.path.dirname(relative_to))\n name_suffix, target_dir = _create_module_dir(base_path, name)\n if not package:\n cffi_wrapper_name = '_' + name_suffix\n else:\n cffi_wrapper_name = (name.rpartition('.')[0] +\n '.{0}._{0}'.format(name_suffix))\n\n wrapper_source, include_dirs = _setup_wrapper(with_cuda)\n include_dirs.extend(kwargs.pop('include_dirs', []))\n\n if os.sys.platform == 'win32':\n library_dirs = glob.glob(os.getenv('CUDA_PATH', '') + '/lib/x64')\n library_dirs += glob.glob(os.getenv('NVTOOLSEXT_PATH', '') + '/lib/x64')\n\n here = os.path.abspath(os.path.dirname(__file__))\n lib_dir = os.path.join(here, '..', '..', 'lib')\n\n library_dirs.append(os.path.join(lib_dir))\n else:\n library_dirs = []\n library_dirs.extend(kwargs.pop('library_dirs', []))\n\n if isinstance(headers, str):\n headers = [headers]\n all_headers_source = ''\n for header in headers:\n with open(os.path.join(base_path, header), 'r') as f:\n all_headers_source += f.read() + '\\n\\n'\n\n ffi = cffi.FFI()\n sources = [os.path.join(base_path, src) for src in sources]\n # NB: TH headers are C99 now\n kwargs['extra_compile_args'] = ['-std=c99'] + kwargs.get('extra_compile_args', [])\n ffi.set_source(cffi_wrapper_name, wrapper_source + all_headers_source,\n sources=sources,\n include_dirs=include_dirs,\n library_dirs=library_dirs, **kwargs)\n ffi.cdef(_typedefs + all_headers_source)\n\n _make_python_wrapper(name_suffix, '_' + name_suffix, target_dir)\n\n def build():\n _build_extension(ffi, cffi_wrapper_name, target_dir, verbose)\n ffi.build = build\n return ffi\n\n\ndef _wrap_function(function, ffi):\n @wraps(function)\n def safe_call(*args, **kwargs):\n args = tuple(ffi.cast(_torch_to_cffi.get(arg.type(), 'void') + '*', arg._cdata)\n if isinstance(arg, torch.Tensor) or torch.is_storage(arg)\n else arg\n for arg in args)\n args = (function,) + args\n result = torch._C._safe_call(*args, **kwargs)\n if isinstance(result, ffi.CData):\n typeof = ffi.typeof(result)\n if typeof.kind == 'pointer':\n cdata = int(ffi.cast('uintptr_t', result))\n cname = typeof.item.cname\n if cname in _cffi_to_torch:\n # TODO: Maybe there is a less janky way to eval\n # off of 
this\n return eval(_cffi_to_torch[cname])(cdata=cdata)\n return result\n return safe_call\n",
"import torch\nfrom torch.distributions import constraints\nfrom torch.distributions.distribution import Distribution\nfrom torch.distributions.transforms import Transform\nfrom torch.distributions.utils import _sum_rightmost\n\n\nclass TransformedDistribution(Distribution):\n r\"\"\"\n Extension of the Distribution class, which applies a sequence of Transforms\n to a base distribution. Let f be the composition of transforms applied::\n\n X ~ BaseDistribution\n Y = f(X) ~ TransformedDistribution(BaseDistribution, f)\n log p(Y) = log p(X) + log |det (dX/dY)|\n\n Note that the ``.event_shape`` of a :class:`TransformedDistribution` is the\n maximum shape of its base distribution and its transforms, since transforms\n can introduce correlations among events.\n\n An example for the usage of :class:`TransformedDistribution` would be::\n\n # Building a Logistic Distribution\n # X ~ Uniform(0, 1)\n # f = a + b * logit(X)\n # Y ~ f(X) ~ Logistic(a, b)\n base_distribution = Uniform(0, 1)\n transforms = [SigmoidTransform().inv, AffineTransform(loc=a, scale=b)]\n logistic = TransformedDistribution(base_distribution, transforms)\n\n For more examples, please look at the implementations of\n :class:`~torch.distributions.gumbel.Gumbel`,\n :class:`~torch.distributions.half_cauchy.HalfCauchy`,\n :class:`~torch.distributions.half_normal.HalfNormal`,\n :class:`~torch.distributions.log_normal.LogNormal`,\n :class:`~torch.distributions.pareto.Pareto`,\n :class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli` and\n :class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical`\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_distribution, transforms, validate_args=None):\n self.base_dist = base_distribution\n if isinstance(transforms, Transform):\n self.transforms = [transforms, ]\n elif isinstance(transforms, list):\n if not all(isinstance(t, Transform) for t in transforms):\n raise ValueError(\"transforms must be a Transform or a list of Transforms\")\n self.transforms = transforms\n else:\n raise ValueError(\"transforms must be a Transform or list, but was {}\".format(transforms))\n shape = self.base_dist.batch_shape + self.base_dist.event_shape\n event_dim = max([len(self.base_dist.event_shape)] + [t.event_dim for t in self.transforms])\n batch_shape = shape[:len(shape) - event_dim]\n event_shape = shape[len(shape) - event_dim:]\n super(TransformedDistribution, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n\n @constraints.dependent_property\n def support(self):\n return self.transforms[-1].codomain if self.transforms else self.base_dist.support\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n def sample(self, sample_shape=torch.Size()):\n \"\"\"\n Generates a sample_shape shaped sample or sample_shape shaped batch of\n samples if the distribution parameters are batched. Samples first from\n base distribution and applies `transform()` for every transform in the\n list.\n \"\"\"\n with torch.no_grad():\n x = self.base_dist.sample(sample_shape)\n for transform in self.transforms:\n x = transform(x)\n return x\n\n def rsample(self, sample_shape=torch.Size()):\n \"\"\"\n Generates a sample_shape shaped reparameterized sample or sample_shape\n shaped batch of reparameterized samples if the distribution parameters\n are batched. 
Samples first from base distribution and applies\n `transform()` for every transform in the list.\n \"\"\"\n x = self.base_dist.rsample(sample_shape)\n for transform in self.transforms:\n x = transform(x)\n return x\n\n def log_prob(self, value):\n \"\"\"\n Scores the sample by inverting the transform(s) and computing the score\n using the score of the base distribution and the log abs det jacobian.\n \"\"\"\n event_dim = len(self.event_shape)\n log_prob = 0.0\n y = value\n for transform in reversed(self.transforms):\n x = transform.inv(y)\n log_prob = log_prob - _sum_rightmost(transform.log_abs_det_jacobian(x, y),\n event_dim - transform.event_dim)\n y = x\n\n log_prob = log_prob + _sum_rightmost(self.base_dist.log_prob(y),\n event_dim - len(self.base_dist.event_shape))\n return log_prob\n\n def _monotonize_cdf(self, value):\n \"\"\"\n This conditionally flips ``value -> 1-value`` to ensure :meth:`cdf` is\n monotone increasing.\n \"\"\"\n sign = 1\n for transform in self.transforms:\n sign = sign * transform.sign\n if sign is 1:\n return value\n return sign * (value - 0.5) + 0.5\n\n def cdf(self, value):\n \"\"\"\n Computes the cumulative distribution function by inverting the\n transform(s) and computing the score of the base distribution.\n \"\"\"\n for transform in self.transforms[::-1]:\n value = transform.inv(value)\n if self._validate_args:\n self.base_dist._validate_sample(value)\n value = self.base_dist.cdf(value)\n value = self._monotonize_cdf(value)\n return value\n\n def icdf(self, value):\n \"\"\"\n Computes the inverse cumulative distribution function using\n transform(s) and computing the score of the base distribution.\n \"\"\"\n value = self._monotonize_cdf(value)\n if self._validate_args:\n self.base_dist._validate_sample(value)\n value = self.base_dist.icdf(value)\n for transform in self.transforms:\n value = transform(value)\n return value\n",
"\"\"\"\nSpectral Normalization from https://arxiv.org/abs/1802.05957\n\"\"\"\nimport torch\nfrom torch.nn.functional import normalize\nfrom torch.nn.parameter import Parameter\n\n\nclass SpectralNorm(object):\n\n def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):\n self.name = name\n self.dim = dim\n if n_power_iterations <= 0:\n raise ValueError('Expected n_power_iterations to be positive, but '\n 'got n_power_iterations={}'.format(n_power_iterations))\n self.n_power_iterations = n_power_iterations\n self.eps = eps\n\n def compute_weight(self, module):\n weight = getattr(module, self.name + '_orig')\n u = getattr(module, self.name + '_u')\n weight_mat = weight\n if self.dim != 0:\n # permute dim to front\n weight_mat = weight_mat.permute(self.dim,\n *[d for d in range(weight_mat.dim()) if d != self.dim])\n height = weight_mat.size(0)\n weight_mat = weight_mat.reshape(height, -1)\n with torch.no_grad():\n for _ in range(self.n_power_iterations):\n # Spectral norm of weight equals to `u^T W v`, where `u` and `v`\n # are the first left and right singular vectors.\n # This power iteration produces approximations of `u` and `v`.\n v = normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)\n u = normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)\n\n sigma = torch.dot(u, torch.matmul(weight_mat, v))\n weight = weight / sigma\n return weight, u\n\n def remove(self, module):\n weight = getattr(module, self.name)\n delattr(module, self.name)\n delattr(module, self.name + '_u')\n delattr(module, self.name + '_orig')\n module.register_parameter(self.name, torch.nn.Parameter(weight))\n\n def __call__(self, module, inputs):\n if module.training:\n weight, u = self.compute_weight(module)\n setattr(module, self.name, weight)\n setattr(module, self.name + '_u', u)\n else:\n r_g = getattr(module, self.name + '_orig').requires_grad\n getattr(module, self.name).detach_().requires_grad_(r_g)\n\n @staticmethod\n def apply(module, name, n_power_iterations, dim, eps):\n fn = SpectralNorm(name, n_power_iterations, dim, eps)\n weight = module._parameters[name]\n height = weight.size(dim)\n\n u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)\n delattr(module, fn.name)\n module.register_parameter(fn.name + \"_orig\", weight)\n # We still need to assign weight back as fn.name because all sorts of\n # things may assume that it exists, e.g., when initializing weights.\n # However, we can't directly assign as it could be an nn.Parameter and\n # gets added as a parameter. Instead, we register weight.data as a\n # buffer, which will cause weight to be included in the state dict\n # and also supports nn.init due to shared storage.\n module.register_buffer(fn.name, weight.data)\n module.register_buffer(fn.name + \"_u\", u)\n\n module.register_forward_pre_hook(fn)\n return fn\n\n\ndef spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):\n r\"\"\"Applies spectral normalization to a parameter in the given module.\n\n .. math::\n \\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\\n \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2}\n\n Spectral normalization stabilizes the training of discriminators (critics)\n in Generaive Adversarial Networks (GANs) by rescaling the weight tensor\n with spectral norm :math:`\\sigma` of the weight matrix calculated using\n power iteration method. 
If the dimension of the weight tensor is greater\n than 2, it is reshaped to 2D in power iteration method to get spectral\n norm. This is implemented via a hook that calculates spectral norm and\n rescales weight before every :meth:`~Module.forward` call.\n\n See `Spectral Normalization for Generative Adversarial Networks`_ .\n\n .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957\n\n Args:\n module (nn.Module): containing module\n name (str, optional): name of weight parameter\n n_power_iterations (int, optional): number of power iterations to\n calculate spectral norm\n eps (float, optional): epsilon for numerical stability in\n calculating norms\n dim (int, optional): dimension corresponding to number of outputs,\n the default is 0, except for modules that are instances of\n ConvTranspose1/2/3d, when it is 1\n\n Returns:\n The original module with the spectral norm hook\n\n Example::\n\n >>> m = spectral_norm(nn.Linear(20, 40))\n Linear (20 -> 40)\n >>> m.weight_u.size()\n torch.Size([20])\n\n \"\"\"\n if dim is None:\n if isinstance(module, (torch.nn.ConvTranspose1d,\n torch.nn.ConvTranspose2d,\n torch.nn.ConvTranspose3d)):\n dim = 1\n else:\n dim = 0\n SpectralNorm.apply(module, name, n_power_iterations, dim, eps)\n return module\n\n\ndef remove_spectral_norm(module, name='weight'):\n r\"\"\"Removes the spectral normalization reparameterization from a module.\n\n Args:\n module (nn.Module): containing module\n name (str, optional): name of weight parameter\n\n Example:\n >>> m = spectral_norm(nn.Linear(40, 10))\n >>> remove_spectral_norm(m)\n \"\"\"\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, SpectralNorm) and hook.name == name:\n hook.remove(module)\n del module._forward_pre_hooks[k]\n return module\n\n raise ValueError(\"spectral_norm of '{}' not found in {}\".format(\n name, module))\n"
] | [
[
"torch.conv2d",
"torch.conv3d",
"torch.conv_transpose1d",
"torch.conv1d",
"torch.conv_transpose2d",
"torch.conv_transpose3d"
],
[
"torch.Generator",
"torch.distributed.get_world_size",
"torch.distributed.get_rank"
],
[
"torch.cuda.device",
"torch.cuda.current_device"
],
[
"torch.is_storage",
"torch._C._safe_call"
],
[
"torch.Size",
"torch.no_grad"
],
[
"torch.matmul",
"torch.nn.Parameter",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
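The SpectralNorm hook stored in the row above estimates the largest singular value sigma of the weight matrix with a few power-iteration steps and divides the weight by it. A minimal standalone sketch of that estimate, assuming only a recent PyTorch install (torch.linalg.svdvals is used purely as a reference check and is not part of the original hook):

import torch
from torch.nn.functional import normalize

torch.manual_seed(0)
W = torch.randn(40, 20)                 # a weight matrix that is already 2-D
u = normalize(torch.randn(40), dim=0)   # running estimate of the top left singular vector

with torch.no_grad():
    for _ in range(50):                 # the hook defaults to a single iteration per forward pass
        v = normalize(torch.mv(W.t(), u), dim=0, eps=1e-12)
        u = normalize(torch.mv(W, v), dim=0, eps=1e-12)
    sigma = torch.dot(u, torch.mv(W, v))

print(float(sigma))                         # power-iteration estimate of the largest singular value
print(float(torch.linalg.svdvals(W)[0]))    # exact value, for comparison only
W_sn = W / sigma                            # the rescaling performed in compute_weight()

With enough iterations the two printed values agree closely; the hook gets away with n_power_iterations=1 because u is registered as a buffer and reused across forward passes.
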
vonkaenelerik/self-supervised-poisson-gaussian | [
"7ebb4527fa79ace7d5de8c28fb484ef1a5cd1c96"
] | [
"test_mydat.py"
] | [
"import numpy as np\nfrom skimage.metrics import peak_signal_noise_ratio\nfrom nets import *\nfrom scipy.optimize import minimize\n\nimport os\nfrom os import listdir\nfrom os.path import join\nfrom imageio import imread, imwrite\nimport glob\nfrom tqdm import trange\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--path',required=True,help='path to dataset root')\nparser.add_argument('--dataset',required=True,help='dataset name e.g. Confocal_MICE')\nparser.add_argument('--mode',default='uncalib',help='noise model: mse, uncalib, gaussian, poisson, poissongaussian')\nparser.add_argument('--reg',type=float,default=0.1,help='regularization weight on prior std. dev.')\n\nargs = parser.parse_args()\n\n\"\"\" Re-create the model and load the weights \"\"\"\n\n# model = gaussian_blindspot_network((512, 512, 1),'uncalib')\n# model = gaussian_blindspot_network((args.crop, args.crop, 1),args.mode,args.reg)\n\nif args.mode == 'uncalib' or args.mode == 'mse':\n model = gaussian_blindspot_network((1024, 1024, 1), args.mode)\n weights_path = 'weights/weights.%s.%s.latest.hdf5'%(args.dataset,args.mode)\nelse:\n model = gaussian_blindspot_network((1024, 1024, 1), args.mode, args.reg)\n weights_path = 'weights/weights.%s.%s.%0.3f.latest.hdf5'%(args.dataset,args.mode,args.reg)\n\nmodel.load_weights(weights_path)\n\n\"\"\" Load test images \"\"\"\n\ntest_images = []\n\ndef load_images(noise):\n basepath = args.path + '/' + args.dataset + '/' + noise\n images = []\n for path in sorted(glob.glob(basepath + '/mydata_test3/*.tif')):\n images.append(imread(path))\n return np.stack(images,axis=0)[:,:,:,None]/255.\n\nX = load_images('raw')\n#Y = load_images('gt')\n#gt = np.squeeze(Y)*255\n\n\"\"\" Denoise test images \"\"\"\ndef poisson_gaussian_loss(x,y,a,b):\n var = np.maximum(1e-4,a*x+b)\n loss = (y-x)**2 / var + np.log(var)\n return np.mean(loss)\noptfun = lambda p, x, y : poisson_gaussian_loss(x,y,p[0],p[1])\n\ndef denoise_uncalib(y,loc,std,a,b):\n total_var = std**2\n noise_var = np.maximum(1e-3,a*loc+b)\n noise_std = noise_var**0.5\n prior_var = np.maximum(1e-4,total_var-noise_var)\n prior_std = prior_var**0.5\n return np.squeeze(gaussian_posterior_mean(y,loc,prior_std,noise_std))\n\nif args.mode == 'mse' or args.mode == 'uncalib':\n experiment_name = '%s.%s'%(args.dataset,args.mode)\nelse:\n experiment_name = '%s.%s.%0.3f'%(args.dataset,args.mode,args.reg)\nos.makedirs(\"results/%s\"%experiment_name,exist_ok=True)\nresults_path = 'results/%s.tab'%experiment_name\nwith open(results_path,'w') as f:\n f.write('inputPSNR\\tdenoisedPSNR\\n')\n for index,im in enumerate(X):\n pred = model.predict(im.reshape(1,1024,1024,1))\n \n if args.mode == 'uncalib':\n # select only pixels above bottom 2% and below top 3% of noisy image\n good = np.logical_and(im >= np.quantile(im,0.02), im <= np.quantile(im,0.97))[None,:,:,:]\n pseudo_clean = pred[0][good]\n noisy = im[np.squeeze(good, axis=0)]\n\n # estimate noise level\n res = minimize(optfun, (0.01,0), (np.squeeze(pseudo_clean),np.squeeze(noisy)), method='Nelder-Mead')\n print('bootstrap poisson-gaussian fit: a = %f, b=%f, loss=%f'%(res.x[0],res.x[1],res.fun))\n a,b = res.x\n \n # run denoising\n denoised = denoise_uncalib(im[None,:,:,:],pred[0],pred[1],a,b)\n else:\n denoised = pred[0]\n \n # scale and clip to 8-bit\n denoised = np.clip(np.squeeze(denoised*255),0,255)\n \n # write out image\n imwrite('results/%s/%02d.png'%(experiment_name,index),denoised.astype('uint8'))\n\n #noisy = np.squeeze(im)*255\n #psnr_noisy = 
peak_signal_noise_ratio(gt, noisy, data_range = 255)\n #psnr_denoised = peak_signal_noise_ratio(gt, denoised, data_range = 255)\n\n #print(psnr_noisy,psnr_denoised)\n #f.write('%.15f\\t%.15f\\n'%(psnr_noisy,psnr_denoised))\n\n\"\"\" Print averages \"\"\"\n#results = np.loadtxt(results_path,delimiter='\\t',skiprows=1)\n#print('averages:')\n#print(np.mean(results,axis=0))\n\n"
] | [
[
"numpy.log",
"numpy.maximum",
"numpy.squeeze",
"numpy.quantile",
"numpy.stack",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
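The uncalibrated branch of the test script above bootstraps a Poisson-Gaussian noise model, var = a*x + b, by minimizing a Gaussian negative log-likelihood with Nelder-Mead. A small self-contained sketch of that fit on synthetic data (the values a_true = 0.02 and b_true = 0.001 are made up for illustration):

import numpy as np
from scipy.optimize import minimize

def poisson_gaussian_loss(x, y, a, b):
    # same objective as in the script: Gaussian NLL with signal-dependent variance
    var = np.maximum(1e-4, a * x + b)
    return np.mean((y - x) ** 2 / var + np.log(var))

rng = np.random.default_rng(0)
a_true, b_true = 0.02, 0.001
x = rng.uniform(0.05, 0.95, size=100_000)                  # stand-in for the pseudo-clean prediction
y = x + rng.normal(scale=np.sqrt(a_true * x + b_true))     # noisy observations

res = minimize(lambda p: poisson_gaussian_loss(x, y, p[0], p[1]),
               x0=(0.01, 0.0), method='Nelder-Mead')
print(res.x)   # should land near (0.02, 0.001)

In the script the recovered (a, b) are then fed to denoise_uncalib, which turns the predicted per-pixel mean and standard deviation into a Gaussian posterior-mean estimate.
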
ryanjmccall/sb_ml_eng_capstone | [
"dfa87dcbd741c6f502b6cd0eb8f31203568c09a2"
] | [
"modules/module_5_3_2/data/population.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport os\n\ndata_file = os.path.join(os.path.dirname(__file__),'Top5000population.csv')\n\ndata = pd.read_csv(data_file, header=None, thousands=',',sep=',',\n names=['city','state','pop'],\n encoding='iso-8859-1')\n\ndata['city'] = data['city'].str.strip()\ncities = [{'city':line[0],'state':line[1], 'pop':line[2]} for line in data.values]\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
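The loader above relies on pandas' thousands=',' option so that population figures written as quoted, comma-separated numbers parse directly to integers. A tiny sketch with an in-memory CSV (the two sample rows are hypothetical, not taken from Top5000population.csv):

import io
import pandas as pd

csv_text = 'New York ,NY,"8,175,133"\nLos Angeles ,CA,"3,792,621"\n'
data = pd.read_csv(io.StringIO(csv_text), header=None, thousands=',', sep=',',
                   names=['city', 'state', 'pop'])
data['city'] = data['city'].str.strip()                    # drop the trailing spaces in city names
cities = [{'city': r[0], 'state': r[1], 'pop': r[2]} for r in data.values]
print(cities[0])   # city stripped to 'New York', pop parsed as the integer 8175133
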
kovenock/FATES_Parameter_Selection | [
"eb38cc96b3cb6c02ae71426b6351e60b16ed8a56"
] | [
"psfxns/annmeans.py"
] | [
"import netCDF4 as nc4\nimport numpy as np\n\n\ndef annual_mean_model(filepath, var, varfiletype, nyrs, conv_factor):\n \"\"\"Calculate time series of model annual means for one variable.\n \n :param filepath (str): the file path and name for the data file\n :param var (str): the name of the variable to call from data file\n :param varfiletype (int): the model file type, where:\n 0 - contains monthly averages for the entire ecosystem; and\n 1 - contains annual mean values by tree size\n :param nyrs (int): the number of years to use in analysis\n :param conv_factor (int, float): the conversion factor for\n the variable given by var\n :return: a 2-D array containing the annual mean time series\n indexed by (parameter_set, nyrs)\n :rtype: numpy.ndarray\n \"\"\"\n \n # If model output is stored as monthly ecosystem average,\n # calculate annual means. \n if varfiletype == 0:\n \n # Load monthly time series\n if var != 'FLH':\n mthts_temp = nc4.Dataset(filepath).variables[var][:, :, 0]\n elif var == 'FLH':\n mthts_temp = (nc4.Dataset(filepath).variables['FCTR'][:, :, 0] \n + nc4.Dataset(filepath).variables['FGEV'][:, :, 0] \n + nc4.Dataset(filepath).variables['FCEV'][:, :, 0])\n \n # Calculate annual means for nyrs and convert units\n annmeants = (np.nanmean(np.reshape(\n (mthts_temp[:, int(-1*nyrs*12):]),\n (mthts_temp.shape[0], -1, 12)), \n axis=2)) * conv_factor\n \n mthts_temp = None\n \n # If model output is stored as annual means by tree size,\n # sum across tree sizes.\n elif varfiletype == 1:\n \n # Calculate annual means for nyrs and convert units\n annmeants = np.squeeze(np.nansum((\n nc4.Dataset(filepath).variables[var + '_SCLS'][:, int(-1*nyrs):, :]),\n axis=2)) * conv_factor\n \n return annmeants\n\n\ndef annual_mean_fluxobs(mthts, startmth):\n \"\"\"Calculate annual mean time series from monthly fluxtower estimates.\n \n :param mthts (numpy.ndarray): a 2-D array of fluxtower \n observations with shape (years, months)\n :param startmth (int): the number of the start month\n for this annual mean time series calculation\n (e.g., 7 = start with July, 9 = start with Sept)\n :return: a vector containing the annual mean time series\n :rtype: numpy.ndarray\n \"\"\"\n \n # Specify number of months to discard\n mthts_dif = np.reshape(mthts, (1, -1))[:, startmth-1 : startmth-1-12]\n \n # Calculate annual mean time series\n annmeants = np.nanmean(np.reshape(\n mthts_dif, (int(mthts_dif.shape[1] / 12), 12)), axis=1)\n \n return annmeants\n"
] | [
[
"numpy.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
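The monthly branch of annual_mean_model reduces a (parameter_sets, years*12) series to annual means by keeping the last nyrs*12 months, reshaping them into (years, 12) blocks, and averaging the last axis. The same trick on synthetic numbers, with no netCDF file involved:

import numpy as np

n_param_sets, nyrs = 3, 5
monthly = np.arange(n_param_sets * nyrs * 12, dtype=float).reshape(n_param_sets, nyrs * 12)

# keep the trailing nyrs years, split into 12-month blocks, average each block
annual = np.nanmean(monthly[:, int(-1 * nyrs * 12):].reshape(n_param_sets, -1, 12), axis=2)
print(annual.shape)    # (3, 5): one annual-mean series per parameter set
print(annual[0, :2])   # mean of months 0-11 (5.5), then months 12-23 (17.5)
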
DFNaiff/BVBQ | [
"48f0eb624483f67b748d791efc0c06ddfb6e0646"
] | [
"bvbq/interface.py"
] | [
"# -*- coding: utf-8 -*-\n# pylint: disable=E1101\n\"\"\"\n Deprecated. Use named_interface.BVBQMixMVN.\n Won't be documented due to this\n\"\"\"\nimport torch\n\nfrom . import utils\nfrom . import bvbq\nfrom . import distributions\nfrom . import gp\nfrom . import acquisition\nfrom . import metrics\n\n\nclass BVBQMixMVN(object):\n def __init__(self, eval_function, ndim):\n self.set_eval_function(eval_function)\n self.ndim = ndim\n self.logprobgp = None\n self.mixmeans = None\n self.mixvars = None\n self.mixweights = None\n self.nmixtures = 0\n\n def initialize_data(self, xdata, ydata, kind='smatern52',\n noise=0.0, mean=-30.0, empirical_params=False,\n **kwargs):\n # TODO : Assertions, customizations and new policies\n logprobgp = gp.SimpleGP(self.ndim, kind=kind,\n noise=noise, zeromax=True)\n logprobgp.mean = mean\n logprobgp.fix_mean()\n logprobgp.fix_noise()\n logprobgp.set_data(xdata, ydata, empirical_params=empirical_params)\n self.logprobgp = logprobgp\n\n def initialize_components(self, init_policy='manual', **kwargs):\n # TODO : Assertions, customization and new policies\n assert init_policy in ['manual', 'manual_mix']\n if init_policy == 'manual':\n mean = kwargs.get('mean')\n var = kwargs.get('var')\n mixmeans = torch.atleast_2d(utils.tensor_convert(mean))\n mixvars = torch.atleast_2d(utils.tensor_convert(var))\n mixweights = torch.ones(1)\n nmixtures = 1\n elif init_policy == 'manual_mix':\n nmixtures = mixmeans.shape[0]\n self.mixmeans = mixmeans\n self.mixvars = mixvars\n self.mixweights = mixweights\n self.nmixtures = nmixtures\n\n def update_distribution(self):\n #TODO : Customization\n mean, var = bvbq.propose_component_mvn_mixmvn_relbo(\n self.logprobgp,\n self.mixmeans,\n self.mixvars,\n self.mixweights)\n mixmeans, mixvars, mixweights = bvbq.update_distribution_mvn_mixmvn(\n self.logprobgp,\n mean, var,\n self.mixmeans,\n self.mixvars,\n self.mixweights)\n self.mixmeans = mixmeans\n self.mixvars = mixvars\n self.mixweights = mixweights\n\n def update_evaluations(self, name='PP'):\n x0 = self.distribution.sample(1)[0, :]\n x = acquisition.acquire_next_point_mixmvn(x0,\n self.logprobgp,\n self.distribution,\n name=name)\n y = self.evaluate_single(x)\n\n # FIXME: Fix this function\n# self.logprobgp.update(x,y)\n # FIXME : Substitute below lines for actual (fixed) efficient update above\n X = torch.vstack([self.eval_points, x])\n y = torch.vstack([self.eval_values, y])\n self.logprobgp.set_data(X, y)\n\n def evaluate_single(self, x):\n return torch.squeeze(self.eval_function(x))\n\n def fit_all_parameters(self):\n #TODO : Customization\n mixmeans, mixvars, mixweights = bvbq.fit_mixmvn_elbo(\n self.logprobgp, self.mixmeans, self.mixvars, self.mixweights)\n self.mixmeans = mixmeans\n self.mixvars = mixvars\n self.mixweights = mixweights\n \n def fit_all_weights(self):\n #TODO : Customization\n mixmeans, mixvars, mixweights = bvbq.reweight_mixmvn_elbo(\n self.logprobgp, self.mixmeans, self.mixvars, self.mixweights)\n self.mixmeans = mixmeans\n self.mixvars = mixvars\n self.mixweights = mixweights\n\n def set_eval_function(self, eval_function):\n self._eval_function = eval_function\n self.eval_function = utils.numpy_to_torch_wrapper(eval_function)\n\n def elbo_metric(self, nsamples=1000):\n return metrics.bq_mixmvn_elbo_with_var(self.logprobgp,\n self.mixmeans,\n self.mixvars,\n self.mixweights,\n nsamples=nsamples)\n\n def optimize_gp_params(self, *args, **kwargs):\n baseopt = kwargs.get('baseopt', 'QN')\n kwargs.pop('baseopt', None)\n assert baseopt in ['QN', 'SGD']\n if 
baseopt == 'QN':\n return self.optimize_gp_params_qn(*args, **kwargs)\n elif baseopt == 'SGD':\n return self.optimize_gp_params_sgd(*args, **kwargs)\n\n def suggest_initialization_points(self, n):\n raise NotImplementedError\n #return xdata\n\n @property\n def distribution(self):\n return distributions.MixtureDiagonalNormalDistribution(\n self.mixmeans, self.mixvars, self.mixweights)\n\n # XXX: This actually performs computation\n @property\n def optimize_gp_params_qn(self):\n return self.logprobgp.optimize_params_qn\n\n @property\n def optimize_gp_params_sgd(self):\n return self.logprobgp.optimize_params_sgd\n\n @property\n def eval_points(self):\n return self.logprobgp.X\n\n @property\n def eval_values(self):\n return self.logprobgp.y\n"
] | [
[
"torch.vstack",
"torch.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
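The interface above ultimately exposes its variational posterior as distributions.MixtureDiagonalNormalDistribution(mixmeans, mixvars, mixweights); that class is not shown in this row, so the snippet below is only an illustrative stand-in built from torch.distributions, with made-up means, variances and weights:

import torch
from torch import distributions as td

mixmeans = torch.tensor([[0.0, 0.0], [2.0, -1.0]])   # (n_mixtures, ndim) component means
mixvars = torch.tensor([[1.0, 0.5], [0.2, 0.3]])     # diagonal variances per component
mixweights = torch.tensor([0.7, 0.3])                # mixture weights

mix = td.MixtureSameFamily(
    td.Categorical(probs=mixweights),
    td.Independent(td.Normal(mixmeans, mixvars.sqrt()), 1))   # diagonal Gaussians as components

x = mix.sample((5,))      # (5, ndim) draws from the mixture
print(mix.log_prob(x))    # mixture log-density: a logsumexp over the weighted components
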
YoNyeoSeok/refinenet-pytorch | [
"34dfa49a141630247aef1d5d2424c823ecba46c7"
] | [
"train/training.py"
] | [
"import sys\nsys.path.append('/home/user/research/refinenet-pytorch')\nimport os\nimport numpy as np\nimport tqdm\nimport argparse\nimport math\nimport random\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport datasets as ds\nfrom torchvision import transforms as trf\nfrom models.refinenet_resnet import refinenet_resnet101\nfrom utils.metrics import runningScore\nfrom vision.transforms import RandomHorizontalFlip, RandomResizedCrop\n\nimport wandb\n\n\ndef arg_parser(parser=argparse.ArgumentParser()):\n parser.add_argument('--input-scale-factor', type=float, default=1.)\n parser.add_argument('--freeze-batch-norm', action='store_true')\n\n parser.add_argument('--clear-foggy-beta', type=str, default='clear', choices=['clear', 'beta_0.02', 'beta_0.01', 'beta_0.005'])\n parser.add_argument('--total-epoch', type=int, default=12)\n parser.add_argument('--batch-size', type=int, default=1)\n parser.add_argument('--valid-batch-size', type=int, default=1)\n parser.add_argument('--optimizer', type=str, default='SGD')\n parser.add_argument('--optimizer-lr', type=float, default=5e-5)\n\n parser.add_argument('--data-aug-hflip', action='store_true')\n parser.add_argument('--data-aug-hflip-p', type=float, default=0.5)\n parser.add_argument('--data-aug-crop', action='store_true')\n parser.add_argument('--data-aug-crop-size', type=int, nargs=2, default=[512, 512])\n parser.add_argument('--data-aug-crop-scale', type=float, nargs=2, default=[0.7, 1.3])\n parser.add_argument('--data-aug-crop-ratio', type=float, nargs=2, default=[1, 1])\n\n # parser.add_argument('--gpu', type=int, default=0)\n parser.add_argument('--use-wandb', action='store_true')\n return parser\n\ndef load_train_valid_loader(args):\n cityscape_dataset_dir = '/home/user/data/Cityscapes'\n classes = ds.Cityscapes.classes\n id2label = {cls.id:cls for cls in classes}\n\n def semantic2sparse(semantic):\n sparse = np.vectorize(lambda x: id2label[x].train_id)(np.array(semantic))\n # pylint: disable=E1101\n sparse = torch.from_numpy(sparse)\n # pylint: enable=E1101\n return sparse\n\n hflip = [RandomHorizontalFlip(args.data_aug_hflip_p)] if args.data_aug_hflip else []\n resized_crop = ([\n RandomResizedCrop(args.data_aug_crop_size, args.data_aug_crop_scale, args.data_aug_crop_ratio)] \n if args.data_aug_crop else [])\n\n aug_transform = trf.Compose(hflip + resized_crop)\n\n tensor_transform = trf.Compose([\n trf.ToTensor(),\n trf.Lambda(lambda x: x*255-128),\n ])\n semantic_transform = trf.Compose([\n trf.Lambda(semantic2sparse),\n ])\n image_transform = trf.Compose([\n trf.Lambda(np.array)\n ])\n\n aug_tensor_transform = trf.Compose(\n aug_transform.transforms + tensor_transform.transforms)\n aug_semantic_transform = trf.Compose(\n aug_transform.transforms + semantic_transform.transforms)\n aug_image_transform = trf.Compose(\n aug_transform.transforms + image_transform.transforms)\n\n if 'clear' == args.clear_foggy_beta:\n image_modes = ['clear', 'gtFine'] \n image_types = [['_leftImg8bit.png'], ['semantic', 'color']]\n else:\n image_modes = ['foggyDBF', 'gtFine'] \n image_types = [[args.clear_foggy_beta], ['semantic', 'color']]\n train_image_transforms = [aug_tensor_transform, [aug_semantic_transform, aug_image_transform]]\n valid_image_transforms = [tensor_transform, [semantic_transform, image_transform]]\n \n train_ds = ds.RefinedFoggyCityscapes(\n cityscape_dataset_dir,\n split='train',\n image_modes=image_modes, \n image_types=image_types,\n image_transforms=train_image_transforms,\n 
refined_filenames='foggy_trainval_refined_filenames.txt')\n train_ds.share_transform = aug_transform\n train_ds.update_share_transform = lambda : [transform.update() for transform in aug_transform.transforms]\n\n valid_ds = ds.RefinedFoggyCityscapes(\n cityscape_dataset_dir,\n split='val',\n image_modes=image_modes, \n image_types=image_types,\n image_transforms=valid_image_transforms,\n refined_filenames='foggy_trainval_refined_filenames.txt')\n\n train_dl = torch.utils.data.DataLoader(train_ds, batch_size=args.batch_size, shuffle=True)\n valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=args.valid_batch_size, shuffle=False)\n\n return train_dl, valid_dl\n\ndef load_model(args):\n model_pretrained_dir = '/home/user/research/refinenet-pytorch/pretrained/Cityscapes'\n model = refinenet_resnet101(model_pretrained_dir)\n return model\n\n# train_dl, valid_dl = load_train_valid_loader(args)\nclass InputOutputInterpolate(torch.nn.Module):\n def __init__(self, model, scale_factor):\n super(InputOutputInterpolate, self).__init__()\n self.model = model\n self.scale_factor = scale_factor\n\n def forward(self, x):\n size = x.shape[-2:]\n x = torch.nn.functional.interpolate(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=False)\n out = self.model(x)\n return torch.nn.functional.interpolate(out, size=size, mode='bilinear', align_corners=False)\n\ndef load_training_model(args):\n training_model = InputOutputInterpolate(load_model(args), args.input_scale_factor)\n training_model.model.resnet.to(1)\n training_model.model.refinenets.to(0)\n training_model.model.clf.to(0)\n return training_model\n\nclass ModelCriteria(torch.nn.Module):\n def __init__(self, model, criteria):\n super(ModelCriteria, self).__init__()\n self.model = model\n self.criteria = criteria\n \n def forward(self, input, target):\n output = self.model(input)\n return self.criteria(output, target.to(output.device))\n\n def state_dict(self):\n return self.model.state_dict()\n\nclass ModelOptimizer(torch.nn.Module):\n def __init__(self, model_criteria, optimizer):\n super(ModelOptimizer, self).__init__()\n self.model_criteria = model_criteria\n self.optimizer = optimizer\n\n def step(self, input, target):\n self.optimizer.zero_grad()\n loss = self.model_criteria(input, target)\n loss.backward()\n self.optimizer.step()\n\n return loss\n\ndef load_model_criteria_optimizer(args):\n model = load_training_model(args)\n\n CELoss = torch.nn.CrossEntropyLoss(ignore_index=255)\n # L1Loss = torch.nn.L1Loss()\n # L2Loss = torch.nn.MSELoss()\n model_criteria = ModelCriteria(model, CELoss)\n \n optimizer = torch.optim.__dict__[args.optimizer](\n model.parameters(),\n **{k.lstrip('optimizer_'): v for k, v in vars(args).items() if 'optimizer_' in k})\n # optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)\n model_optimizer = ModelOptimizer(model_criteria, optimizer)\n\n return model, model_criteria, model_optimizer\n\nclass WandbLog():\n def __init__(self, use_wandb):\n self.use_wandb = use_wandb\n self.train_batch_step = 0\n self.valid_epoch_step = 0\n def train_batch_log(self, train_batch_loss):\n self.train_batch_step += 1\n if self.use_wandb:\n wandb.log({'Train_Batch_Loss': train_batch_loss}, step=self.train_batch_step)\n def valid_epoch_log(self, valid_epoch_loss):\n if self.use_wandb:\n wandb.log({'Valid_Epoch_Loss': valid_epoch_loss}, step=self.train_batch_step)\n self.valid_epoch_step += 1\n\ndef train_model(model_optimizer, train_dl, wandb_log, args):\n train_loss = 0\n device = 
next(model_optimizer.parameters()).device\n model_optimizer.train()\n if args.freeze_batch_norm:\n for module in model_optimizer.modules():\n if isinstance(module, nn.modules.batchnorm._BatchNorm):\n module.eval()\n \n pbar = tqdm.tqdm(enumerate(train_dl), total=len(train_dl))\n for _, ((b_clear_beta, ), (b_sparse, b_color)) in pbar:\n loss = model_optimizer.step(b_clear_beta.to(device), b_sparse)\n train_loss += loss\n\n pbar.set_description(\"Train Batch {:3d}\".format(wandb_log.train_batch_step))\n pbar.set_postfix_str(\"Batch Loss={:.4f}\".format(loss.detach().cpu().numpy()))\n wandb_log.train_batch_log(loss.detach().cpu().numpy())\n\n train_dl.dataset.update_share_transform()\n train_loss /= len(train_dl)\n pbar.write(\"Train Epoch Loss={:.4f}\".format(train_loss.detach().cpu().numpy()))\n return train_loss\n\ndef eval_model(model_criteria, valid_dl, wandb_log, args):\n eval_loss = 0\n device = next(model_criteria.parameters()).device\n model_criteria.eval()\n \n pbar = tqdm.tqdm(enumerate(valid_dl), total=len(valid_dl))\n with torch.no_grad():\n for _, ((b_clear_beta, ), (b_sparse, _)) in pbar:\n loss = model_criteria(b_clear_beta.to(device), b_sparse)\n eval_loss += loss\n\n pbar.set_description(\"Valid Epoch {:3d}\".format(wandb_log.valid_epoch_step))\n eval_loss /= len(valid_dl)\n pbar.write(\"Valid Epoch Loss={:.4f}\".format(eval_loss.cpu().numpy()))\n if wandb_log.use_wandb:\n state_dict_name = 'state_dict.{:02d}.pth'.format(wandb_log.valid_epoch_step)\n torch.save(model_criteria.state_dict(), os.path.join(wandb.run.dir, state_dict_name))\n wandb.save(state_dict_name)\n wandb_log.valid_epoch_log(eval_loss.cpu().numpy())\n return eval_loss\n\ndef main(parser, name, load_train_valid_loader, load_model_criteria_optimizer, train_model, eval_model):\n args = parser.parse_args()\n print(args)\n if args.use_wandb:\n wandb.init(project='refinenet-pytorch', name=name, config=args, dir='/home/user/research/refinenet-pytorch/train')\n\n train_dl, valid_dl = load_train_valid_loader(args)\n # train_dl.dataset.indices = train_dl.dataset.indices[:10]\n # valid_dl.dataset.indices = valid_dl.dataset.indices[:10]\n print('dataset loaded')\n model, model_criteria, model_optimizer = load_model_criteria_optimizer(args)\n print('model loaded')\n wandb_log = WandbLog(args.use_wandb)\n\n eval_model(model_criteria, valid_dl, wandb_log, args)\n for epoch in range(args.total_epoch):\n train_model(model_optimizer, train_dl, wandb_log, args)\n eval_model(model_criteria, valid_dl, wandb_log, args)\n\nif __name__ == '__main__':\n main(\n parser=arg_parser(),\n name='training',\n load_train_valid_loader=load_train_valid_loader,\n load_model_criteria_optimizer=load_model_criteria_optimizer,\n train_model=train_model,\n eval_model=eval_model)"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"numpy.vectorize",
"torch.no_grad",
"torch.nn.functional.interpolate",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
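InputOutputInterpolate in the training script above runs the network at a reduced resolution and interpolates the logits back to the input size, so the loss is always computed against full-resolution labels. A runnable sketch of the same wrapper with a one-layer stand-in model instead of RefineNet (the 19 output channels match Cityscapes' train classes):

import torch
import torch.nn as nn

class InputOutputInterpolate(nn.Module):
    def __init__(self, model, scale_factor):
        super().__init__()
        self.model = model
        self.scale_factor = scale_factor

    def forward(self, x):
        size = x.shape[-2:]                                   # remember the input resolution
        x = nn.functional.interpolate(x, scale_factor=self.scale_factor,
                                      mode='bilinear', align_corners=False)
        out = self.model(x)                                   # run the model at the reduced scale
        return nn.functional.interpolate(out, size=size, mode='bilinear',
                                         align_corners=False)

tiny_model = nn.Conv2d(3, 19, kernel_size=3, padding=1)       # stand-in for the RefineNet backbone
wrapped = InputOutputInterpolate(tiny_model, scale_factor=0.5)
logits = wrapped(torch.randn(1, 3, 128, 256))
print(logits.shape)   # torch.Size([1, 19, 128, 256]) — back at the input resolution
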
alexanderkell/temporal_granularity | [
"f29b294beb360d8d66c6fedf78bbf9ae84055b24",
"f29b294beb360d8d66c6fedf78bbf9ae84055b24",
"f29b294beb360d8d66c6fedf78bbf9ae84055b24",
"f29b294beb360d8d66c6fedf78bbf9ae84055b24"
] | [
"test/test_metrics/test_multi_year_metrics.py",
"src/data/make_dataset_load_NG.py",
"src/models/optimisation_algorithms/genetic_algorithms/nsga2.py",
"src/metrics/multi_metrics.py"
] | [
"from pathlib import Path\nimport pandas as pd\nfrom src.metrics.multi_year_metrics import MultiYearMetrics\nimport pytest\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nproject_dir = Path(\"__file__\").resolve().parents[1]\n\n\[email protected]\ndef define_multi_year_metrics():\n pv_original = pd.read_csv('{}/temporal_granularity/data/processed/resources/pv_processed.csv'.format(project_dir))\n pv_representative = pd.DataFrame({\"index_for_year\": list(range(8760)), \"capacity_factor\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] * 365})\n wind_representative = pd.DataFrame({\"index_for_year\": list(range(8760)), \"capacity_factor\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] * 365})\n load_representative = pd.DataFrame({\"index_for_year\": list(range(8760)), \"capacity_factor\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] * 365})\n wind_original = pd.read_csv('{}/temporal_granularity/data/processed/resources/onshore_processed.csv'.format(project_dir))\n # load_original = pd.read_csv('{}/temporal_granularity/data/processed/demand/load_processed_normalised.csv'.format(project_dir))\n load_original = pd.read_csv('{}/temporal_granularity/data/processed/demand/load_NG/load_processed_normalised.csv'.format(project_dir))\n\n original_data = []\n for dat in [pv_original, wind_original, load_original]:\n dat.datetime = pd.to_datetime(dat.datetime)\n dat['year'] = dat.datetime.dt.year\n original_data.append(dat)\n\n multi_year_metrics_calc = MultiYearMetrics(original_data[0], pv_representative, original_data[1], wind_representative, original_data[2], load_representative)\n yield multi_year_metrics_calc\n\n\nclass Test_MultiYearMetrics:\n\n def test_group_list_dataframes(self, define_multi_year_metrics):\n\n grouped_dfs = define_multi_year_metrics._group_list_dataframes()\n assert len(grouped_dfs) == 3\n assert list(grouped_dfs[0].groups.keys()) == list(range(1980, 2017))\n assert list(grouped_dfs[1].groups.keys()) == list(range(1980, 2017))\n assert list(grouped_dfs[2].groups.keys()) == list(range(2005, 2019))\n\n def test_get_multi_year_metrics(self, define_multi_year_metrics):\n\n result_errors = define_multi_year_metrics.get_multi_year_metrics(\"dc\")\n\n def test_get_multi_year_average_metrics(self, define_multi_year_metrics):\n mean_errors = define_multi_year_metrics.get_multi_year_average_metrics(\"dc\")\n logger.debug(mean_errors)\n",
"# -*- coding: utf-8 -*-\nimport logging\nimport pandas as pd\nfrom pathlib import Path\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef main(data, output_filepath, output_filepath2):\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n data = data[['SETTLEMENT_DATE', 'SETTLEMENT_PERIOD', 'ND']]\n data['SETTLEMENT_PERIOD'] = data['SETTLEMENT_PERIOD'] * 30\n data['SETTLEMENT_PERIOD'] = pd.to_datetime(data['SETTLEMENT_PERIOD'], unit='m').dt.strftime('%H:%M')\n\n data['SETTLEMENT_DATE'] = data['SETTLEMENT_DATE'] + \" \" + data['SETTLEMENT_PERIOD']\n data.drop(inplace=True, columns=['SETTLEMENT_PERIOD'])\n\n data.rename(inplace=True, columns={\"SETTLEMENT_DATE\": \"datetime\", 'ND': 'capacity_factor'})\n\n data.datetime = pd.to_datetime(data.datetime)\n\n data = data.set_index(\"datetime\")\n data.index = pd.to_datetime(data.index)\n\n data = data.capacity_factor.resample(\"h\").mean()\n\n logger.debug(\"data: \\n{}\".format(data))\n data = data.reset_index()\n\n values = data.capacity_factor.values\n values = values.reshape((len(values), 1))\n # train the normalization\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler = scaler.fit(values)\n # normalize the dataset and print the first 5 rows\n normalized = scaler.transform(values)\n\n normalized_data = data.copy()\n normalized_data.capacity_factor = normalized\n\n logger.debug(normalized_data.head())\n\n data.to_csv(output_filepath)\n normalized_data.to_csv(output_filepath2)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.DEBUG, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n project_dir = Path(\"__file__\").resolve().parents[1]\n\n load_1 = pd.read_csv(\"{}/temporal_granularity/data/raw/resources/demand_nationalgrid/DemandData_2005-2010 (1).csv\".format(project_dir))\n load_2 = pd.read_csv(\"{}/temporal_granularity/data/raw/resources/demand_nationalgrid/DemandData_2011-2016 (1).csv\".format(project_dir))\n load_3 = pd.read_csv(\"{}/temporal_granularity/data/raw/resources/demand_nationalgrid/DemandData_2017 (2).csv\".format(project_dir))\n load_4 = pd.read_csv(\"{}/temporal_granularity/data/raw/resources/demand_nationalgrid/DemandData_2018.csv\".format(project_dir))\n\n load = pd.concat([load_1, load_2, load_3, load_4])\n\n # main('{}/temporal_granularity/data/raw/resources/ninja_pv_country_GB_sarah_nuts-2_corrected.csv'.format(project_dir), '{}/temporal_granularity/data/processed/resources/pv_processed.csv'.format(project_dir))\n main(load,\n '{}/temporal_granularity/data/processed/demand/load_NG/load_processed.csv'.format(project_dir),\n '{}/temporal_granularity/data/processed/demand/load_NG/load_processed_normalised.csv'.format(project_dir))\n",
"# This file is part of DEAP.\n#\n# DEAP is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of\n# the License, or (at your option) any later version.\n#\n# DEAP is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy\nimport array\nimport random\nimport json\nimport pandas as pd\nfrom pathlib import Path\nfrom scoop import futures\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nproject_dir = Path(\"__file__\").resolve().parents[1]\nsys.path.insert(0, '{}/'.format(project_dir))\n\nproject_dir=\"\"\n\n# import json\n\nimport numpy\n\n# from math import sqrt\n\nfrom deap import algorithms\nfrom deap import base\nfrom deap import benchmarks\nfrom deap.benchmarks.tools import diversity, convergence, hypervolume\nfrom deap import creator\nfrom deap import tools\nfrom src.models.env.som_env import SOMEnv\nfrom src.models.env.k_means_env import KMeansEnv\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\nyear_start = \"2006\"\nyear_end = \"2011\"\n\nonshore_data = pd.read_csv(\n '{}data/processed/data_grouped_by_day/pv_each_day.csv'.format(project_dir))\n\nonshore_data_np = onshore_data[(onshore_data.date > \"2006\") & (\n onshore_data.date < \"2011\")].reset_index().drop(\n columns=[\"date\", 'index']).values\n\nload_data = pd.read_csv(\n \"{}data/processed/data_grouped_by_day/load_NG_normalised_each_day.csv\".format(project_dir))\n\nload_data_np = load_data[(load_data.date > \"2006\") & (\n load_data.date < \"2011\")].reset_index().drop(columns=[\"date\", 'index']).values\n\n\n# offshore_data = pd.read_csv(\n# '{}data/processed/resources/offshore_processed.csv'.format(project_dir))\npv_data = pd.read_csv(\n '{}data/processed/data_grouped_by_day/pv_each_day.csv'.format(project_dir))\n\npv_data_np = pv_data[(pv_data.date > \"2006\") & (\n pv_data.date < \"2011\")].reset_index().drop(columns=[\"date\", 'index']).values\n\npv_data = pd.read_csv(\n '{}data/processed/resources/pv_processed.csv'.format(project_dir))\nonshore_data = pd.read_csv(\n '{}data/processed/resources/onshore_processed.csv'.format(project_dir))\nload_data = pd.read_csv(\n \"{}data/processed/demand/load_processed_normalised.csv\".format(project_dir))\n\n\ncreator.create(\"FitnessMin\", base.Fitness, weights=(-1.0, -1.0, -1.0))\ncreator.create(\"Individual\", array.array, typecode='d',\n fitness=creator.FitnessMin)\n\ntoolbox = base.Toolbox()\n\n# Problem definition\n# Functions zdt1, zdt2, zdt3, zdt6 have bounds [0, 1]\nBOUND_LOW, BOUND_UP = 0.0, 100\n\n# Functions zdt4 has bounds x1 = [0, 1], xn = [-5, 5], with n = 2, ..., 10\n# BOUND_LOW, BOUND_UP = [0.0] + [-5.0]*9, [1.0] + [5.0]*9\n\n# Functions zdt1, zdt2, zdt3 have 30 dimensions, zdt4 and zdt6 have 10\nNDIM = 3 * 12 * 12 + 2\n# NDIM = 3 * 51 + 1\n\n\ndef uniform(low, up, size=None):\n try:\n return [random.randint(a, b) for a, b in zip(low, up)]\n except TypeError:\n return [random.randint(a, b) for a, b in zip([low] * size, [up] * size)]\n\n\ndef evalMinSOM(individual):\n individual = [int(i) for i in individual]\n env = SOMEnv(pv_data_np, 
onshore_data_np, load_data_np,\n pv_data, onshore_data, load_data, round(individual[0] / 10) + 2, round(individual[1] / 10) + 2, 20000, int(year_end))\n result = env.step(individual[2:])\n result = result[0], result[1], result[2]\n\n return result\n # return individual[0], individual[1], individual[2]\n\n\ndef evalMinKMeans(individual):\n individual = [int(i) for i in individual]\n env = KMeansEnv(pv_data_np, onshore_data_np, load_data_np,\n pv_data, onshore_data, load_data, int(individual[0] / 2) + 1)\n result = env.step(individual[1:])\n result = result[0], result[1], result[2]\n return result\n\n\ntoolbox.register(\"map_distributed\", futures.map)\n\n\ntoolbox.register(\"attr_int\", np.random.randint, low=0, high=100)\n\ntoolbox.register(\"individual\", tools.initRepeat, creator.Individual,\n toolbox.attr_int, NDIM)\n\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\n# toolbox.register(\"evaluate\", evalMinKMeans)\ntoolbox.register(\"evaluate\", evalMinSOM)\ntoolbox.register(\"mate\", tools.cxSimulatedBinaryBounded,\n low=BOUND_LOW, up=BOUND_UP, eta=20.0)\ntoolbox.register(\"mutate\", tools.mutPolynomialBounded,\n low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0 / NDIM)\ntoolbox.register(\"select\", tools.selNSGA2)\n\n\ndef main(seed=None):\n random.seed(seed)\n\n NGEN = 250\n MU = 100\n CXPB = 0.9\n\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean, axis=0)\n stats.register(\"std\", numpy.std, axis=0)\n stats.register(\"min\", numpy.min, axis=0)\n stats.register(\"max\", numpy.max, axis=0)\n\n logbook = tools.Logbook()\n logbook.header = \"gen\", \"evals\", \"std\", \"min\", \"avg\", \"max\"\n\n pop = toolbox.population(n=MU)\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in pop if not ind.fitness.valid]\n fitnesses = toolbox.map_distributed(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n # This is just to assign the crowding distance to the individuals\n # no actual selection is done\n pop = toolbox.select(pop, len(pop))\n\n record = stats.compile(pop)\n logbook.record(gen=0, evals=len(invalid_ind), **record)\n print(logbook.stream)\n\n # Begin the generational process\n for gen in range(1, NGEN):\n\n print(\"-- Generation %i --\" % gen)\n\n # Vary the population\n offspring = tools.selTournamentDCD(pop, len(pop))\n offspring = [toolbox.clone(ind) for ind in offspring]\n\n for ind1, ind2 in zip(offspring[::2], offspring[1::2]):\n if random.random() <= CXPB:\n toolbox.mate(ind1, ind2)\n\n toolbox.mutate(ind1)\n toolbox.mutate(ind2)\n del ind1.fitness.values, ind2.fitness.values\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = toolbox.map_distributed(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n print(\" Evaluated %i individuals\" % len(invalid_ind))\n\n # Select the next generation population\n pop = toolbox.select(pop + offspring, MU)\n record = stats.compile(pop)\n logbook.record(gen=gen, evals=len(invalid_ind), **record)\n print(logbook.stream)\n\n best_ind = tools.selBest(pop, 1)[0]\n\n print(\"Best individual is %s, %s\" %\n (np.rint(best_ind), best_ind.fitness.values))\n\n front = numpy.array(\n [ind.fitness.values + tuple(ind) for ind in pop])\n\n # 
np.savetxt('{}/src/models/optimisation_algorithms/genetic_algorithms/pareto_front/k_meansdata/pareto_front_{}.csv'.format(project_dir, gen), front, delimiter=\",\")\n np.savetxt('{}src/models/optimisation_algorithms/genetic_algorithms/pareto_front/long_term/data/pareto_front_{}.csv'.format(\n project_dir, gen), front, delimiter=\",\")\n fig = plt.figure(1)\n\n columns = 2\n rows = 1\n\n fig.add_subplot(rows, columns, 1)\n plt.scatter(front[:, 0], front[:, 1], c=\"b\")\n\n fig.add_subplot(rows, columns, 2)\n plt.scatter(front[:, 1], front[:, 2], c=\"b\")\n\n # plt.savefig('{}/src/models/optimisation_algorithms/genetic_algorithms/pareto_front/k_means/images/pareto_front_{}.png'.format(project_dir, gen))\n plt.savefig('{}src/models/optimisation_algorithms/genetic_algorithms/pareto_front/long_term/images/pareto_front_{}.png'.format(project_dir, gen))\n plt.close()\n\n fig = plt.figure(1)\n ax = Axes3D(fig)\n ax.scatter(front[:, 0], front[:, 1], front[:, 2], c='red')\n\n ax.axis(\"tight\")\n fig.savefig('{}src/models/optimisation_algorithms/genetic_algorithms/pareto_front/long_term/images/pareto_front_3D_{}.png'.format(project_dir, gen))\n # fig.savefig('{}/src/models/optimisation_algorithms/genetic_algorithms/pareto_front/k_means/images/pareto_front_3D_{}.png'.format(project_dir, gen))\n plt.close()\n\n # print(\"Final population hypervolume is %f\" %\n # hypervolume(pop, [11.0, 11.0]))\n\n return pop, logbook\n\n\nif __name__ == \"__main__\":\n\n pop, stats = main()\n pop.sort(key=lambda x: x.fitness.values)\n\n print(stats)\n\n front = numpy.array([ind.fitness.values for ind in pop])\n plt.scatter(front[:, 0], front[:, 1], c=\"b\")\n plt.axis(\"tight\")\n plt.savefig('/Users/b1017579/Documents/PhD/Projects/14-temporal-granularity/src/models/optimisation_algorithms/genetic_algorithms/pareto_front.png')\n",
"\"\"\"\n Description: Functionality to calculate error metrics on many different time series or curves.\n\n\b Created on Fri Apr 05 2019\n\b\n\b Copyright (c) 2019 Newcastle University\n\b License is MIT\n\b Email is [email protected]\n\"\"\"\n\n\nimport logging\nfrom scipy import stats\n\nfrom itertools import combinations\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiMetrics:\n \"\"\"Calculates error metrics which require more than one comparison.\n\n Calculates error metrics which require more than one comparison, for example correlation which\n compares every curve against every other curve\n\n :return: MultiMetrics object\n :rtype: MultiMetrics\n \"\"\"\n\n def __init__(self, solar, wind, load):\n \"\"\"Instantiate MultiMetrics object with solar, wind and load curve.\n\n Get metrics which require the comparison of solar wind and load curves\n\n :param solar: Any curve which represents solar\n :type solar: pandas dataframe\n :param wind: Any curve which represents wind\n :type wind: pandas dataframe\n :param load: Any curve which represents load\n :type load: pandas dataframe\n \"\"\"\n\n self.solar = solar\n self.wind = wind\n self.load = load\n\n def get_correlations(self):\n \"\"\"Return correlations for each of the curves.\n\n Calculates the pearson correlation coefficient of each of the curves against each of the other curves\n\n :return: Pearson correlation coefficient for each of the curves.\n :rtype: dict\n \"\"\"\n combination = list(combinations([self.solar, self.wind, self.load], r=2))\n combination_names = list(combinations([\"solar\", \"wind\", \"load\"], r=2))\n\n correlations = []\n for name, comb in zip(combination_names, combination):\n name = \"-\".join(name)\n single_result = {}\n result = self._calculate_correlation(comb[0], comb[1])\n single_result.update({\"metric\": \"correlation\", \"series_type\": name, \"value\": result})\n\n correlations.append(single_result)\n\n return correlations\n\n def _calculate_correlation(self, time_series_1, time_series_2):\n corr = stats.pearsonr(time_series_1.capacity_factor, time_series_2.capacity_factor)[0]\n return corr\n"
] | [
[
"pandas.to_datetime"
],
[
"pandas.concat",
"pandas.to_datetime",
"sklearn.preprocessing.MinMaxScaler"
],
[
"matplotlib.pyplot.scatter",
"numpy.rint",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"scipy.stats.pearsonr"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
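MultiMetrics.get_correlations above walks every pair of the solar, wind and load curves with itertools.combinations and scores each pair with scipy's Pearson correlation. The same loop on synthetic capacity-factor curves (the sinusoidal solar/load shapes and the uniform wind series are invented for the demo):

import numpy as np
import pandas as pd
from itertools import combinations
from scipy import stats

rng = np.random.default_rng(0)
t = np.arange(24 * 7)   # one synthetic week at hourly resolution
solar = pd.DataFrame({'capacity_factor': np.clip(np.sin(2 * np.pi * t / 24), 0, None)})
wind = pd.DataFrame({'capacity_factor': rng.uniform(0, 1, size=t.size)})
load = pd.DataFrame({'capacity_factor': 0.6 + 0.3 * np.clip(np.sin(2 * np.pi * t / 24), 0, None)})

curves = {'solar': solar, 'wind': wind, 'load': load}
correlations = []
for (name_a, a), (name_b, b) in combinations(curves.items(), r=2):
    corr = stats.pearsonr(a.capacity_factor, b.capacity_factor)[0]
    correlations.append({'metric': 'correlation',
                         'series_type': f'{name_a}-{name_b}',
                         'value': corr})
print(correlations)   # solar-load correlates strongly here; pairs with wind do not
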
Frognar/Super-Resolution | [
"406b909d71e156aa11ee589698744e3ad9abfee7",
"406b909d71e156aa11ee589698744e3ad9abfee7"
] | [
"nn/block/upsample_blocks.py",
"train_esrgan.py"
] | [
"import torch.nn as nn\nfrom torch.nn.functional import interpolate\n\n\nclass PixelShuffleUpscaleBlock(nn.Module):\n def __init__(self, in_channels=64, kernel_size=3, upscale_factor=2):\n super().__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_channels,\n out_channels=in_channels * (upscale_factor ** 2),\n kernel_size=kernel_size, padding=kernel_size // 2),\n nn.PixelShuffle(upscale_factor=upscale_factor),\n nn.PReLU()\n )\n\n def forward(self, input_data):\n return self.block(input_data)\n\n\nclass UpscaleBlock(nn.Module):\n def __init__(self, channels=64, kernel_size=3, upscale_factor=2):\n super().__init__()\n self.scale_factor = upscale_factor\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=channels, out_channels=channels,\n kernel_size=kernel_size, padding=kernel_size // 2),\n nn.LeakyReLU(negative_slope=0.2, inplace=True)\n )\n\n def forward(self, input_data):\n return self.block(self.upscale(input_data))\n\n def upscale(self, data):\n return interpolate(data, scale_factor=self.scale_factor, mode='nearest')\n",
"import torch\nfrom torch.nn import BCEWithLogitsLoss, L1Loss\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\n\nfrom dataset import ImageNetDataset\nfrom nn.feature_extractor import TruncatedVgg\nfrom nn.loss import DiscriminatorLoss, PerceptualLoss\nfrom nn.model import DenseGenerator, Discriminator\nfrom trainers import ReGANTrainer\nfrom utils import Converter, parse_training_args\n\n\ndef main():\n args = parse_training_args(\"ESRGAN\")\n epochs = args.epochs\n load_path = args.load\n init_path = args.init\n out_path = args.out\n cuda = args.cuda\n device = torch.device('cuda' if torch.cuda.is_available() and cuda else 'cpu')\n\n g_net = DenseGenerator().to(device)\n g_criterion = PerceptualLoss(\n feature_extractor=TruncatedVgg(with_activation_layer=False),\n content_criterion=L1Loss(),\n adversarial_criterion=BCEWithLogitsLoss(),\n ).to(device)\n g_optimizer = Adam(params=filter(lambda p: p.requires_grad, g_net.parameters()),\n lr=1e-4)\n g_scheduler = ReduceLROnPlateau(optimizer=g_optimizer, factor=0.5, patience=3,\n verbose=True)\n\n d_net = Discriminator().to(device)\n d_criterion = DiscriminatorLoss(criterion=BCEWithLogitsLoss()).to(device)\n d_optimizer = Adam(params=filter(lambda p: p.requires_grad, d_net.parameters()),\n lr=1e-4)\n d_scheduler = ReduceLROnPlateau(optimizer=d_optimizer, factor=0.5, patience=3,\n verbose=True)\n\n converter = Converter()\n dataset = ImageNetDataset(json_path='data/train.json', converter=converter)\n data_loader = DataLoader(dataset=dataset, batch_size=4, num_workers=4,\n pin_memory=True, shuffle=True)\n\n trainer = ReGANTrainer(g_net=g_net, g_criterion=g_criterion, g_optimizer=g_optimizer,\n g_scheduler=g_scheduler, d_net=d_net, d_criterion=d_criterion,\n d_optimizer=d_optimizer, d_scheduler=d_scheduler,\n data_loader=data_loader, device=device)\n\n if init_path:\n trainer.load_pretrained_generator(init_path)\n\n if load_path:\n trainer.load(load_path)\n\n trainer.train(max_epochs=epochs, save_path=out_path)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.PReLU",
"torch.nn.Conv2d",
"torch.nn.PixelShuffle",
"torch.nn.LeakyReLU",
"torch.nn.functional.interpolate"
],
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"torch.cuda.is_available",
"torch.nn.L1Loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
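The two blocks in upsample_blocks.py double spatial resolution in different ways: PixelShuffleUpscaleBlock expands channels by upscale_factor**2 and folds them into space, while UpscaleBlock interpolates first and then convolves. A quick shape check of both paths on a dummy feature map:

import torch
import torch.nn as nn

x = torch.randn(1, 64, 24, 24)

pixel_shuffle_path = nn.Sequential(
    nn.Conv2d(64, 64 * 2 ** 2, kernel_size=3, padding=1),   # expand channels by upscale_factor**2
    nn.PixelShuffle(upscale_factor=2),                       # fold the extra channels into H and W
    nn.PReLU(),
)
print(pixel_shuffle_path(x).shape)   # torch.Size([1, 64, 48, 48])

upscaled = nn.functional.interpolate(x, scale_factor=2, mode='nearest')
conv_path = nn.Sequential(
    nn.Conv2d(64, 64, kernel_size=3, padding=1),
    nn.LeakyReLU(negative_slope=0.2),
)
print(conv_path(upscaled).shape)     # torch.Size([1, 64, 48, 48])
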
TannerGilbert/Machine-Learning-Explained | [
"5309f44a38ce862f3f177e8d5de2e60eea44637b",
"5309f44a38ce862f3f177e8d5de2e60eea44637b",
"5309f44a38ce862f3f177e8d5de2e60eea44637b"
] | [
"Optimizers/adam/code/adam.py",
"Algorithms/k_nearest_neighbors/code/k_nearest_neighbors.py",
"Optimizers/qhadam/code/qhadam.py"
] | [
"# based on https://ruder.io/optimizing-gradient-descent/#adam\n# and https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/optimizers.py#L106\n\nimport numpy as np\n\n\nclass Adam:\n \"\"\"Adam - Adaptive Moment Estimation\n Parameters:\n -----------\n learning_rate: float = 0.001\n The step length used when following the negative gradient.\n beta_1: float = 0.9\n The exponential decay rate for the 1st moment estimates.\n beta_2: float = 0.999\n The exponential decay rate for the 2nd moment estimates.\n epsilon: float = 1e-07\n A small floating point value to avoid zero denominator.\n \"\"\"\n def __init__(self, learning_rate: float = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7) -> None:\n self.learning_rate = learning_rate\n self.epsilon = epsilon\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n\n self.t = 0\n self.m = None # Decaying averages of past gradients\n self.v = None # Decaying averages of past squared gradients\n\n def update(self, w: np.ndarray, grad_wrt_w: np.ndarray) -> np.ndarray:\n self.t += 1\n if self.m is None:\n self.m = np.zeros(np.shape(grad_wrt_w))\n self.v = np.zeros(np.shape(grad_wrt_w))\n\n self.m = self.beta_1 * self.m + (1 - self.beta_1) * grad_wrt_w\n self.v = self.beta_2 * self.v + (1 - self.beta_2) * np.power(grad_wrt_w, 2)\n\n m_hat = self.m / (1 - self.beta_1**self.t)\n v_hat = self.v / (1 - self.beta_2**self.t)\n\n w_update = self.learning_rate * m_hat / (np.sqrt(v_hat) + self.epsilon)\n\n return w - w_update\n",
"from __future__ import annotations\nfrom typing import Union\nimport numpy as np\nfrom collections import Counter\n\n\nclass KNearestNeighbors:\n \"\"\"K Nearest Neighbors classifier.\n Parameters:\n -----------\n k: int\n The number of closest neighbors\n \"\"\"\n def __init__(self, k: int) -> None:\n self.X = None\n self.y = None\n self.k = k\n\n def fit(self, X: Union[list, np.ndarray], y: Union[list, np.ndarray]) -> KNearestNeighbors:\n self.X = X\n self.y = y\n return self\n\n def euclidean_distance(self, X_test: Union[list, np.ndarray]) -> list:\n return [np.linalg.norm(X - X_test) for X in self.X]\n\n def k_nearest(self, X: Union[list, np.ndarray]) -> np.ndarray:\n idx = np.argpartition(X, self.k)\n return np.take(self.y, idx[:self.k])\n\n def predict(self, X: Union[list, np.ndarray]) -> np.ndarray:\n distances_list = [self.euclidean_distance(x) for x in X]\n return np.array([Counter(self.k_nearest(distances)).most_common()[0][0] for distances in distances_list])\n\n\nif __name__ == '__main__':\n import pandas as pd\n from sklearn.model_selection import train_test_split\n from sklearn.preprocessing import LabelEncoder\n df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',\n names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'label'])\n X, y = (np.array(df.drop('label', axis=1)),\n LabelEncoder().fit_transform(np.array(df['label'])))\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=42)\n model = KNearestNeighbors(4)\n model.fit(X_train, y_train)\n predictions = model.predict(X_test)\n print('Accuracy:', (predictions == y_test).sum()/len(predictions)*100)\n",
"# based on https://arxiv.org/pdf/1810.06801.pdf\n\nimport numpy as np\n\n\nclass QHAdam:\n \"\"\"QHAdam - Quasi-Hyperbolic Adam\n Parameters:\n -----------\n learning_rate: float = 0.001\n The step length used when following the negative gradient.\n beta_1: float = 0.9\n The exponential decay rate for the 1st moment estimates.\n beta_2: float = 0.999\n The exponential decay rate for the 2nd moment estimates.\n epsilon: float = 1e-07\n A small floating point value to avoid zero denominator.\n v_1: float = 0.7\n Immediate discount factor\n v_2: float = 1.0\n Immediate discount factor\n \"\"\"\n def __init__(self, learning_rate: float = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, v_1: float = 0.7, v_2: float = 1.0) -> None:\n self.learning_rate = learning_rate\n self.epsilon = epsilon\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.v_1 = v_1\n self.v_2 = v_2\n\n self.t = 0\n self.m = None # Decaying averages of past gradients\n self.v = None # Decaying averages of past squared gradients\n\n def update(self, w: np.ndarray, grad_wrt_w: np.ndarray) -> np.ndarray:\n self.t += 1\n if self.m is None:\n self.m = np.zeros(np.shape(grad_wrt_w))\n self.v = np.zeros(np.shape(grad_wrt_w))\n\n self.m = self.beta_1 * self.m + (1 - self.beta_1) * grad_wrt_w\n self.v = self.beta_2 * self.v + (1 - self.beta_2) * np.power(grad_wrt_w, 2)\n\n m_hat = self.m / (1 - self.beta_1**self.t)\n v_hat = self.v / (1 - self.beta_2**self.t)\n\n w_update = self.learning_rate * ((1 - self.v_1) * grad_wrt_w + self.v_1 * m_hat) / (np.sqrt((1 - self.v_2) * np.power(grad_wrt_w, 2) + self.v_2 * v_hat) + self.epsilon)\n\n return w - w_update\n"
] | [
[
"numpy.shape",
"numpy.sqrt",
"numpy.power"
],
[
"pandas.read_csv",
"numpy.take",
"numpy.linalg.norm",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.LabelEncoder",
"numpy.argpartition",
"numpy.array"
],
[
"numpy.shape",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
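The Adam class above keeps decaying first- and second-moment estimates, bias-corrects them, and steps against sqrt(v_hat); QHAdam's update collapses to exactly this when v_1 = v_2 = 1.0. A small sketch of those update equations on the toy objective f(w) = (w - 3)^2 (the learning rate 0.1 and the 200 steps are arbitrary choices for the demo):

import numpy as np

learning_rate, beta_1, beta_2, epsilon = 0.1, 0.9, 0.999, 1e-7
w = np.array([0.0])
m = np.zeros_like(w)   # decaying average of past gradients
v = np.zeros_like(w)   # decaying average of past squared gradients

for t in range(1, 201):
    grad = 2 * (w - 3)                                   # gradient of (w - 3)**2
    m = beta_1 * m + (1 - beta_1) * grad
    v = beta_2 * v + (1 - beta_2) * grad ** 2
    m_hat = m / (1 - beta_1 ** t)                        # bias-corrected first moment
    v_hat = v / (1 - beta_2 ** t)                        # bias-corrected second moment
    w = w - learning_rate * m_hat / (np.sqrt(v_hat) + epsilon)

print(w)   # converges towards [3.]
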
mustelideos/td-opswtw-competition-rl | [
"afbd6603b74f09c133d5d68e587fc93387ca93ba",
"afbd6603b74f09c133d5d68e587fc93387ca93ba"
] | [
"models/neural_net.py",
"env.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.utils.checkpoint import checkpoint\n\nimport math\nimport numpy as np\n\n# ------------------------------------------------------------------------------\n# Transformer model from: https://github.com/JayParks/transformer\n# and https://github.com/jadore801120/attention-is-all-you-need-pytorch\n\n\nclass ScaledDotProductAttention(nn.Module):\n def __init__(self, d_k):\n super(ScaledDotProductAttention, self).__init__()\n self.scale_factor = np.sqrt(d_k)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, q, k, v, attn_mask=None):\n # q: [b_size x len_q x d_k]\n # k: [b_size x len_k x d_k]\n # v: [b_size x len_v x d_v] note: (len_k == len_v)\n attn = torch.bmm(q, k.transpose(1, 2)) / self.scale_factor # attn: [b_size x len_q x len_k]\n if attn_mask is not None:\n # assert attn_mask.size() == attn.size()\n attn.data.masked_fill_(attn_mask==0, -1e32)\n\n attn = self.softmax(attn )\n outputs = torch.bmm(attn, v) # outputs: [b_size x len_q x d_v]\n return outputs, attn\n\n\nclass _MultiHeadAttention(nn.Module):\n def __init__(self, d_model, n_heads):\n super(_MultiHeadAttention, self).__init__()\n\n self.d_k = d_model // n_heads\n self.d_v = d_model // n_heads\n self.d_model = d_model\n self.n_heads = n_heads\n\n self.w_q = nn.Parameter(torch.FloatTensor(n_heads, d_model, self.d_k))\n self.w_k = nn.Parameter(torch.FloatTensor(n_heads, d_model, self.d_k))\n self.w_v = nn.Parameter(torch.FloatTensor(n_heads, d_model, self.d_v))\n\n self.attention = ScaledDotProductAttention(self.d_k)\n\n def forward(self, q, k, v, attn_mask=None, use_adj_mask=False):\n (d_k, d_v, d_model, n_heads) = (self.d_k, self.d_v, self.d_model, self.n_heads)\n b_size = k.size(0)\n\n q_s = q.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_q x d_model]\n k_s = k.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_k x d_model]\n v_s = v.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_v x d_model]\n\n q_s = torch.bmm(q_s, self.w_q).view(b_size * n_heads, -1, d_k) # [b_size * n_heads x len_q x d_k]\n k_s = torch.bmm(k_s, self.w_k).view(b_size * n_heads, -1, d_k) # [b_size * n_heads x len_k x d_k]\n v_s = torch.bmm(v_s, self.w_v).view(b_size * n_heads, -1, d_v) # [b_size * n_heads x len_v x d_v]\n\n if attn_mask is not None:\n if use_adj_mask:\n outputs, attn = self.attention(q_s, k_s, v_s, attn_mask=attn_mask.repeat(n_heads, 1, 1))\n else:\n outputs, attn = self.attention(q_s, k_s, v_s, attn_mask=attn_mask.unsqueeze(1).repeat(n_heads, 1, 1))\n else:\n outputs, attn = self.attention(q_s, k_s, v_s, attn_mask=None)\n\n return torch.split(outputs, b_size, dim=0), attn\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, d_model, n_heads):\n super(MultiHeadAttention, self).__init__()\n\n self.d_k = d_model // n_heads\n self.attention = _MultiHeadAttention(d_model, n_heads)\n self.proj = nn.Linear(n_heads * self.d_k, d_model)\n self.layer_norm = nn.LayerNorm(d_model)\n\n def forward(self, q, k, v, attn_mask = None, use_adj_mask = False):\n # q: [b_size x len_q x d_model]\n # k: [b_size x len_k x d_model]\n # v: [b_size x len_v x d_model] note (len_k == len_v)\n\n # outputs: a list of tensors of shape [b_size x len_q x d_v] (length: n_heads)\n outputs, attn = self.attention(q, k, v, attn_mask=attn_mask, use_adj_mask=use_adj_mask)\n # concatenate 'n_heads' multi-head attentions\n outputs = torch.cat(outputs, dim=-1)\n # project back to 
residual size, result_size = [b_size x len_q x d_model]\n outputs = self.proj(outputs)\n\n return outputs \n\n\n#----------- Pointer models common blocks ---------------------\n\nclass Attention(nn.Module):\n # Bahdanau Attention (sum)\n def __init__(self, hidden_size, is_glimpse=False, C=10):\n \n super(Attention, self).__init__()\n \n self.C = C # tanh exploration\n self.W1 = nn.Linear(hidden_size, hidden_size, bias=False)\n self.W2 = nn.Linear(hidden_size, hidden_size)\n self.V = nn.Parameter(torch.zeros((hidden_size, 1), requires_grad=True))\n self.is_glimpse = is_glimpse\n\n def forward(self, h0, enc_outputs, mask):\n \n w1e = self.W1(enc_outputs)\n w2h = self.W2(h0).unsqueeze(1)\n u = torch.tanh(w1e + w2h)\n a = u.matmul(self.V)\n\n if self.is_glimpse:\n att = F.softmax(a, dim=1).transpose(1, 2)\n out = torch.bmm(att, enc_outputs).squeeze(1)\n return out\n else:\n a = self.C*torch.tanh(a).squeeze(2)\n policy = F.softmax(a + mask.float().log(), dim=1)\n return policy\n\n\nclass Decoder(nn.Module):\n def __init__(self, hidden_size, has_glimpse = False):\n super(Decoder, self).__init__()\n\n self.has_glimpse = has_glimpse\n self.first_h_0 = nn.Parameter(torch.FloatTensor(1, hidden_size), requires_grad=True)\n self.first_h_0.data.uniform_(-(1. / math.sqrt(hidden_size)), 1. / math.sqrt(hidden_size))\n\n self.c0 = nn.Parameter(torch.FloatTensor( 1, hidden_size),requires_grad=True)\n self.c0.data.uniform_(-(1. / math.sqrt(hidden_size)), 1. / math.sqrt(hidden_size))\n\n self.hidden_0 = (self.first_h_0, self.c0)\n\n self.lstm = nn.LSTMCell(hidden_size, hidden_size)\n\n self.pointer = Attention(hidden_size)\n if self.has_glimpse:\n self.glimpse = Attention(hidden_size, is_glimpse=True)\n\n def forward(self, input, hidden, enc_outputs, mask):\n hidden = self.lstm(input, hidden)\n\n if self.has_glimpse:\n glimpse_h0 = self.glimpse(hidden[0], enc_outputs, mask)\n policy = self.pointer(glimpse_h0, enc_outputs, mask)\n else:\n policy = self.pointer(hidden[0], enc_outputs, mask)\n return policy, hidden\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, d_model, d_ff, n_heads, dropout, pre_lnorm=False):\n super(EncoderLayer, self).__init__()\n\n self.pre_lnorm = pre_lnorm\n self.self_attn = MultiHeadAttention(d_model, n_heads)\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n self.layer_norm1 = nn.LayerNorm(d_model)\n self.layer_norm2 = nn.LayerNorm(d_model)\n self.relu = nn.ReLU()\n\n def forward(self, src, rec_src, self_attn_mask, use_adj_mask = False):\n\n if self.pre_lnorm:\n src = self.layer_norm1(src)\n src2 = self.self_attn(src, rec_src, src, attn_mask=self_attn_mask, use_adj_mask = use_adj_mask)\n src = src + self.dropout1(src2)\n src = self.layer_norm2(src)\n src2 = self.w_2(self.dropout2(self.relu(self.w_1(src))))\n src = src + self.dropout3(src2)\n else:\n src2 = self.self_attn(src, rec_src, src, attn_mask=self_attn_mask, use_adj_mask = use_adj_mask)\n src = src + self.dropout1(src2)\n src = self.layer_norm1(src)\n src2 = self.w_2(self.dropout2(self.relu(self.w_1(src))))\n src = src + self.dropout3(src2)\n src = self.layer_norm2(src)\n return src\n\n\nclass Encoder(nn.Module):\n def __init__(self, features_dim, dfeatures_dim, hidden_size, args):\n super(Encoder, self).__init__()\n\n n_heads = args.n_heads # number of heads\n d_ff = args.ff_dim # feed_forward_hidden\n n_layers = args.n_layers # number of Layers\n dropout = args.dropout\n 
self.pre_lnorm = args.pre_lnorm\n self.L1 = nn.Linear(features_dim, hidden_size//2) # for static features\n self.L2 = nn.Linear(dfeatures_dim, hidden_size//2) # for dynamic features\n\n self.layers = nn.ModuleList([EncoderLayer(hidden_size, d_ff, n_heads, dropout, pre_lnorm=self.pre_lnorm) for _ in range(n_layers)])\n self.last_norm = nn.LayerNorm(hidden_size)\n self.use_adj_mask = args.use_lookahead\n\n def forward(self, emb_inp, rec_inp, mask, dummy_arg):\n for layer in self.layers:\n emb_inp = layer(emb_inp, rec_inp, mask, self.use_adj_mask)\n\n if self.pre_lnorm:\n emb_inp = self.last_norm(emb_inp)\n return emb_inp\n\n\nclass Agent(nn.Module):\n\n def __init__(self, features_dim, dfeatures_dim, hidden_dim, args, has_glimpse = False):\n super(Agent, self).__init__()\n\n self.features_dim = features_dim\n self.dfeatures_dim = dfeatures_dim\n self.use_checkpoint = args.use_checkpoint\n self.hidden_dim = hidden_dim\n self.decoder = Decoder(hidden_dim, has_glimpse)\n self.encoder = Encoder(features_dim, dfeatures_dim, hidden_dim, args)\n # see https://discuss.pytorch.org/t/checkpoint-with-no-grad-requiring-inputs-problem/19117/11\n self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)\n\n self._initialize_parameters()\n\n def _initialize_parameters(self):\n for name, param in self.named_parameters():\n if len(param.shape) > 1:\n nn.init.xavier_uniform_(param)\n\n def _load_model_weights(self, path_string, device):\n self.load_state_dict(torch.load(path_string, map_location=device))\n\n\n def forward(self, enc_inputs, enc_hidden, adj_mask, dec_input, dec_hidden, mask, first_step=False):\n policy, dec_hidden, enc_outputs = self._one_step(enc_inputs, enc_hidden, adj_mask, dec_input, dec_hidden, mask, first_step)\n return policy, dec_hidden, enc_outputs\n\n def _one_step(self, enc_inputs, enc_hidden, adj_mask, dec_input, dec_hidden, mask, first_step):\n if self.use_checkpoint:\n enc_outputs = checkpoint(self.encoder, enc_inputs, enc_hidden, adj_mask, self.dummy_tensor)\n else:\n enc_outputs = self.encoder(enc_inputs, enc_hidden, adj_mask, self.dummy_tensor)\n\n if first_step:\n return None, None, enc_outputs\n else:\n policy, dec_hidden = self.decoder(dec_input, dec_hidden, enc_outputs, mask)\n return policy, dec_hidden, enc_outputs\n\n def sta_emb(self, sta_inp):\n return torch.tanh(self.encoder.L1(sta_inp))\n\n def dyn_emb(self, dyn_inp):\n return torch.tanh(self.encoder.L2(dyn_inp))\n",
"import numpy as np\nimport random\nimport op_utils.instance as u_i\nimport op_utils.op as u_o\n\n\nclass Env:\n maxT_pen = -1.0\n tw_pen = -1.0\n\n def __init__(self, n_nodes=50, seed=None, from_file=False, x_path=None, adj_path=None):\n\n self.x = None\n self.adj = None\n self.seed = seed\n np.random.seed(self.seed)\n random.seed(self.seed)\n self.sim_counter = 0\n self.name = None\n if from_file:\n self.x, self.adj, self.instance_name = u_i.read_instance(x_path, adj_path)\n self.n_nodes = len(self.x)\n else:\n assert n_nodes is not None, 'if no file is given, n_nodes is required'\n self.n_nodes = n_nodes\n self.instance_name = ''\n self.x, self.adj = u_i.make_instance(self.n_nodes, seed=self.seed)\n\n def get_features(self):\n return self.x, self.adj\n\n def check_solution(self, sol):\n\n assert len(sol) == len(self.x) + 1, 'len(sol) = ' + str(len(sol)) + ', n_nodes+1 = ' + str(len(self.x) + 1)\n assert len(sol) == len(set(sol)) + 1\n self.sim_counter += 1\n self.name = f'tour{self.sim_counter:03}'\n tour_time, rewards, pen, feas = u_o.tour_check(sol, self.x, self.adj, self.maxT_pen,\n self.tw_pen, self.n_nodes)\n return tour_time, rewards, pen, feas\n\n\nif __name__ == '__main__':\n env = Env(n_nodes=5, seed=1235)\n sol = [1, 2, 1, 4, 3, 5]\n print(sol)\n for _ in range(10):\n print(env.check_solution(sol))\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.ones",
"numpy.sqrt",
"torch.cat",
"torch.zeros",
"torch.load",
"torch.nn.LayerNorm",
"torch.tanh",
"torch.nn.Linear",
"torch.nn.LSTMCell",
"torch.nn.init.xavier_uniform_",
"torch.FloatTensor",
"torch.bmm",
"torch.utils.checkpoint.checkpoint",
"torch.split",
"torch.nn.ReLU"
],
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
michelebucelli/cardioemulator | [
"0ce8d5fce017a7251865ab01fdf3d0653490b60f",
"0ce8d5fce017a7251865ab01fdf3d0653490b60f"
] | [
"example/circulation_closed_loop.py",
"cardioemulator/_compute_convergence.py"
] | [
"import numpy as np\nimport pandas as pd\nimport json\nimport csv\nimport time\nfrom scipy.integrate import RK45, solve_ivp\n\nclass circulation_closed_loop:\n \"\"\"\n Closed loop circulation model.\n\n References\n ----------\n F. Regazzoni, M. Salvador, P. C. Africa, M. Fedele, L. Dede', A. Quarteroni,\n \"A cardiac electromechanics model coupled with a lumped parameters model for\n closed-loop blood circulation. Part I: model derivation\", arXiv (2020)\n https://arxiv.org/abs/2011.15040\n\n \"\"\"\n\n def __init__(self, options = dict()):\n\n if isinstance(options, str):\n with open(options, mode='r', newline='') as inputfile:\n options = json.loads(inputfile.read())\n\n ############ Heartbeat\n self.BPM = float(options.get('BPM', 72)) # [1 / min]\n self.THB = 60. / self.BPM # [s], Heartbeat period\n\n ############ Chambers\n # LA\n options_curr = options.get('LA', dict())\n EA_LA = float(options_curr.get('EA', 0.07)) # [mmHg / ml]\n EB_LA = float(options_curr.get('EB', 0.09)) # [mmHg / ml]\n TC_LA = float(options_curr.get('TC', 0.17)) * self.THB # [s]\n TR_LA = float(options_curr.get('TR', 0.17)) * self.THB # [s]\n tC_LA = float(options_curr.get('tC', 0.80)) * self.THB # [s]\n self.V0_LA = float(options_curr.get('V0', 4.0)) # [ml]\n self.E_LA = self.time_varying_elastance(EA_LA, EB_LA, tC_LA, TC_LA, TR_LA)\n\n # LV\n options_curr = options.get('LV', dict())\n EA_LV = float(options_curr.get('EA', 2.75)) # [mmHg / ml]\n EB_LV = float(options_curr.get('EB', 0.08)) # [mmHg / ml]\n TC_LV = float(options_curr.get('TC', 0.34)) * self.THB # [s]\n TR_LV = float(options_curr.get('TR', 0.17)) * self.THB # [s]\n tC_LV = float(options_curr.get('tC', 0.00)) * self.THB # [s]\n self.V0_LV = float(options_curr.get('V0', 5.0)) # [ml]\n self.E_LV = self.time_varying_elastance(EA_LV, EB_LV, tC_LV, TC_LV, TR_LV)\n\n # RA\n options_curr = options.get('RA', dict())\n EA_RA = float(options_curr.get('EA', 0.06)) # [mmHg / ml]\n EB_RA = float(options_curr.get('EB', 0.07)) # [mmHg / ml]\n TC_RA = float(options_curr.get('TC', 0.17)) * self.THB # [s]\n TR_RA = float(options_curr.get('TR', 0.17)) * self.THB # [s]\n tC_RA = float(options_curr.get('tC', 0.80)) * self.THB # [s]\n self.V0_RA = float(options_curr.get('V0', 4.0)) # [ml]\n self.E_RA = self.time_varying_elastance(EA_RA, EB_RA, tC_RA, TC_RA, TR_RA)\n\n # RV\n options_curr = options.get('RV', dict())\n EA_RV = float(options_curr.get('EA', 0.55)) # [mmHg / ml]\n EB_RV = float(options_curr.get('EB', 0.05)) # [mmHg / ml]\n TC_RV = float(options_curr.get('TC', 0.34)) * self.THB # [s]\n TR_RV = float(options_curr.get('TR', 0.17)) * self.THB # [s]\n tC_RV = float(options_curr.get('tC', 0.00)) * self.THB # [s]\n self.V0_RV = float(options_curr.get('V0', 10.0)) # [ml]\n self.E_RV = self.time_varying_elastance(EA_RV, EB_RV, tC_RV, TC_RV, TR_RV)\n\n ############ Valves\n heavisideMY = lambda x: np.arctan( np.pi / 2 * x * 200 ) * 1 / np.pi + 0.5\n options_curr = options.get('valves', dict())\n Rmin = float(options_curr.get('Rmin', 0.0075)) # [mmHg s / ml]\n Rmax = float(options_curr.get('Rmax', 75006.2)) # [mmHg s / ml]\n self.R_MV = lambda w, v: 10.**( np.log10( Rmin ) + ( np.log10( Rmax ) - np.log10( Rmin ) ) * heavisideMY( v - w ) )\n self.R_AV = lambda w, v: 10.**( np.log10( Rmin ) + ( np.log10( Rmax ) - np.log10( Rmin ) ) * heavisideMY( v - w ) )\n self.R_TV = lambda w, v: 10.**( np.log10( Rmin ) + ( np.log10( Rmax ) - np.log10( Rmin ) ) * heavisideMY( v - w ) )\n self.R_PV = lambda w, v: 10.**( np.log10( Rmin ) + ( np.log10( Rmax ) - np.log10( Rmin ) ) * 
heavisideMY( v - w ) )\n\n ############ Systemic circulation\n options_curr = options.get('SYS', dict())\n self.R_AR_SYS = float(options_curr.get('R_AR' , 0.8 )) # [mmHg s /ml]\n self.C_AR_SYS = float(options_curr.get('C_AR' , 1.2 )) # [ml / mmHg]\n self.R_VEN_SYS = float(options_curr.get('R_VEN', 0.26)) # [mmHg s /ml]\n self.C_VEN_SYS = float(options_curr.get('C_VEN', 60. )) # [ml / mmHg]\n self.L_AR_SYS = float(options_curr.get('L_AR' , 5e-3)) # [mmHg s^2 / ml]\n self.L_VEN_SYS = float(options_curr.get('L_VEN', 5e-4)) # [mmHg s^2 / ml]\n\n ############ Pulmonary circulation\n options_curr = options.get('PUL', dict())\n self.R_AR_PUL = float(options_curr.get('R_AR' , 0.1625)) # [mmHg s /ml]\n self.C_AR_PUL = float(options_curr.get('C_AR' , 10. )) # [ml / mmHg]\n self.R_VEN_PUL = float(options_curr.get('R_VEN', 0.1625)) # [mmHg s /ml]\n self.C_VEN_PUL = float(options_curr.get('C_VEN', 16. )) # [ml / mmHg]\n self.L_AR_PUL = float(options_curr.get('L_AR' , 5e-4 )) # [mmHg s^2 / ml]\n self.L_VEN_PUL = float(options_curr.get('L_VEN', 5e-4 )) # [mmHg s^2 / ml]\n\n ############ PV relationships\n self.p_LA_func = lambda V, t: self.E_LA(t) * ( V - self.V0_LA )\n self.p_LV_func = lambda V, t: self.E_LV(t) * ( V - self.V0_LV )\n self.p_RA_func = lambda V, t: self.E_RA(t) * ( V - self.V0_RA )\n self.p_RV_func = lambda V, t: self.E_RV(t) * ( V - self.V0_RV )\n\n def flux_through_valve(self, p1, p2, R):\n return ( p1 - p2 ) / R( p1, p2 )\n\n def time_varying_elastance(self, EA, EB, time_C, duration_C, duration_R):\n time_R = time_C + duration_C\n e = lambda t: 0.5 * ( 1 - np.cos( np.pi / duration_C * ( np.mod( t - time_C, self.THB ) ) ) ) * ( 0 <= np.mod( t - time_C, self.THB ) ) * ( np.mod( t - time_C, self.THB ) < duration_C ) + \\\n 0.5 * ( 1 + np.cos( np.pi / duration_R * ( np.mod( t - time_R, self.THB ) ) ) ) * ( 0 <= np.mod( t - time_R, self.THB ) ) * ( np.mod( t - time_R, self.THB ) < duration_R )\n return lambda t: EA * np.clip(e(t), 0.0, 1.0) + EB\n\n def initialize(self, initial_state = dict()):\n\n if isinstance(initial_state, str):\n with open(initial_state, mode='r', newline='') as inputfile:\n initial_state = json.loads(inputfile.read())\n\n self.V_LA = float(initial_state.get('V_LA' , 65.)) # [ml]\n self.V_LV = float(initial_state.get('V_LV' , 120.)) # [ml]\n self.V_RA = float(initial_state.get('V_RA' , 65.)) # [ml]\n self.V_RV = float(initial_state.get('V_RV' , 145.)) # [ml]\n\n self.p_AR_SYS = float(initial_state.get('p_AR_SYS' , 80.)) # [mmHg]\n self.p_VEN_SYS = float(initial_state.get('p_VEN_SYS', 30.)) # [mmHg]\n self.p_AR_PUL = float(initial_state.get('p_AR_PUL' , 35.)) # [mmHg]\n self.p_VEN_PUL = float(initial_state.get('p_VEN_PUL', 24.)) # [mmHg]\n\n self.Q_AR_SYS = float(initial_state.get('Q_AR_SYS' , 0.)) # [ml/s]\n self.Q_VEN_SYS = float(initial_state.get('Q_VEN_SYS', 0.)) # [ml/s]\n self.Q_AR_PUL = float(initial_state.get('Q_AR_PUL' , 0.)) # [ml/s]\n self.Q_VEN_PUL = float(initial_state.get('Q_VEN_PUL', 0.)) # [ml/s]\n\n self.update_static_variables(0.)\n\n def update_static_variables(self, t):\n self.p_LA = self.p_LA_func(self.V_LA, t)\n self.p_LV = self.p_LV_func(self.V_LV, t)\n self.p_RA = self.p_RA_func(self.V_RA, t)\n self.p_RV = self.p_RV_func(self.V_RV, t)\n\n self.Q_MV = self.flux_through_valve( self.p_LA, self.p_LV , self.R_MV )\n self.Q_AV = self.flux_through_valve( self.p_LV, self.p_AR_SYS, self.R_AV )\n self.Q_TV = self.flux_through_valve( self.p_RA, self.p_RV , self.R_TV )\n self.Q_PV = self.flux_through_valve( self.p_RV, self.p_AR_PUL, self.R_PV )\n\n def 
solve_step_FE(self, t, dt):\n self.update_static_variables(t)\n\n self.V_LA += dt * ( self.Q_VEN_PUL - self.Q_MV )\n self.V_LV += dt * ( self.Q_MV - self.Q_AV )\n self.V_RA += dt * ( self.Q_VEN_SYS - self.Q_TV )\n self.V_RV += dt * ( self.Q_TV - self.Q_PV )\n self.p_AR_SYS += dt * ( self.Q_AV - self.Q_AR_SYS ) / self.C_AR_SYS\n self.p_VEN_SYS += dt * ( self.Q_AR_SYS - self.Q_VEN_SYS ) / self.C_VEN_SYS\n self.p_AR_PUL += dt * ( self.Q_PV - self.Q_AR_PUL ) / self.C_AR_PUL\n self.p_VEN_PUL += dt * ( self.Q_AR_PUL - self.Q_VEN_PUL ) / self.C_VEN_PUL\n self.Q_AR_SYS += -dt * ( self.R_AR_SYS * self.Q_AR_SYS + self.p_VEN_SYS - self.p_AR_SYS ) / self.L_AR_SYS\n self.Q_VEN_SYS += -dt * ( self.R_VEN_SYS * self.Q_VEN_SYS + self.p_RA - self.p_VEN_SYS ) / self.L_VEN_SYS\n self.Q_AR_PUL += -dt * ( self.R_AR_PUL * self.Q_AR_PUL + self.p_VEN_PUL - self.p_AR_PUL ) / self.L_AR_PUL\n self.Q_VEN_PUL += -dt * ( self.R_VEN_PUL * self.Q_VEN_PUL + self.p_LA - self.p_VEN_PUL ) / self.L_VEN_PUL\n\n def solve(self, T = None, num_cycles = None,\n initial_state = None,\n dt = 1e-3,\n dt_eval = None):\n\n print('Circulation model - running simulation...')\n if (T is None and num_cycles is None) or (T is not None and num_cycles is not None):\n raise Exception('Exactly one among T and num_cycles should be not None.')\n\n if num_cycles is not None:\n T = self.THB * num_cycles\n if dt_eval is None:\n output_every_n_steps = 1\n else:\n output_every_n_steps = np.round(dt_eval / dt)\n times = np.arange(0, T, dt)\n\n self.initialize(initial_state = initial_state)\n self.initialize_output()\n self.dump_output(0.0)\n\n time_start = time.time()\n\n for iT in range(1, times.shape[0]):\n self.solve_step_FE(times[iT], dt)\n if iT % output_every_n_steps == 0:\n self.dump_output(times[iT])\n\n duration = time.time() - time_start\n\n print('Circulation model - elapsed time %1.4f s' % duration)\n return pd.DataFrame(self.results)\n\n def initialize_output(self):\n self.results = dict()\n self.results['time'] = list()\n self.results['VLA'] = list()\n self.results['VLV'] = list()\n self.results['VRA'] = list()\n self.results['VRV'] = list()\n self.results['pARSYS'] = list()\n self.results['pVENSYS'] = list()\n self.results['pARPUL'] = list()\n self.results['pVENPUL'] = list()\n self.results['QARSYS'] = list()\n self.results['QVENSYS'] = list()\n self.results['QARPUL'] = list()\n self.results['QVENPUL'] = list()\n self.results['pLA'] = list()\n self.results['pLV'] = list()\n self.results['pRA'] = list()\n self.results['pRV'] = list()\n self.results['ELA'] = list()\n self.results['ELV'] = list()\n self.results['ERA'] = list()\n self.results['ERV'] = list()\n self.results['QMV'] = list()\n self.results['QAV'] = list()\n self.results['QTV'] = list()\n self.results['QPV'] = list()\n\n def dump_output(self, t):\n self.results['time' ].append(t)\n self.results['VLA' ].append(self.V_LA)\n self.results['VLV' ].append(self.V_LV)\n self.results['VRA' ].append(self.V_RA)\n self.results['VRV' ].append(self.V_RV)\n self.results['pARSYS' ].append(self.p_AR_SYS)\n self.results['pVENSYS'].append(self.p_VEN_SYS)\n self.results['pARPUL' ].append(self.p_AR_PUL)\n self.results['pVENPUL'].append(self.p_VEN_PUL)\n self.results['QARSYS' ].append(self.Q_AR_SYS)\n self.results['QVENSYS'].append(self.Q_VEN_SYS)\n self.results['QARPUL' ].append(self.Q_AR_PUL)\n self.results['QVENPUL'].append(self.Q_VEN_PUL)\n self.results['pLA' ].append(self.p_LA)\n self.results['pLV' ].append(self.p_LV)\n self.results['pRA' ].append(self.p_RA)\n self.results['pRV' 
].append(self.p_RV)\n self.results['ELA' ].append(self.E_LA(t))\n self.results['ELV' ].append(self.E_LV(t))\n self.results['ERA' ].append(self.E_RA(t))\n self.results['ERV' ].append(self.E_RV(t))\n self.results['QMV' ].append(self.Q_MV)\n self.results['QAV' ].append(self.Q_AV)\n self.results['QTV' ].append(self.Q_TV)\n self.results['QPV' ].append(self.Q_PV)\n\n def save_state(self, filename):\n\n with open(filename, mode='w', newline='') as outfile:\n state = dict()\n state['V_LA'] = float(self.V_LA)\n state['V_LV'] = float(self.V_LV)\n state['V_RA'] = float(self.V_RA)\n state['V_RV'] = float(self.V_RV)\n state['p_LA'] = float(self.p_LA)\n state['p_LV'] = float(self.p_LV)\n state['p_RA'] = float(self.p_RA)\n state['p_RV'] = float(self.p_RV)\n state['p_AR_SYS'] = float(self.p_AR_SYS)\n state['p_VEN_SYS'] = float(self.p_VEN_SYS)\n state['p_AR_PUL'] = float(self.p_AR_PUL)\n state['p_VEN_PUL'] = float(self.p_VEN_PUL)\n state['Q_AR_SYS'] = float(self.Q_AR_SYS)\n state['Q_VEN_SYS'] = float(self.Q_VEN_SYS)\n state['Q_AR_PUL'] = float(self.Q_AR_PUL)\n state['Q_VEN_PUL'] = float(self.Q_VEN_PUL)\n json.dump(state, outfile, indent=2)\n\n def print_info(self):\n\n print('V_LA = %4.2f mL' % self.V_LA)\n print('V_LV = %4.2f mL' % self.V_LV)\n print('V_RA = %4.2f mL' % self.V_RA)\n print('V_RV = %4.2f mL' % self.V_RV)\n print('V_AR_SYS = %4.2f mL' % (self.C_AR_SYS * self.p_AR_SYS ))\n print('V_VEN_SYS = %4.2f mL' % (self.C_VEN_SYS * self.p_VEN_SYS))\n print('V_AR_PUL = %4.2f mL' % (self.C_AR_PUL * self.p_AR_PUL ))\n print('V_VEN_PUL = %4.2f mL' % (self.C_VEN_PUL * self.p_VEN_PUL))\n\n V_tot_heart = self.V_LA + self.V_LV + self.V_RA + self.V_RV\n V_tot_SYS = self.C_AR_SYS * self.p_AR_SYS \\\n + self.C_VEN_SYS * self.p_VEN_SYS\n V_tot_PUL = self.C_AR_PUL * self.p_AR_PUL \\\n + self.C_VEN_PUL * self.p_VEN_PUL\n V_tot = V_tot_heart + V_tot_SYS + V_tot_PUL\n print('======================')\n print('V (heart) = %4.2f mL' % V_tot_heart)\n print('V (SYS) = %4.2f mL' % V_tot_SYS)\n print('V (PUL) = %4.2f mL' % V_tot_PUL)\n print('======================')\n print('V = %4.2f mL' % V_tot)",
"import numpy as np\nimport pandas as pd\n\ndef compute_convergence(file_PV_loops, period):\n \"\"\"\n Evaluate the convergence to the limit cycle of a series of PV loops.\n\n Parameters\n ----------\n file_PV_loops : str\n Path of a csv file containing pressure and volume transients.\n Expected columns:\n\n - ``time``: time\n - ``VLV``: volume\n - ``pLV``: pressure\n\n period : float\n Heartbeat period (measure unit must be the same of the `time` column).\n\n Returns\n -------\n res : dict\n Dictionary with convergence metrics (listed below).\n Each metric contains a list, associated with the differences between\n consecutive heartbeats, or between a given heartbeat and the last one.\n If `n` PV loops are provided, each list contains `n - 1` elements.\n\n - ``err_rel_2_prs``: relative mean square error (norm 2) on pressure (between cycles)\n - ``err_rel_2_vol``: relative mean square error (norm 2) on volume (between cycles)\n - ``err_rel_I_prs``: relative maximum error (infinity) on pressure (between cycles)\n - ``err_rel_I_vol``: relative maximum error (infinity) on volume (between cycles)\n - ``err_rel_2_prs_E``: relative mean square error (norm 2) on pressure (wrt last cycle)\n - ``err_rel_2_vol_E``: relative mean square error (norm 2) on volume (wrt last cycle)\n - ``err_rel_I_prs_E``: relative maximum error (infinity) on pressure (wrt last cycle)\n - ``err_rel_I_vol_E``: relative maximum error (infinity) on volume (wrt last cycle)\n - ``err_rel_2_tot``: relative mean square error (norm 2) on pressure and volume (between cycles)\n - ``err_rel_2_tot_E``: relative mean square error (norm 2) on pressure and volume (wrt last cycle)\n - ``n_loops``: number of heartbeats\n\n \"\"\"\n data_loop = pd.read_csv(file_PV_loops)\n\n err_rel_2_prs = list()\n err_rel_2_vol = list()\n err_rel_I_prs = list()\n err_rel_I_vol = list()\n\n err_rel_2_prs_E = list()\n err_rel_2_vol_E = list()\n err_rel_I_prs_E = list()\n err_rel_I_vol_E = list()\n\n err_rel_2_tot = list()\n err_rel_2_tot_E = list()\n\n iters_per_loop = sum(data_loop.time < period)\n n_loops = int(np.round(data_loop.time.max() / period))\n data_loops = [data_loop[(i + 0) * iters_per_loop:(i + 1) * iters_per_loop] for i in range(n_loops)]\n\n volE = np.array(data_loops[-1].VLV)\n prsE = np.array(data_loops[-1].pLV)\n nrm_2_prs_E = np.linalg.norm(prsE, 2 )\n nrm_2_vol_E = np.linalg.norm(volE, 2 )\n nrm_I_prs_E = np.linalg.norm(prsE, np.Inf)\n nrm_I_vol_E = np.linalg.norm(volE, np.Inf)\n\n for i in range(n_loops - 1):\n data1 = data_loops[i]\n data2 = data_loops[i+1]\n\n prs1 = np.array(data1.pLV)\n prs2 = np.array(data2.pLV)\n vol1 = np.array(data1.VLV)\n vol2 = np.array(data2.VLV)\n err_2_prs = np.linalg.norm(prs1 - prs2, 2 )\n nrm_2_prs = np.linalg.norm(prs2 , 2 )\n err_2_vol = np.linalg.norm(vol1 - vol2, 2 )\n nrm_2_vol = np.linalg.norm(vol2 , 2 )\n err_I_prs = np.linalg.norm(prs1 - prs2, np.Inf)\n nrm_I_prs = np.linalg.norm(prs2 , np.Inf)\n err_I_vol = np.linalg.norm(vol1 - vol2, np.Inf)\n nrm_I_vol = np.linalg.norm(vol2 , np.Inf)\n err_rel_2_prs.append(err_2_prs / nrm_2_prs)\n err_rel_2_vol.append(err_2_vol / nrm_2_vol)\n err_rel_I_prs.append(err_I_prs / nrm_I_prs)\n err_rel_I_vol.append(err_I_vol / nrm_I_vol)\n err_2_prs_E = np.linalg.norm(prs1 - prsE, 2 )\n err_2_vol_E = np.linalg.norm(vol1 - volE, 2 )\n err_I_prs_E = np.linalg.norm(prs1 - prsE, np.Inf)\n err_I_vol_E = np.linalg.norm(vol1 - volE, np.Inf)\n err_rel_2_prs_E.append(err_2_prs_E / nrm_2_prs_E)\n err_rel_2_vol_E.append(err_2_vol_E / nrm_2_vol_E)\n 
err_rel_I_prs_E.append(err_I_prs_E / nrm_I_prs_E)\n err_rel_I_vol_E.append(err_I_vol_E / nrm_I_vol_E)\n\n err_rel_2_tot.append( err_2_prs / nrm_2_prs + err_2_vol / nrm_2_vol )\n err_rel_2_tot_E.append(err_2_prs_E / nrm_2_prs_E + err_2_vol_E / nrm_2_vol_E)\n\n results = dict()\n results['n_loops'] = n_loops\n results['err_rel_2_prs'] = err_rel_2_prs\n results['err_rel_2_vol'] = err_rel_2_vol\n results['err_rel_I_prs'] = err_rel_I_prs\n results['err_rel_I_vol'] = err_rel_I_vol\n results['err_rel_2_prs_E'] = err_rel_2_prs_E\n results['err_rel_2_vol_E'] = err_rel_2_vol_E\n results['err_rel_I_prs_E'] = err_rel_I_prs_E\n results['err_rel_I_vol_E'] = err_rel_I_vol_E\n results['err_rel_2_tot'] = err_rel_2_tot\n results['err_rel_2_tot_E'] = err_rel_2_tot_E\n\n return results\n"
] | [
[
"numpy.arctan",
"numpy.arange",
"pandas.DataFrame",
"numpy.round",
"numpy.log10",
"numpy.mod"
],
[
"numpy.array",
"pandas.read_csv",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kim95175/detr | [
"342947185153e1f599b47da423a0c49329bbe055"
] | [
"main.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport argparse\nimport datetime\nimport json\nimport random\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, DistributedSampler\n\nimport datasets\nimport util.misc as utils\nfrom datasets import build_dataset, get_coco_api_from_dataset\nfrom engine import evaluate, train_one_epoch\nfrom models import build_model\nfrom detr_dataset import *\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Set transformer detector', add_help=False)\n parser.add_argument('--lr', default=1e-4, type=float)\n parser.add_argument('--lr_backbone', default=1e-5, type=float)\n parser.add_argument('--batch_size', default=32, type=int)\n parser.add_argument('--weight_decay', default=1e-4, type=float)\n parser.add_argument('--epochs', default=200, type=int)\n parser.add_argument('--lr_drop', default=200, type=int)\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\n help='gradient clipping max norm')\n\n # Model parameters\n parser.add_argument('--frozen_weights', type=str, default=None,\n help=\"Path to the pretrained model. If set, only the mask head will be trained\")\n # * Backbone\n parser.add_argument('--backbone', default='resnet50', type=str,\n help=\"Name of the convolutional backbone to use\")\n parser.add_argument('--dilation', action='store_true',\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n\n # * Transformer\n parser.add_argument('--enc_layers', default=6, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=6, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=2048, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=20, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--pre_norm', action='store_true')\n\n # * Segmentation\n parser.add_argument('--masks', action='store_true',\n help=\"Train segmentation head if the flag is provided\")\n\n # Loss\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n # * Matcher\n parser.add_argument('--set_cost_class', default=1, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_cost_bbox', default=5, type=float,\n help=\"L1 box coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=2, type=float,\n help=\"giou box coefficient in the matching cost\")\n # * Loss coefficients\n parser.add_argument('--mask_loss_coef', default=1, type=float)\n parser.add_argument('--dice_loss_coef', default=1, type=float)\n parser.add_argument('--bbox_loss_coef', default=5, type=float)\n parser.add_argument('--giou_loss_coef', default=2, type=float)\n 
parser.add_argument('--eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n\n # dataset parameters\n parser.add_argument('--dataset_file', default='coco')\n parser.add_argument('--coco_path', type=str)\n parser.add_argument('--coco_panoptic_path', type=str)\n parser.add_argument('--remove_difficult', action='store_true')\n\n parser.add_argument('--output_dir', default='./weights',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=42, type=int)\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--eval', action='store_true')\n parser.add_argument('--num_workers', default=2, type=int)\n\n # distributed training parameters\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\n \n parser.add_argument('--cutoff', type=int, default=320, #448, # 284, 246\n help='cut off the front of the input data, --> length = 2048 - cutoff')\n parser.add_argument('--vis', action='store_true', default=False,\n help='visualize the image for debugging')\n parser.add_argument('--val_vis', action='store_true', default=False,\n help='visualize the image for debugging')\n #parser.add_argument('--gpu-num', type=int, default=0,\n # help = 'gpu number if you use a single gpu, set this device number')\n return parser\n\n\ndef main(args):\n utils.init_distributed_mode(args)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n\n if args.frozen_weights is not None:\n assert args.masks, \"Frozen training is meant for segmentation only\"\n print(args)\n\n device = torch.device(args.device)\n\n # fix the seed for reproducibility\n seed = args.seed + utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n model, criterion, postprocessors = build_model(args)\n model.to(device)\n \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('number of params:', n_parameters)\n\n param_dicts = [\n {\"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" not in n and p.requires_grad]},\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" in n and p.requires_grad],\n \"lr\": args.lr_backbone,\n },\n ]\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,\n weight_decay=args.weight_decay)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)\n\n # UWB Dataset\n dataset_train = DetrDataset(mode='train', args=args)\n dataset_val = DetrDataset(mode='test', args=args)\n\n if args.distributed:\n sampler_train = DistributedSampler(dataset_train)\n sampler_val = DistributedSampler(dataset_val)#, shuffle=False)\n else:\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n\n batch_sampler_train = torch.utils.data.BatchSampler(\n sampler_train, args.batch_size, drop_last=True)\n\n data_loader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, collate_fn=detection_collate, 
num_workers=args.num_workers, pin_memory=True)\n data_loader_val = DataLoader(dataset_val, batch_size=args.batch_size, shuffle=True, collate_fn=detection_collate_var, num_workers=args.num_workers, pin_memory=True)\n\n '''\n if args.dataset_file == \"coco_panoptic\":\n # We also evaluate AP during panoptic training, on original coco DS\n coco_val = datasets.coco.build(\"val\", args)\n base_ds = get_coco_api_from_dataset(coco_val)\n else:\n base_ds = get_coco_api_from_dataset(dataset_val)\n '''\n #exit(-1)\n base_ds = None\n\n if args.frozen_weights is not None:\n checkpoint = torch.load(args.frozen_weights, map_location='cpu')\n model_without_ddp.detr.load_state_dict(checkpoint['model'])\n\n output_dir = Path(args.output_dir)\n if args.resume:\n if args.resume.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n args.resume, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(args.resume, map_location='cpu')\n model_without_ddp.load_state_dict(checkpoint['model'])\n if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n args.start_epoch = checkpoint['epoch'] + 1\n\n if args.eval:\n #test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,\n test_stats = evaluate(model, criterion, postprocessors,\n data_loader_val, base_ds, device, args.output_dir, val_vis=args.val_vis)\n #print(test_stats)\n #if args.output_dir:\n # utils.save_on_master(coco_evaluator.coco_eval[\"bbox\"].eval, output_dir / \"eval.pth\")\n return\n\n print(\"Start training\")\n start_time = time.time()\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n sampler_train.set_epoch(epoch)\n train_stats = train_one_epoch(\n model, criterion, data_loader_train, optimizer, device, epoch,\n args.clip_max_norm)\n lr_scheduler.step()\n if args.output_dir:\n checkpoint_paths = [output_dir / 'checkpoint.pth']\n # extra checkpoint before LR drop and every 10 epochs\n if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 10 == 0:\n checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')\n for checkpoint_path in checkpoint_paths:\n utils.save_on_master({\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n 'args': args,\n }, checkpoint_path)\n coco_evaluator = None\n\n #test_stats, coco_evaluator = evaluate(\n test_stats = evaluate(\n model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir, val_vis=args.val_vis\n )\n\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n **{f'test_{k}': v for k, v in test_stats.items()},\n 'epoch': epoch,\n 'n_parameters': n_parameters}\n\n if args.output_dir and utils.is_main_process():\n with (output_dir / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n # for evaluation logs\n '''\n if coco_evaluator is not None:\n (output_dir / 'eval').mkdir(exist_ok=True)\n if \"bbox\" in coco_evaluator.coco_eval:\n filenames = ['latest.pth']\n if epoch % 50 == 0:\n filenames.append(f'{epoch:03}.pth')\n for name in filenames:\n torch.save(coco_evaluator.coco_eval[\"bbox\"].eval,\n output_dir / \"eval\" / name)\n '''\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == 
'__main__':\n parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])\n args = parser.parse_args()\n if args.output_dir:\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\n main(args)\n"
] | [
[
"torch.utils.data.DistributedSampler",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"torch.optim.AdamW",
"torch.nn.parallel.DistributedDataParallel",
"torch.device",
"torch.hub.load_state_dict_from_url",
"torch.utils.data.BatchSampler",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
oguzhangur96/automl-benchmark | [
"785b4d762164dd251b7c5e63131579113c2dc2c2"
] | [
"autogluon/taxi_trip_duration.py"
] | [
"# %% [markdown]\n# This is a simple notebook for Autogluon AutoMl prediction.\n# MLflow used as tracking tool since experiments take long time complete\n# and it is hard to manage too many experiments.\n#%%\n# Importing necessary libraries\nimport os\nimport re\nimport random\nimport string\nimport math\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_log_error\nfrom autogluon import TabularPrediction as task\nimport mlflow\nfrom sklearn.metrics import mean_squared_error\nfrom autogluon.utils.tabular.metrics import make_scorer\n\n# %%\n# Initialize mlflow experiment\nmlflow.set_tracking_uri(f'..{os.sep}mlruns')\nexperiment_name = 'automl-benchmark'\ntry:\n experiment = mlflow.create_experiment(experiment_name)\nexcept:\n experiment = mlflow.get_experiment_by_name(experiment_name)\nmlflow.set_experiment(experiment_name)\n\n# Reading seeds\nseed_path = f'..{os.sep}data{os.sep}seeds.txt'\nseeds = []\nwith open(seed_path,mode ='r') as file:\n for seed in file:\n seed.strip(r'/n')\n seeds.append(int(seed))\n\ndataset_name = 'taxi_trip_duration'\ndata = pd.read_pickle(f'..{os.sep}data{os.sep}{dataset_name}{os.sep}{dataset_name}.pkl')\n# Renaming all the characters except for regex experresion\n# For some reason lightgbm gives error with some column names\ndata = data.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))\n#%%\nrun_time_secs = 600\ntarget_column = 'trip_duration'\n# Pickling other models require >1GB amount of space\n# Used hyperparameters option to discard other models\nhyper_parameters = {'NN':{},'GBM':{},'CAT':{},'LR':{} }\n# Since root_mean_squared_log_error does not exist in autogluon\n# it is defined with autogluon.utils.tabular.metrics.make_scorer\ndef RMSLE(y_true, y_pred):\n y_pred[y_pred < 0] = 0\n error = mean_squared_log_error(y_true, y_pred) \n return np.sqrt(error)\n\ncustom_metric = make_scorer('root_mean_squared_log_error',\n RMSLE,\n optimum=0,\n greater_is_better=False)\nfor seed in seeds:\n with mlflow.start_run(run_name='autogluon'):\n # Create output directory for auto gluon\n models_dir = 'AutogluonModels'\n random_dir = ''.join(random.choices(string.ascii_uppercase +\n string.digits, k = 12))\n output_dir = f'{models_dir}{os.sep}{random_dir}'\n os.mkdir(output_dir)\n # Split data into two parts (train, valid)\n train, valid = train_test_split(data, random_state = seed)\n predictor = task.fit(train_data=train, \n label=target_column,\n problem_type = 'regression',\n eval_metric = custom_metric, \n stopping_metric=custom_metric,\n hyperparameters= hyper_parameters,\n stack_ensemble_levels=2, \n time_limits = run_time_secs,\n cache_data=False, \n verbosity = 2,\n output_directory=output_dir)\n test_data = valid\n y_test = test_data[target_column] # values to predict\n test_data_nolab = test_data.drop(labels=[target_column],axis=1) # delete label column to prove we're not cheating\n # AutoGluon will gauge predictive performance using \n # evaluation metric: roc_auc this metric expects predicted probabilities \n # rather than predicted class labels, so you'll need to use predict_proba() \n # instead of predict()\n y_pred = predictor.predict_proba(test_data_nolab)\n score = RMSLE(y_test,y_pred)\n mlflow.log_metric('RMSLE', score)\n mlflow.log_param('seed', seed)\n mlflow.log_param('run_time', run_time_secs)\n mlflow.log_param('dataset_name', dataset_name)\n mlflow.log_param('model_name',predictor.leaderboard().iloc[0,0])\n mlflow.log_artifact(output_dir)"
] | [
[
"pandas.read_pickle",
"sklearn.model_selection.train_test_split",
"numpy.sqrt",
"sklearn.metrics.mean_squared_log_error"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lebrice/continuum | [
"7fa9048361b5821b61fa8ec1ac535c2438329626",
"7fa9048361b5821b61fa8ec1ac535c2438329626"
] | [
"continuum/task_set.py",
"continuum/datasets/transformed.py"
] | [
"from typing import Tuple, Union\n\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset as TorchDataset\nfrom torchvision import transforms\n\nfrom continuum.viz import plot\n\n\nclass TaskSet(TorchDataset):\n \"\"\"A task dataset returned by the CLLoader.\n\n :param x: The data, either image-arrays or paths to images saved on disk.\n :param y: The targets, not one-hot encoded.\n :param t: The task id of each sample.\n :param trsf: The transformations to apply on the images.\n :param data_type: Type of the data, either \"image_path\", \"image_array\", or \"text\".\n \"\"\"\n\n def __init__(\n self,\n x: np.ndarray,\n y: np.ndarray,\n t: np.ndarray,\n trsf: transforms.Compose,\n data_type: str = \"image_array\"\n ):\n self.x, self.y, self.t = x, y, t\n self.trsf = trsf\n self.data_type = data_type\n\n @property\n def nb_classes(self):\n \"\"\"The number of classes contained in the current task.\"\"\"\n return len(np.unique(self.y))\n\n def add_memory(\n self, x_memory: np.ndarray, y_memory: np.ndarray, t_memory: Union[None, np.ndarray] = None\n ):\n \"\"\"Add memory for rehearsal.\n\n :param x_memory: Sampled data chosen for rehearsal.\n :param y_memory: The associated targets of `x_memory`.\n :param t_memory: The associated task ids. If not provided, they will be\n defaulted to -1.\n \"\"\"\n self.x = np.concatenate((self.x, x_memory))\n self.y = np.concatenate((self.y, y_memory))\n if t_memory is not None:\n self.t = np.concatenate((self.t, t_memory))\n else:\n self.t = np.concatenate((self.t, -1 * np.ones(len(x_memory))))\n\n def plot(\n self,\n path: Union[str, None] = None,\n title: str = \"\",\n nb_per_class: int = 5,\n shape=None\n ) -> None:\n \"\"\"Plot samples of the current task, useful to check if everything is ok.\n\n :param path: If not None, save on disk at this path.\n :param title: The title of the figure.\n :param nb_per_class: Amount to sample per class.\n :param shape: Shape to resize the image before plotting.\n \"\"\"\n plot(self, title=title, path=path, nb_per_class=nb_per_class, shape=shape)\n\n def __len__(self) -> int:\n \"\"\"The amount of images in the current task.\"\"\"\n return self.x.shape[0]\n\n def get_sample(self, index: int) -> np.ndarray:\n \"\"\"Returns a Pillow image corresponding to the given `index`.\n\n :param index: Index to query the image.\n :return: A Pillow image.\n \"\"\"\n x = self.x[index]\n\n if self.data_type == \"image_path\":\n x = Image.open(x).convert(\"RGB\")\n elif self.data_type == \"image_array\":\n x = Image.fromarray(x.astype(\"uint8\"))\n elif self.data_type == \"text\":\n pass\n\n return x\n\n def __getitem__(self, index: int) -> Tuple[np.ndarray, int, int]:\n \"\"\"Method used by PyTorch's DataLoaders to query a sample and its target.\"\"\"\n img = self.get_sample(index)\n y = self.y[index]\n t = self.t[index]\n\n if self.trsf is not None:\n img = self.trsf(img)\n\n return img, y, t\n\n\ndef split_train_val(dataset: TaskSet, val_split: float = 0.1) -> Tuple[TaskSet, TaskSet]:\n \"\"\"Split train dataset into two datasets, one for training and one for validation.\n\n :param dataset: A torch dataset, with .x and .y attributes.\n :param val_split: Percentage to allocate for validation, between [0, 1[.\n :return: A tuple a dataset, respectively for train and validation.\n \"\"\"\n random_state = np.random.RandomState(seed=1)\n\n indexes = np.arange(len(dataset.x))\n random_state.shuffle(indexes)\n\n train_indexes = indexes[int(val_split * len(indexes)):]\n val_indexes = indexes[:int(val_split * 
len(indexes))]\n\n x, y, t = dataset.x, dataset.y, dataset.t\n train_dataset = TaskSet(\n x[train_indexes], y[train_indexes], t[train_indexes], dataset.trsf, dataset.data_type\n )\n val_dataset = TaskSet(\n x[val_indexes], y[val_indexes], t[val_indexes], dataset.trsf, dataset.data_type\n )\n\n return train_dataset, val_dataset\n",
"from typing import Tuple\n\nimport numpy as np\nfrom scipy import ndimage\n\nfrom continuum.datasets import MNIST\n\n\nclass PermutedMNIST(MNIST):\n \"\"\"A dataset made of MNIST and pixels permutations.\n\n The first task is the famous MNIST with 10 classes. Then all following tasks\n are the same MNIST but with pixels permuted in a random way.\n\n Note that classes are the same, only their representation changes.\n\n # Reference:\n * Overcoming catastrophic forgetting in neural networks\n Kirkpatrick et al.\n PNAS 2017\n\n :param nb_permutations: Number of permutations in addition of the original MNIST.\n \"\"\"\n\n def __init__(self, *args, nb_permutations=4, **kwargs):\n MNIST.__init__(self, *args, **kwargs)\n\n self._transformations = list(range(nb_permutations))\n self._mapping = None\n\n @property\n def need_class_remapping(self) -> bool:\n \"\"\"Flag for method `class_remapping`.\"\"\"\n return True\n\n def class_remapping(self, class_ids: np.ndarray) -> np.ndarray:\n \"\"\"Optional class remapping.\n\n Remap class ids so that whatever the permutations, the targets stay\n the same (0-9).\n For example, the second task, with permuted pixels, has targets (10-19)\n in order to mimick a class-incremental training but in reality those\n targets are (0-9).\n The remaping is done so that for the end-user, any tasks of PermutedMNIST\n has targets in the (0-9) range.\n\n :param class_ids: Original class_ids.\n :return: A remapping of the class ids.\n \"\"\"\n if self._mapping is None:\n self._mapping = np.concatenate(\n [np.arange(10) for _ in range(len(self._transformations) + 1)]\n )\n return self._mapping[class_ids]\n\n def init(self, train: bool) -> Tuple[np.ndarray, np.ndarray, None]:\n base_data = MNIST.init(self, train)\n\n x, y = [base_data[0]], [base_data[1]]\n class_increment = len(np.unique(base_data[1]))\n\n for i, value in enumerate(self._transformations, start=1):\n x_transformed = self._transform(base_data[0], value)\n\n x.append(x_transformed)\n y.append(base_data[1] + i * class_increment)\n\n x = np.concatenate(x)\n y = np.concatenate(y)\n\n return x, y, None\n\n def _transform(self, x: np.ndarray, value: int) -> np.ndarray:\n # It's important to generate a new random state with a given seed\n # So that every run produces the same transformation,\n # and also that train & test have the same transformation.\n random_state = np.random.RandomState(seed=value)\n permutations = random_state.permutation(x.shape[1] * x.shape[2])\n\n shape = x.shape\n\n x_transformed = x.reshape((shape[0], -1))[..., permutations].reshape(shape)\n\n return x_transformed\n\n\nclass RotatedMNIST(PermutedMNIST):\n \"\"\"A dataset made of MNIST and various rotations.\n\n The first task is the famous MNIST with 10 classes. Then all following tasks\n are the same MNIST but with a fixed rotations per task.\n\n Note that classes are the same, only their representation changes.\n\n # Reference:\n * Gradient Episodic Memory for Continual Learning\n Lopez-Paz and Ranzato\n NeurIPS 2017\n\n :param angles: A list of angles used in the rotation.\n \"\"\"\n\n def __init__(self, *args, angles=[45, 90, 135, 180], **kwargs):\n MNIST.__init__(self, *args, **kwargs) # pylint: disable=non-parent-init-called\n\n self._transformations = angles\n self._mapping = None\n\n def _transform(self, x: np.ndarray, value: int) -> np.ndarray:\n x_transformed = ndimage.rotate(x, angle=value, axes=(2, 1), reshape=False)\n return x_transformed\n"
] | [
[
"numpy.concatenate",
"numpy.random.RandomState",
"numpy.unique"
],
[
"numpy.unique",
"numpy.arange",
"scipy.ndimage.rotate",
"numpy.concatenate",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
FerdinandEiteneuer/ReinforcementLearning | [
"15c75d7f984bd0a8a25b9df822113d8837aa4a93"
] | [
"utils/memory.py"
] | [
"\"\"\"\nMemory\n\"\"\"\nimport numpy as np\nimport os\n\nfrom utils import export\n\n\n@export\nclass NumpyArrayMemory:\n \"\"\"\n Datastructure for all the experiences (states, actions, rewards, next_states)\n the agent saw.\n \"\"\"\n def __init__(self, size, input_shape, nb_actions, data_dir):\n\n self.data_dir = data_dir\n if data_dir is not None:\n self.memory_path = os.path.join(data_dir, 'memory.npy')\n\n self.size = size\n self.input_shape = input_shape\n self.nb_actions = nb_actions\n\n shape_mem = size, input_shape + nb_actions\n self.memory = np.zeros(shape_mem)\n\n self.add_index = 0\n\n def add(self, states, qvalues):\n\n idx = self.add_index % self.size\n data = list(states) + list(qvalues)\n\n self.memory[idx] = data\n self.add_index += 1\n\n def save(self, path=None):\n\n if path is None:\n path = self.memory_path\n\n np.save(file=path, arr=self.memory)\n\n def load(self, path=None):\n\n if path is None:\n path = self.memory_path\n\n try:\n self.memory = np.load(path)\n except FileNotFoundError as e:\n print(f'Memory could not be loaded: {e}')\n\n def ready(self):\n \"\"\"\n Does the memory still need to be filled up? Can Training begin?\n Not very reliable implementation, but it will do.\n \"\"\"\n assert self.memory is not None\n return np.any(self.memory[-1] != 0)\n\n def complete_training_data(self):\n \"\"\"\n Prepares the data in a format used for keras.\n \"\"\"\n # one Q_memory row consists of [state, Qvalues(dim=nb_actions)]\n states = self.memory[:, :-self.nb_actions]\n targets = self.memory[:, -self.nb_actions:]\n\n return states, targets\n\n def get_batch(self, batch_size):\n raise NotImplementedError\n \"\"\"\n deprecated\n memory = self.Q_memory[:self.episodes]\n batch_size = max(1, min(self.batch_size, self.e))\n\n if self.episodes > self.size_Q_memory: # memory is filled\n indices = np.random.choice(range(self.size_Q_memory), self.batch_size)\n x_train = self.Q_memory[:, :-1][indices]\n y_train = self.Q_memory[:, -1][indices]\n return x_train, y_train, True\n\n else: # memory is too small\n return None, None, False\n \"\"\"\n"
] | [
[
"numpy.load",
"numpy.zeros",
"numpy.any",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KEVINYZY/python-tutorial | [
"d0f7348e1da4ff954e3add66e1aae55d599283ee",
"d0f7348e1da4ff954e3add66e1aae55d599283ee"
] | [
"17tensorflow/mnist/__init__.py",
"12gensim/05.tensorflow.py"
] | [
"# -*- coding: utf-8 -*-\n# Author: XuMing <[email protected]>\n# Data: 17/10/10\n# Brief: \nimport tensorflow as tf\nimport numpy as np\n\n# 使用 NumPy 生成假数据(phony data), 总共 100 个点.\nx_data = np.float32(np.random.rand(2, 100)) # 随机输入\ny_data = np.dot([0.100, 0.200], x_data) + 0.300\n\n# 构造一个线性模型\n#\nb = tf.Variable(tf.zeros([1]))\nW = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))\ny = tf.matmul(W, x_data) + b\n\n# 最小化方差\nloss = tf.reduce_mean(tf.square(y - y_data))\noptimizer = tf.train.GradientDescentOptimizer(0.5)\ntrain = optimizer.minimize(loss)\n\n# 初始化变量\ninit = tf.global_variables_initializer()\n\n# 启动图 (graph)\nsess = tf.Session()\nsess.run(init)\n\n# 拟合平面\nfor step in range(0, 201):\n sess.run(train)\n if step % 20 == 0:\n print(step, sess.run(W), sess.run(b))\n\n# 得到最佳拟合结果 W: [[0.100 0.200]], b: [0.300]",
"# -*- coding: utf-8 -*-\n\"\"\"\n@description: \n@author:XuMing\n\"\"\"\nfrom __future__ import print_function # 兼容python3的print写法\nfrom __future__ import unicode_literals # 兼容python3的编码处理\n\nimport tensorflow as tf\n\nx = tf.constant(1, tf.float32)\ny = tf.nn.relu(x)\ndy = tf.gradients(y, x)\nddy = tf.gradients(dy, x)\nwith tf.Session() as sess:\n print(sess.run(y))\n print(sess.run(dy))\n print(sess.run(ddy))\n"
] | [
[
"numpy.dot",
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"numpy.random.rand",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.random_uniform"
],
[
"tensorflow.nn.relu",
"tensorflow.gradients",
"tensorflow.constant",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
joybhallaa/pandas | [
"1779155552631a30d4bb176dec70b8cc477defd7",
"fc9fdba6592bdb5d0d1147ce4d65639acd897565",
"fc9fdba6592bdb5d0d1147ce4d65639acd897565"
] | [
"pandas/core/internals/concat.py",
"pandas/tests/dtypes/cast/test_promote.py",
"pandas/core/computation/expressions.py"
] | [
"from __future__ import annotations\n\nfrom collections import defaultdict\nimport copy\nimport itertools\nfrom typing import TYPE_CHECKING, Dict, List, Sequence, cast\n\nimport numpy as np\n\nfrom pandas._libs import internals as libinternals\nfrom pandas._typing import ArrayLike, DtypeObj, Manager, Shape\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_extension_array_dtype,\n is_float_dtype,\n is_numeric_dtype,\n is_sparse,\n is_timedelta64_dtype,\n)\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.missing import isna_all\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import DatetimeArray, ExtensionArray\nfrom pandas.core.internals.array_manager import ArrayManager\nfrom pandas.core.internals.blocks import make_block\nfrom pandas.core.internals.managers import BlockManager\n\nif TYPE_CHECKING:\n from pandas import Index\n from pandas.core.arrays.sparse.dtype import SparseDtype\n\n\ndef concatenate_block_managers(\n mgrs_indexers, axes: List[Index], concat_axis: int, copy: bool\n) -> Manager:\n \"\"\"\n Concatenate block managers into one.\n\n Parameters\n ----------\n mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples\n axes : list of Index\n concat_axis : int\n copy : bool\n\n Returns\n -------\n BlockManager\n \"\"\"\n if isinstance(mgrs_indexers[0][0], ArrayManager):\n\n if concat_axis == 1:\n # TODO for now only fastpath without indexers\n mgrs = [t[0] for t in mgrs_indexers]\n arrays = [\n concat_compat([mgrs[i].arrays[j] for i in range(len(mgrs))], axis=0)\n for j in range(len(mgrs[0].arrays))\n ]\n return ArrayManager(arrays, [axes[1], axes[0]])\n elif concat_axis == 0:\n mgrs = [t[0] for t in mgrs_indexers]\n arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))\n return ArrayManager(arrays, [axes[1], axes[0]])\n\n concat_plans = [\n _get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers\n ]\n concat_plan = _combine_concat_plans(concat_plans, concat_axis)\n blocks = []\n\n for placement, join_units in concat_plan:\n\n if len(join_units) == 1 and not join_units[0].indexers:\n b = join_units[0].block\n values = b.values\n if copy:\n values = values.copy()\n else:\n values = values.view()\n b = b.make_block_same_class(values, placement=placement)\n elif _is_uniform_join_units(join_units):\n blk = join_units[0].block\n vals = [ju.block.values for ju in join_units]\n\n if not blk.is_extension:\n # _is_uniform_join_units ensures a single dtype, so\n # we can use np.concatenate, which is more performant\n # than concat_compat\n values = np.concatenate(vals, axis=blk.ndim - 1)\n else:\n # TODO(EA2D): special-casing not needed with 2D EAs\n values = concat_compat(vals)\n if not isinstance(values, ExtensionArray):\n values = values.reshape(1, len(values))\n\n if blk.values.dtype == values.dtype:\n # Fast-path\n b = blk.make_block_same_class(values, placement=placement)\n else:\n b = make_block(values, placement=placement, ndim=blk.ndim)\n else:\n b = make_block(\n _concatenate_join_units(join_units, concat_axis, copy=copy),\n placement=placement,\n ndim=len(axes),\n )\n blocks.append(b)\n\n return BlockManager(blocks, axes)\n\n\ndef _get_mgr_concatenation_plan(mgr: BlockManager, indexers: Dict[int, np.ndarray]):\n \"\"\"\n Construct concatenation plan for given block manager and 
indexers.\n\n Parameters\n ----------\n mgr : BlockManager\n indexers : dict of {axis: indexer}\n\n Returns\n -------\n plan : list of (BlockPlacement, JoinUnit) tuples\n\n \"\"\"\n # Calculate post-reindex shape , save for item axis which will be separate\n # for each block anyway.\n mgr_shape_list = list(mgr.shape)\n for ax, indexer in indexers.items():\n mgr_shape_list[ax] = len(indexer)\n mgr_shape = tuple(mgr_shape_list)\n\n if 0 in indexers:\n ax0_indexer = indexers.pop(0)\n blknos = algos.take_nd(mgr.blknos, ax0_indexer, fill_value=-1)\n blklocs = algos.take_nd(mgr.blklocs, ax0_indexer, fill_value=-1)\n else:\n\n if mgr.is_single_block:\n blk = mgr.blocks[0]\n return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]\n\n ax0_indexer = None\n blknos = mgr.blknos\n blklocs = mgr.blklocs\n\n plan = []\n for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):\n\n assert placements.is_slice_like\n\n join_unit_indexers = indexers.copy()\n\n shape_list = list(mgr_shape)\n shape_list[0] = len(placements)\n shape = tuple(shape_list)\n\n if blkno == -1:\n unit = JoinUnit(None, shape)\n else:\n blk = mgr.blocks[blkno]\n ax0_blk_indexer = blklocs[placements.indexer]\n\n unit_no_ax0_reindexing = (\n len(placements) == len(blk.mgr_locs)\n and\n # Fastpath detection of join unit not\n # needing to reindex its block: no ax0\n # reindexing took place and block\n # placement was sequential before.\n (\n (\n ax0_indexer is None\n and blk.mgr_locs.is_slice_like\n and blk.mgr_locs.as_slice.step == 1\n )\n or\n # Slow-ish detection: all indexer locs\n # are sequential (and length match is\n # checked above).\n (np.diff(ax0_blk_indexer) == 1).all()\n )\n )\n\n # Omit indexer if no item reindexing is required.\n if unit_no_ax0_reindexing:\n join_unit_indexers.pop(0, None)\n else:\n join_unit_indexers[0] = ax0_blk_indexer\n\n unit = JoinUnit(blk, shape, join_unit_indexers)\n\n plan.append((placements, unit))\n\n return plan\n\n\nclass JoinUnit:\n def __init__(self, block, shape: Shape, indexers=None):\n # Passing shape explicitly is required for cases when block is None.\n if indexers is None:\n indexers = {}\n self.block = block\n self.indexers = indexers\n self.shape = shape\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({repr(self.block)}, {self.indexers})\"\n\n @cache_readonly\n def needs_filling(self) -> bool:\n for indexer in self.indexers.values():\n # FIXME: cache results of indexer == -1 checks.\n if (indexer == -1).any():\n return True\n\n return False\n\n @cache_readonly\n def dtype(self):\n blk = self.block\n if blk is None:\n raise AssertionError(\"Block is None, no dtype\")\n\n if not self.needs_filling:\n return blk.dtype\n return ensure_dtype_can_hold_na(blk.dtype)\n\n @cache_readonly\n def is_na(self) -> bool:\n if self.block is None:\n return True\n\n if not self.block._can_hold_na:\n return False\n\n # Usually it's enough to check but a small fraction of values to see if\n # a block is NOT null, chunks should help in such cases. 
1000 value\n # was chosen rather arbitrarily.\n values = self.block.values\n if is_sparse(self.block.values.dtype):\n return False\n elif self.block.is_extension:\n # TODO(EA2D): no need for special case with 2D EAs\n values_flat = values\n else:\n values_flat = values.ravel(order=\"K\")\n\n return isna_all(values_flat)\n\n def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:\n if upcasted_na is None:\n # No upcasting is necessary\n fill_value = self.block.fill_value\n values = self.block.get_values()\n else:\n fill_value = upcasted_na\n\n if self.is_na:\n blk_dtype = getattr(self.block, \"dtype\", None)\n\n if blk_dtype == np.dtype(object):\n # we want to avoid filling with np.nan if we are\n # using None; we already know that we are all\n # nulls\n values = self.block.values.ravel(order=\"K\")\n if len(values) and values[0] is None:\n fill_value = None\n\n if is_datetime64tz_dtype(blk_dtype) or is_datetime64tz_dtype(\n empty_dtype\n ):\n if self.block is None:\n # TODO(EA2D): special case unneeded with 2D EAs\n i8values = np.full(self.shape[1], fill_value.value)\n return DatetimeArray(i8values, dtype=empty_dtype)\n elif is_categorical_dtype(blk_dtype):\n pass\n elif is_extension_array_dtype(blk_dtype):\n pass\n elif is_extension_array_dtype(empty_dtype):\n missing_arr = empty_dtype.construct_array_type()._from_sequence(\n [], dtype=empty_dtype\n )\n ncols, nrows = self.shape\n assert ncols == 1, ncols\n empty_arr = -1 * np.ones((nrows,), dtype=np.intp)\n return missing_arr.take(\n empty_arr, allow_fill=True, fill_value=fill_value\n )\n else:\n missing_arr = np.empty(self.shape, dtype=empty_dtype)\n missing_arr.fill(fill_value)\n return missing_arr\n\n if (not self.indexers) and (not self.block._can_consolidate):\n # preserve these for validation in concat_compat\n return self.block.values\n\n if self.block.is_bool and not self.block.is_categorical:\n # External code requested filling/upcasting, bool values must\n # be upcasted to object to avoid being upcasted to numeric.\n values = self.block.astype(np.object_).values\n elif self.block.is_extension:\n values = self.block.values\n else:\n # No dtype upcasting is done here, it will be performed during\n # concatenation itself.\n values = self.block.values\n\n if not self.indexers:\n # If there's no indexing to be done, we want to signal outside\n # code that this array must be copied explicitly. 
This is done\n # by returning a view and checking `retval.base`.\n values = values.view()\n\n else:\n for ax, indexer in self.indexers.items():\n values = algos.take_nd(values, indexer, axis=ax)\n\n return values\n\n\ndef _concatenate_join_units(\n join_units: List[JoinUnit], concat_axis: int, copy: bool\n) -> ArrayLike:\n \"\"\"\n Concatenate values from several join units along selected axis.\n \"\"\"\n if concat_axis == 0 and len(join_units) > 1:\n # Concatenating join units along ax0 is handled in _merge_blocks.\n raise AssertionError(\"Concatenating join units along axis0\")\n\n empty_dtype = _get_empty_dtype(join_units)\n\n has_none_blocks = any(unit.block is None for unit in join_units)\n upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)\n\n to_concat = [\n ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)\n for ju in join_units\n ]\n\n if len(to_concat) == 1:\n # Only one block, nothing to concatenate.\n concat_values = to_concat[0]\n if copy:\n if isinstance(concat_values, np.ndarray):\n # non-reindexed (=not yet copied) arrays are made into a view\n # in JoinUnit.get_reindexed_values\n if concat_values.base is not None:\n concat_values = concat_values.copy()\n else:\n concat_values = concat_values.copy()\n elif any(isinstance(t, ExtensionArray) for t in to_concat):\n # concatting with at least one EA means we are concatting a single column\n # the non-EA values are 2D arrays with shape (1, n)\n to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat]\n concat_values = concat_compat(to_concat, axis=0)\n if not isinstance(concat_values, ExtensionArray) or (\n isinstance(concat_values, DatetimeArray) and concat_values.tz is None\n ):\n # if the result of concat is not an EA but an ndarray, reshape to\n # 2D to put it a non-EA Block\n # special case DatetimeArray, which *is* an EA, but is put in a\n # consolidated 2D block\n concat_values = np.atleast_2d(concat_values)\n else:\n concat_values = concat_compat(to_concat, axis=concat_axis)\n\n return concat_values\n\n\ndef _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):\n \"\"\"\n Find the NA value to go with this dtype.\n \"\"\"\n if is_extension_array_dtype(dtype):\n return dtype.na_value\n elif dtype.kind in [\"m\", \"M\"]:\n return dtype.type(\"NaT\")\n elif dtype.kind in [\"f\", \"c\"]:\n return dtype.type(\"NaN\")\n elif dtype.kind == \"b\":\n return None\n elif dtype.kind in [\"i\", \"u\"]:\n if not has_none_blocks:\n return None\n return np.nan\n elif dtype.kind == \"O\":\n return np.nan\n raise NotImplementedError\n\n\ndef _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:\n \"\"\"\n Return dtype and N/A values to use when concatenating specified units.\n\n Returned N/A value may be None which means there was no casting involved.\n\n Returns\n -------\n dtype\n \"\"\"\n if len(join_units) == 1:\n blk = join_units[0].block\n if blk is None:\n return np.dtype(np.float64)\n\n if _is_uniform_reindex(join_units):\n # FIXME: integrate property\n empty_dtype = join_units[0].block.dtype\n return empty_dtype\n\n has_none_blocks = any(unit.block is None for unit in join_units)\n dtypes = [None if unit.block is None else unit.dtype for unit in join_units]\n\n filtered_dtypes = [\n unit.dtype for unit in join_units if unit.block is not None and not unit.is_na\n ]\n if not len(filtered_dtypes):\n filtered_dtypes = [unit.dtype for unit in join_units if unit.block is not None]\n dtype_alt = find_common_type(filtered_dtypes)\n\n upcast_classes = 
_get_upcast_classes(join_units, dtypes)\n\n if is_extension_array_dtype(dtype_alt):\n return dtype_alt\n elif dtype_alt == object:\n return dtype_alt\n\n # TODO: de-duplicate with maybe_promote?\n # create the result\n if \"extension\" in upcast_classes:\n return np.dtype(\"object\")\n elif \"bool\" in upcast_classes:\n if has_none_blocks:\n return np.dtype(np.object_)\n else:\n return np.dtype(np.bool_)\n elif \"datetimetz\" in upcast_classes:\n # GH-25014. We use NaT instead of iNaT, since this eventually\n # ends up in DatetimeArray.take, which does not allow iNaT.\n dtype = upcast_classes[\"datetimetz\"]\n return dtype[0]\n elif \"datetime\" in upcast_classes:\n return np.dtype(\"M8[ns]\")\n elif \"timedelta\" in upcast_classes:\n return np.dtype(\"m8[ns]\")\n else:\n try:\n common_dtype = np.find_common_type(upcast_classes, [])\n except TypeError:\n # At least one is an ExtensionArray\n return np.dtype(np.object_)\n else:\n if is_float_dtype(common_dtype):\n return common_dtype\n elif is_numeric_dtype(common_dtype):\n if has_none_blocks:\n return np.dtype(np.float64)\n else:\n return common_dtype\n\n msg = \"invalid dtype determination in get_concat_dtype\"\n raise AssertionError(msg)\n\n\ndef _get_upcast_classes(\n join_units: Sequence[JoinUnit],\n dtypes: Sequence[DtypeObj],\n) -> Dict[str, List[DtypeObj]]:\n \"\"\"Create mapping between upcast class names and lists of dtypes.\"\"\"\n upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list)\n null_upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list)\n for dtype, unit in zip(dtypes, join_units):\n if dtype is None:\n continue\n\n upcast_cls = _select_upcast_cls_from_dtype(dtype)\n # Null blocks should not influence upcast class selection, unless there\n # are only null blocks, when same upcasting rules must be applied to\n # null upcast classes.\n if unit.is_na:\n null_upcast_classes[upcast_cls].append(dtype)\n else:\n upcast_classes[upcast_cls].append(dtype)\n\n if not upcast_classes:\n upcast_classes = null_upcast_classes\n\n return upcast_classes\n\n\ndef _select_upcast_cls_from_dtype(dtype: DtypeObj) -> str:\n \"\"\"Select upcast class name based on dtype.\"\"\"\n if is_categorical_dtype(dtype):\n return \"extension\"\n elif is_datetime64tz_dtype(dtype):\n return \"datetimetz\"\n elif is_extension_array_dtype(dtype):\n return \"extension\"\n elif issubclass(dtype.type, np.bool_):\n return \"bool\"\n elif issubclass(dtype.type, np.object_):\n return \"object\"\n elif is_datetime64_dtype(dtype):\n return \"datetime\"\n elif is_timedelta64_dtype(dtype):\n return \"timedelta\"\n elif is_sparse(dtype):\n dtype = cast(\"SparseDtype\", dtype)\n return dtype.subtype.name\n elif is_float_dtype(dtype) or is_numeric_dtype(dtype):\n return dtype.name\n else:\n return \"float\"\n\n\ndef _is_uniform_join_units(join_units: List[JoinUnit]) -> bool:\n \"\"\"\n Check if the join units consist of blocks of uniform type that can\n be concatenated using Block.concat_same_type instead of the generic\n _concatenate_join_units (which uses `concat_compat`).\n\n \"\"\"\n # TODO: require dtype match in addition to same type? e.g. 
DatetimeTZBlock\n # cannot necessarily join\n return (\n # all blocks need to have the same type\n all(type(ju.block) is type(join_units[0].block) for ju in join_units) # noqa\n and\n # no blocks that would get missing values (can lead to type upcasts)\n # unless we're an extension dtype.\n all(not ju.is_na or ju.block.is_extension for ju in join_units)\n and\n # no blocks with indexers (as then the dimensions do not fit)\n all(not ju.indexers for ju in join_units)\n and\n # only use this path when there is something to concatenate\n len(join_units) > 1\n )\n\n\ndef _is_uniform_reindex(join_units) -> bool:\n return (\n # TODO: should this be ju.block._can_hold_na?\n all(ju.block and ju.block.is_extension for ju in join_units)\n and len({ju.block.dtype.name for ju in join_units}) == 1\n )\n\n\ndef _trim_join_unit(join_unit: JoinUnit, length: int) -> JoinUnit:\n \"\"\"\n Reduce join_unit's shape along item axis to length.\n\n Extra items that didn't fit are returned as a separate block.\n \"\"\"\n if 0 not in join_unit.indexers:\n extra_indexers = join_unit.indexers\n\n if join_unit.block is None:\n extra_block = None\n else:\n extra_block = join_unit.block.getitem_block(slice(length, None))\n join_unit.block = join_unit.block.getitem_block(slice(length))\n else:\n extra_block = join_unit.block\n\n extra_indexers = copy.copy(join_unit.indexers)\n extra_indexers[0] = extra_indexers[0][length:]\n join_unit.indexers[0] = join_unit.indexers[0][:length]\n\n extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]\n join_unit.shape = (length,) + join_unit.shape[1:]\n\n return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)\n\n\ndef _combine_concat_plans(plans, concat_axis: int):\n \"\"\"\n Combine multiple concatenation plans into one.\n\n existing_plan is updated in-place.\n \"\"\"\n if len(plans) == 1:\n for p in plans[0]:\n yield p[0], [p[1]]\n\n elif concat_axis == 0:\n offset = 0\n for plan in plans:\n last_plc = None\n\n for plc, unit in plan:\n yield plc.add(offset), [unit]\n last_plc = plc\n\n if last_plc is not None:\n offset += last_plc.as_slice.stop\n\n else:\n num_ended = [0]\n\n def _next_or_none(seq):\n retval = next(seq, None)\n if retval is None:\n num_ended[0] += 1\n return retval\n\n plans = list(map(iter, plans))\n next_items = list(map(_next_or_none, plans))\n\n while num_ended[0] != len(next_items):\n if num_ended[0] > 0:\n raise ValueError(\"Plan shapes are not aligned\")\n\n placements, units = zip(*next_items)\n\n lengths = list(map(len, placements))\n min_len, max_len = min(lengths), max(lengths)\n\n if min_len == max_len:\n yield placements[0], units\n next_items[:] = map(_next_or_none, plans)\n else:\n yielded_placement = None\n yielded_units = [None] * len(next_items)\n for i, (plc, unit) in enumerate(next_items):\n yielded_units[i] = unit\n if len(plc) > min_len:\n # _trim_join_unit updates unit in place, so only\n # placement needs to be sliced to skip min_len.\n next_items[i] = (plc[min_len:], _trim_join_unit(unit, min_len))\n else:\n yielded_placement = plc\n next_items[i] = _next_or_none(plans[i])\n\n yield yielded_placement, yielded_units\n",
"\"\"\"\nThese test the method maybe_promote from core/dtypes/cast.py\n\"\"\"\n\nimport datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import NaT\n\nfrom pandas.core.dtypes.cast import maybe_promote\nfrom pandas.core.dtypes.common import (\n is_complex_dtype,\n is_datetime64_dtype,\n is_datetime_or_timedelta_dtype,\n is_float_dtype,\n is_integer_dtype,\n is_object_dtype,\n is_scalar,\n is_timedelta64_dtype,\n)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.missing import isna\n\nimport pandas as pd\n\n\[email protected](\n params=[\n bool,\n \"uint8\",\n \"int32\",\n \"uint64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n \"M8[ns]\",\n \"m8[ns]\",\n str,\n bytes,\n object,\n ]\n)\ndef any_numpy_dtype_reduced(request):\n \"\"\"\n Parameterized fixture for numpy dtypes, reduced from any_numpy_dtype.\n\n * bool\n * 'int32'\n * 'uint64'\n * 'float32'\n * 'float64'\n * 'complex64'\n * 'complex128'\n * 'M8[ns]'\n * 'M8[ns]'\n * str\n * bytes\n * object\n \"\"\"\n return request.param\n\n\ndef _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar=None):\n \"\"\"\n Auxiliary function to unify testing of scalar/array promotion.\n\n Parameters\n ----------\n dtype : dtype\n The value to pass on as the first argument to maybe_promote.\n fill_value : scalar\n The value to pass on as the second argument to maybe_promote as\n a scalar.\n expected_dtype : dtype\n The expected dtype returned by maybe_promote (by design this is the\n same regardless of whether fill_value was passed as a scalar or in an\n array!).\n exp_val_for_scalar : scalar\n The expected value for the (potentially upcast) fill_value returned by\n maybe_promote.\n \"\"\"\n assert is_scalar(fill_value)\n\n # here, we pass on fill_value as a scalar directly; the expected value\n # returned from maybe_promote is fill_value, potentially upcast to the\n # returned dtype.\n result_dtype, result_fill_value = maybe_promote(dtype, fill_value)\n expected_fill_value = exp_val_for_scalar\n\n assert result_dtype == expected_dtype\n _assert_match(result_fill_value, expected_fill_value)\n\n\ndef _assert_match(result_fill_value, expected_fill_value):\n # GH#23982/25425 require the same type in addition to equality/NA-ness\n res_type = type(result_fill_value)\n ex_type = type(expected_fill_value)\n\n if hasattr(result_fill_value, \"dtype\"):\n # Compare types in a way that is robust to platform-specific\n # idiosyncracies where e.g. sometimes we get \"ulonglong\" as an alias\n # for \"uint64\" or \"intc\" as an alias for \"int32\"\n assert result_fill_value.dtype.kind == expected_fill_value.dtype.kind\n assert result_fill_value.dtype.itemsize == expected_fill_value.dtype.itemsize\n else:\n # On some builds, type comparison fails, e.g. 
np.int32 != np.int32\n assert res_type == ex_type or res_type.__name__ == ex_type.__name__\n\n match_value = result_fill_value == expected_fill_value\n if match_value is pd.NA:\n match_value = False\n\n # Note: type check above ensures that we have the _same_ NA value\n # for missing values, None == None (which is checked\n # through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT\n match_missing = isna(result_fill_value) and isna(expected_fill_value)\n\n assert match_value or match_missing\n\n\[email protected](\n \"dtype, fill_value, expected_dtype\",\n [\n # size 8\n (\"int8\", 1, \"int8\"),\n (\"int8\", np.iinfo(\"int8\").max + 1, \"int16\"),\n (\"int8\", np.iinfo(\"int16\").max + 1, \"int32\"),\n (\"int8\", np.iinfo(\"int32\").max + 1, \"int64\"),\n (\"int8\", np.iinfo(\"int64\").max + 1, \"object\"),\n (\"int8\", -1, \"int8\"),\n (\"int8\", np.iinfo(\"int8\").min - 1, \"int16\"),\n (\"int8\", np.iinfo(\"int16\").min - 1, \"int32\"),\n (\"int8\", np.iinfo(\"int32\").min - 1, \"int64\"),\n (\"int8\", np.iinfo(\"int64\").min - 1, \"object\"),\n # keep signed-ness as long as possible\n (\"uint8\", 1, \"uint8\"),\n (\"uint8\", np.iinfo(\"int8\").max + 1, \"uint8\"),\n (\"uint8\", np.iinfo(\"uint8\").max + 1, \"uint16\"),\n (\"uint8\", np.iinfo(\"int16\").max + 1, \"uint16\"),\n (\"uint8\", np.iinfo(\"uint16\").max + 1, \"uint32\"),\n (\"uint8\", np.iinfo(\"int32\").max + 1, \"uint32\"),\n (\"uint8\", np.iinfo(\"uint32\").max + 1, \"uint64\"),\n (\"uint8\", np.iinfo(\"int64\").max + 1, \"uint64\"),\n (\"uint8\", np.iinfo(\"uint64\").max + 1, \"object\"),\n # max of uint8 cannot be contained in int8\n (\"uint8\", -1, \"int16\"),\n (\"uint8\", np.iinfo(\"int8\").min - 1, \"int16\"),\n (\"uint8\", np.iinfo(\"int16\").min - 1, \"int32\"),\n (\"uint8\", np.iinfo(\"int32\").min - 1, \"int64\"),\n (\"uint8\", np.iinfo(\"int64\").min - 1, \"object\"),\n # size 16\n (\"int16\", 1, \"int16\"),\n (\"int16\", np.iinfo(\"int8\").max + 1, \"int16\"),\n (\"int16\", np.iinfo(\"int16\").max + 1, \"int32\"),\n (\"int16\", np.iinfo(\"int32\").max + 1, \"int64\"),\n (\"int16\", np.iinfo(\"int64\").max + 1, \"object\"),\n (\"int16\", -1, \"int16\"),\n (\"int16\", np.iinfo(\"int8\").min - 1, \"int16\"),\n (\"int16\", np.iinfo(\"int16\").min - 1, \"int32\"),\n (\"int16\", np.iinfo(\"int32\").min - 1, \"int64\"),\n (\"int16\", np.iinfo(\"int64\").min - 1, \"object\"),\n (\"uint16\", 1, \"uint16\"),\n (\"uint16\", np.iinfo(\"int8\").max + 1, \"uint16\"),\n (\"uint16\", np.iinfo(\"uint8\").max + 1, \"uint16\"),\n (\"uint16\", np.iinfo(\"int16\").max + 1, \"uint16\"),\n (\"uint16\", np.iinfo(\"uint16\").max + 1, \"uint32\"),\n (\"uint16\", np.iinfo(\"int32\").max + 1, \"uint32\"),\n (\"uint16\", np.iinfo(\"uint32\").max + 1, \"uint64\"),\n (\"uint16\", np.iinfo(\"int64\").max + 1, \"uint64\"),\n (\"uint16\", np.iinfo(\"uint64\").max + 1, \"object\"),\n (\"uint16\", -1, \"int32\"),\n (\"uint16\", np.iinfo(\"int8\").min - 1, \"int32\"),\n (\"uint16\", np.iinfo(\"int16\").min - 1, \"int32\"),\n (\"uint16\", np.iinfo(\"int32\").min - 1, \"int64\"),\n (\"uint16\", np.iinfo(\"int64\").min - 1, \"object\"),\n # size 32\n (\"int32\", 1, \"int32\"),\n (\"int32\", np.iinfo(\"int8\").max + 1, \"int32\"),\n (\"int32\", np.iinfo(\"int16\").max + 1, \"int32\"),\n (\"int32\", np.iinfo(\"int32\").max + 1, \"int64\"),\n (\"int32\", np.iinfo(\"int64\").max + 1, \"object\"),\n (\"int32\", -1, \"int32\"),\n (\"int32\", np.iinfo(\"int8\").min - 1, \"int32\"),\n (\"int32\", np.iinfo(\"int16\").min - 1, \"int32\"),\n 
(\"int32\", np.iinfo(\"int32\").min - 1, \"int64\"),\n (\"int32\", np.iinfo(\"int64\").min - 1, \"object\"),\n (\"uint32\", 1, \"uint32\"),\n (\"uint32\", np.iinfo(\"int8\").max + 1, \"uint32\"),\n (\"uint32\", np.iinfo(\"uint8\").max + 1, \"uint32\"),\n (\"uint32\", np.iinfo(\"int16\").max + 1, \"uint32\"),\n (\"uint32\", np.iinfo(\"uint16\").max + 1, \"uint32\"),\n (\"uint32\", np.iinfo(\"int32\").max + 1, \"uint32\"),\n (\"uint32\", np.iinfo(\"uint32\").max + 1, \"uint64\"),\n (\"uint32\", np.iinfo(\"int64\").max + 1, \"uint64\"),\n (\"uint32\", np.iinfo(\"uint64\").max + 1, \"object\"),\n (\"uint32\", -1, \"int64\"),\n (\"uint32\", np.iinfo(\"int8\").min - 1, \"int64\"),\n (\"uint32\", np.iinfo(\"int16\").min - 1, \"int64\"),\n (\"uint32\", np.iinfo(\"int32\").min - 1, \"int64\"),\n (\"uint32\", np.iinfo(\"int64\").min - 1, \"object\"),\n # size 64\n (\"int64\", 1, \"int64\"),\n (\"int64\", np.iinfo(\"int8\").max + 1, \"int64\"),\n (\"int64\", np.iinfo(\"int16\").max + 1, \"int64\"),\n (\"int64\", np.iinfo(\"int32\").max + 1, \"int64\"),\n (\"int64\", np.iinfo(\"int64\").max + 1, \"object\"),\n (\"int64\", -1, \"int64\"),\n (\"int64\", np.iinfo(\"int8\").min - 1, \"int64\"),\n (\"int64\", np.iinfo(\"int16\").min - 1, \"int64\"),\n (\"int64\", np.iinfo(\"int32\").min - 1, \"int64\"),\n (\"int64\", np.iinfo(\"int64\").min - 1, \"object\"),\n (\"uint64\", 1, \"uint64\"),\n (\"uint64\", np.iinfo(\"int8\").max + 1, \"uint64\"),\n (\"uint64\", np.iinfo(\"uint8\").max + 1, \"uint64\"),\n (\"uint64\", np.iinfo(\"int16\").max + 1, \"uint64\"),\n (\"uint64\", np.iinfo(\"uint16\").max + 1, \"uint64\"),\n (\"uint64\", np.iinfo(\"int32\").max + 1, \"uint64\"),\n (\"uint64\", np.iinfo(\"uint32\").max + 1, \"uint64\"),\n (\"uint64\", np.iinfo(\"int64\").max + 1, \"uint64\"),\n (\"uint64\", np.iinfo(\"uint64\").max + 1, \"object\"),\n (\"uint64\", -1, \"object\"),\n (\"uint64\", np.iinfo(\"int8\").min - 1, \"object\"),\n (\"uint64\", np.iinfo(\"int16\").min - 1, \"object\"),\n (\"uint64\", np.iinfo(\"int32\").min - 1, \"object\"),\n (\"uint64\", np.iinfo(\"int64\").min - 1, \"object\"),\n ],\n)\ndef test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype):\n dtype = np.dtype(dtype)\n expected_dtype = np.dtype(expected_dtype)\n\n # output is not a generic int, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_int_with_float(any_int_dtype, float_dtype):\n dtype = np.dtype(any_int_dtype)\n fill_dtype = np.dtype(float_dtype)\n\n # create array of given dtype; casts \"1\" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling int with float always upcasts to float64\n expected_dtype = np.float64\n # fill_value can be different float type\n exp_val_for_scalar = np.float64(fill_value)\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_float_with_int(float_dtype, any_int_dtype):\n\n dtype = np.dtype(float_dtype)\n fill_dtype = np.dtype(any_int_dtype)\n\n # create array of given dtype; casts \"1\" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling float with int always keeps float dtype\n # because: np.finfo('float32').max > np.iinfo('uint64').max\n expected_dtype = dtype\n # output is not a generic float, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, 
fill_value, expected_dtype, exp_val_for_scalar)\n\n\[email protected](\n \"dtype, fill_value, expected_dtype\",\n [\n # float filled with float\n (\"float32\", 1, \"float32\"),\n (\"float32\", np.finfo(\"float32\").max * 1.1, \"float64\"),\n (\"float64\", 1, \"float64\"),\n (\"float64\", np.finfo(\"float32\").max * 1.1, \"float64\"),\n # complex filled with float\n (\"complex64\", 1, \"complex64\"),\n (\"complex64\", np.finfo(\"float32\").max * 1.1, \"complex128\"),\n (\"complex128\", 1, \"complex128\"),\n (\"complex128\", np.finfo(\"float32\").max * 1.1, \"complex128\"),\n # float filled with complex\n (\"float32\", 1 + 1j, \"complex64\"),\n (\"float32\", np.finfo(\"float32\").max * (1.1 + 1j), \"complex128\"),\n (\"float64\", 1 + 1j, \"complex128\"),\n (\"float64\", np.finfo(\"float32\").max * (1.1 + 1j), \"complex128\"),\n # complex filled with complex\n (\"complex64\", 1 + 1j, \"complex64\"),\n (\"complex64\", np.finfo(\"float32\").max * (1.1 + 1j), \"complex128\"),\n (\"complex128\", 1 + 1j, \"complex128\"),\n (\"complex128\", np.finfo(\"float32\").max * (1.1 + 1j), \"complex128\"),\n ],\n)\ndef test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype):\n\n dtype = np.dtype(dtype)\n expected_dtype = np.dtype(expected_dtype)\n\n # output is not a generic float, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_bool_with_any(any_numpy_dtype_reduced):\n dtype = np.dtype(bool)\n fill_dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of given dtype; casts \"1\" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling bool with anything but bool casts to object\n expected_dtype = np.dtype(object) if fill_dtype != bool else fill_dtype\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_with_bool(any_numpy_dtype_reduced):\n dtype = np.dtype(any_numpy_dtype_reduced)\n fill_value = True\n\n # filling anything but bool with bool casts to object\n expected_dtype = np.dtype(object) if dtype != bool else dtype\n # output is not a generic bool, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced):\n dtype = np.dtype(bytes_dtype)\n fill_dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of given dtype; casts \"1\" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # we never use bytes dtype internally, always promote to object\n expected_dtype = np.dtype(np.object_)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype):\n dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of given dtype\n fill_value = b\"abc\"\n\n # we never use bytes dtype internally, always promote to object\n expected_dtype = np.dtype(np.object_)\n # output is not a generic bytes, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_datetime64_with_any(datetime64_dtype, any_numpy_dtype_reduced):\n dtype = 
np.dtype(datetime64_dtype)\n fill_dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of given dtype; casts \"1\" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling datetime with anything but datetime casts to object\n if is_datetime64_dtype(fill_dtype):\n expected_dtype = dtype\n # for datetime dtypes, scalar values get cast to to_datetime64\n exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\[email protected](\n \"fill_value\",\n [\n pd.Timestamp(\"now\"),\n np.datetime64(\"now\"),\n datetime.datetime.now(),\n datetime.date.today(),\n ],\n ids=[\"pd.Timestamp\", \"np.datetime64\", \"datetime.datetime\", \"datetime.date\"],\n)\ndef test_maybe_promote_any_with_datetime64(\n any_numpy_dtype_reduced, datetime64_dtype, fill_value\n):\n dtype = np.dtype(any_numpy_dtype_reduced)\n\n # filling datetime with anything but datetime casts to object\n if is_datetime64_dtype(dtype):\n expected_dtype = dtype\n # for datetime dtypes, scalar values get cast to pd.Timestamp.value\n exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\[email protected](\n \"fill_value\",\n [\n pd.Timestamp(\"now\"),\n np.datetime64(\"now\"),\n datetime.datetime.now(),\n datetime.date.today(),\n ],\n ids=[\"pd.Timestamp\", \"np.datetime64\", \"datetime.datetime\", \"datetime.date\"],\n)\ndef test_maybe_promote_any_numpy_dtype_with_datetimetz(\n any_numpy_dtype_reduced, tz_aware_fixture, fill_value\n):\n dtype = np.dtype(any_numpy_dtype_reduced)\n fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)\n\n fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]\n\n # filling any numpy dtype with datetimetz casts to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_timedelta64_with_any(timedelta64_dtype, any_numpy_dtype_reduced):\n dtype = np.dtype(timedelta64_dtype)\n fill_dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of given dtype; casts \"1\" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling timedelta with anything but timedelta casts to object\n if is_timedelta64_dtype(fill_dtype):\n expected_dtype = dtype\n # for timedelta dtypes, scalar values get cast to pd.Timedelta.value\n exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\[email protected](\n \"fill_value\",\n [pd.Timedelta(days=1), np.timedelta64(24, \"h\"), datetime.timedelta(1)],\n ids=[\"pd.Timedelta\", \"np.timedelta64\", \"datetime.timedelta\"],\n)\ndef test_maybe_promote_any_with_timedelta64(\n any_numpy_dtype_reduced, timedelta64_dtype, fill_value\n):\n dtype = np.dtype(any_numpy_dtype_reduced)\n\n # filling anything but timedelta with timedelta casts to object\n if is_timedelta64_dtype(dtype):\n expected_dtype = dtype\n # for timedelta dtypes, scalar values get cast to pd.Timedelta.value\n exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, 
fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype_reduced):\n dtype = np.dtype(string_dtype)\n fill_dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of given dtype; casts \"1\" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling string with anything casts to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_with_string(any_numpy_dtype_reduced, string_dtype):\n dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of given dtype\n fill_value = \"abc\"\n\n # filling anything with a string casts to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_object_with_any(object_dtype, any_numpy_dtype_reduced):\n dtype = np.dtype(object_dtype)\n fill_dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of given dtype; casts \"1\" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling object with anything stays object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_with_object(any_numpy_dtype_reduced, object_dtype):\n dtype = np.dtype(any_numpy_dtype_reduced)\n\n # create array of object dtype from a scalar value (i.e. passing\n # dtypes.common.is_scalar), which can however not be cast to int/float etc.\n fill_value = pd.DateOffset(1)\n\n # filling object with anything stays object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype_reduced, nulls_fixture):\n fill_value = nulls_fixture\n dtype = np.dtype(any_numpy_dtype_reduced)\n\n if is_integer_dtype(dtype) and fill_value is not NaT:\n # integer + other missing value (np.nan / None) casts to float\n expected_dtype = np.float64\n exp_val_for_scalar = np.nan\n elif is_object_dtype(dtype) and fill_value is NaT:\n # inserting into object does not cast the value\n # but *does* cast None to np.nan\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n elif is_datetime_or_timedelta_dtype(dtype):\n # datetime / timedelta cast all missing values to dtyped-NaT\n expected_dtype = dtype\n exp_val_for_scalar = dtype.type(\"NaT\", \"ns\")\n elif fill_value is NaT:\n # NaT upcasts everything that's not datetime/timedelta to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = NaT\n elif is_float_dtype(dtype) or is_complex_dtype(dtype):\n # float / complex + missing value (!= NaT) stays the same\n expected_dtype = dtype\n exp_val_for_scalar = np.nan\n else:\n # all other cases cast to object, and use np.nan as missing value\n expected_dtype = np.dtype(object)\n if fill_value is pd.NA:\n exp_val_for_scalar = pd.NA\n else:\n exp_val_for_scalar = np.nan\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n",
"\"\"\"\nExpressions\n-----------\n\nOffer fast expression evaluation through numexpr\n\n\"\"\"\nimport operator\nfrom typing import List, Optional, Set\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._typing import FuncType\n\nfrom pandas.core.dtypes.generic import ABCDataFrame\n\nfrom pandas.core.computation.check import NUMEXPR_INSTALLED\nfrom pandas.core.ops import roperator\n\nif NUMEXPR_INSTALLED:\n import numexpr as ne\n\n_TEST_MODE: Optional[bool] = None\n_TEST_RESULT: List[bool] = []\nUSE_NUMEXPR = NUMEXPR_INSTALLED\n_evaluate: Optional[FuncType] = None\n_where: Optional[FuncType] = None\n\n# the set of dtypes that we will allow pass to numexpr\n_ALLOWED_DTYPES = {\n \"evaluate\": {\"int64\", \"int32\", \"float64\", \"float32\", \"bool\"},\n \"where\": {\"int64\", \"float64\", \"bool\"},\n}\n\n# the minimum prod shape that we will use numexpr\n_MIN_ELEMENTS = 10000\n\n\ndef set_use_numexpr(v=True):\n # set/unset to use numexpr\n global USE_NUMEXPR\n if NUMEXPR_INSTALLED:\n USE_NUMEXPR = v\n\n # choose what we are going to do\n global _evaluate, _where\n\n _evaluate = _evaluate_numexpr if USE_NUMEXPR else _evaluate_standard\n _where = _where_numexpr if USE_NUMEXPR else _where_standard\n\n\ndef set_numexpr_threads(n=None):\n # if we are using numexpr, set the threads to n\n # otherwise reset\n if NUMEXPR_INSTALLED and USE_NUMEXPR:\n if n is None:\n n = ne.detect_number_of_cores()\n ne.set_num_threads(n)\n\n\ndef _evaluate_standard(op, op_str, a, b):\n \"\"\"\n Standard evaluation.\n \"\"\"\n if _TEST_MODE:\n _store_test_result(False)\n with np.errstate(all=\"ignore\"):\n return op(a, b)\n\n\ndef _can_use_numexpr(op, op_str, a, b, dtype_check):\n \"\"\" return a boolean if we WILL be using numexpr \"\"\"\n if op_str is not None:\n\n # required min elements (otherwise we are adding overhead)\n if np.prod(a.shape) > _MIN_ELEMENTS:\n # check for dtype compatibility\n dtypes: Set[str] = set()\n for o in [a, b]:\n # Series implements dtypes, check for dimension count as well\n if hasattr(o, \"dtypes\") and o.ndim > 1:\n s = o.dtypes.value_counts()\n if len(s) > 1:\n return False\n dtypes |= set(s.index.astype(str))\n # ndarray and Series Case\n elif hasattr(o, \"dtype\"):\n dtypes |= {o.dtype.name}\n\n # allowed are a superset\n if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:\n return True\n\n return False\n\n\ndef _evaluate_numexpr(op, op_str, a, b):\n result = None\n\n if _can_use_numexpr(op, op_str, a, b, \"evaluate\"):\n is_reversed = op.__name__.strip(\"_\").startswith(\"r\")\n if is_reversed:\n # we were originally called by a reversed op method\n a, b = b, a\n\n a_value = a\n b_value = b\n\n result = ne.evaluate(\n f\"a_value {op_str} b_value\",\n local_dict={\"a_value\": a_value, \"b_value\": b_value},\n casting=\"safe\",\n )\n\n if _TEST_MODE:\n _store_test_result(result is not None)\n\n if result is None:\n result = _evaluate_standard(op, op_str, a, b)\n\n return result\n\n\n_op_str_mapping = {\n operator.add: \"+\",\n roperator.radd: \"+\",\n operator.mul: \"*\",\n roperator.rmul: \"*\",\n operator.sub: \"-\",\n roperator.rsub: \"-\",\n operator.truediv: \"/\",\n roperator.rtruediv: \"/\",\n operator.floordiv: \"//\",\n roperator.rfloordiv: \"//\",\n # we require Python semantics for mod of negative for backwards compatibility\n # see https://github.com/pydata/numexpr/issues/365\n # so sticking with unaccelerated for now\n operator.mod: None,\n roperator.rmod: \"%\",\n operator.pow: \"**\",\n roperator.rpow: \"**\",\n 
operator.eq: \"==\",\n operator.ne: \"!=\",\n operator.le: \"<=\",\n operator.lt: \"<\",\n operator.ge: \">=\",\n operator.gt: \">\",\n operator.and_: \"&\",\n roperator.rand_: \"&\",\n operator.or_: \"|\",\n roperator.ror_: \"|\",\n operator.xor: \"^\",\n roperator.rxor: \"^\",\n divmod: None,\n roperator.rdivmod: None,\n}\n\n\ndef _where_standard(cond, a, b):\n # Caller is responsible for extracting ndarray if necessary\n return np.where(cond, a, b)\n\n\ndef _where_numexpr(cond, a, b):\n # Caller is responsible for extracting ndarray if necessary\n result = None\n\n if _can_use_numexpr(None, \"where\", a, b, \"where\"):\n\n result = ne.evaluate(\n \"where(cond_value, a_value, b_value)\",\n local_dict={\"cond_value\": cond, \"a_value\": a, \"b_value\": b},\n casting=\"safe\",\n )\n\n if result is None:\n result = _where_standard(cond, a, b)\n\n return result\n\n\n# turn myself on\nset_use_numexpr(get_option(\"compute.use_numexpr\"))\n\n\ndef _has_bool_dtype(x):\n if isinstance(x, ABCDataFrame):\n return \"bool\" in x.dtypes\n try:\n return x.dtype == bool\n except AttributeError:\n return isinstance(x, (bool, np.bool_))\n\n\ndef _bool_arith_check(\n op_str, a, b, not_allowed=frozenset((\"/\", \"//\", \"**\")), unsupported=None\n):\n if unsupported is None:\n unsupported = {\"+\": \"|\", \"*\": \"&\", \"-\": \"^\"}\n\n if _has_bool_dtype(a) and _has_bool_dtype(b):\n if op_str in unsupported:\n warnings.warn(\n f\"evaluating in Python space because the {repr(op_str)} \"\n \"operator is not supported by numexpr for \"\n f\"the bool dtype, use {repr(unsupported[op_str])} instead\"\n )\n return False\n\n if op_str in not_allowed:\n raise NotImplementedError(\n f\"operator {repr(op_str)} not implemented for bool dtypes\"\n )\n return True\n\n\ndef evaluate(op, a, b, use_numexpr: bool = True):\n \"\"\"\n Evaluate and return the expression of the op on a and b.\n\n Parameters\n ----------\n op : the actual operand\n a : left operand\n b : right operand\n use_numexpr : bool, default True\n Whether to try to use numexpr.\n \"\"\"\n op_str = _op_str_mapping[op]\n if op_str is not None:\n use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)\n if use_numexpr:\n # error: \"None\" not callable\n return _evaluate(op, op_str, a, b) # type: ignore[misc]\n return _evaluate_standard(op, op_str, a, b)\n\n\ndef where(cond, a, b, use_numexpr=True):\n \"\"\"\n Evaluate the where condition cond on a and b.\n\n Parameters\n ----------\n cond : np.ndarray[bool]\n a : return if cond is True\n b : return if cond is False\n use_numexpr : bool, default True\n Whether to try to use numexpr.\n \"\"\"\n assert _where is not None\n return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b)\n\n\ndef set_test_mode(v: bool = True) -> None:\n \"\"\"\n Keeps track of whether numexpr was used.\n\n Stores an additional ``True`` for every successful use of evaluate with\n numexpr since the last ``get_test_result``.\n \"\"\"\n global _TEST_MODE, _TEST_RESULT\n _TEST_MODE = v\n _TEST_RESULT = []\n\n\ndef _store_test_result(used_numexpr: bool) -> None:\n global _TEST_RESULT\n if used_numexpr:\n _TEST_RESULT.append(used_numexpr)\n\n\ndef get_test_result() -> List[bool]:\n \"\"\"\n Get test result and reset test_results.\n \"\"\"\n global _TEST_RESULT\n res = _TEST_RESULT\n _TEST_RESULT = []\n return res\n"
] | [
[
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.dtype",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.concatenate",
"pandas._libs.internals.get_blkno_placements",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.internals.managers.BlockManager",
"pandas.core.dtypes.common.is_numeric_dtype",
"numpy.full",
"numpy.diff",
"pandas.core.dtypes.concat.concat_compat",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.algorithms.take_nd",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.internals.array_manager.ArrayManager",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.internals.blocks.make_block",
"pandas.core.arrays.DatetimeArray",
"numpy.atleast_2d",
"pandas.core.dtypes.cast.ensure_dtype_can_hold_na",
"pandas.core.dtypes.common.is_sparse",
"numpy.find_common_type",
"pandas.core.dtypes.cast.find_common_type",
"numpy.ones",
"pandas.core.dtypes.missing.isna_all",
"numpy.empty"
],
[
"pandas.Series",
"pandas.core.dtypes.dtypes.DatetimeTZDtype",
"numpy.dtype",
"pandas.core.dtypes.common.is_complex_dtype",
"numpy.iinfo",
"pandas.core.dtypes.common.is_datetime64_dtype",
"numpy.finfo",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.Timedelta",
"numpy.timedelta64",
"pandas.core.dtypes.cast.maybe_promote",
"numpy.array",
"pandas.DateOffset",
"pandas.core.dtypes.common.is_scalar",
"numpy.datetime64",
"pandas.core.dtypes.common.is_object_dtype",
"numpy.float64",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.Timestamp"
],
[
"pandas._config.get_option",
"numpy.where",
"numpy.prod",
"numpy.errstate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"1.5",
"1.3",
"2.0",
"1.4"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
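The test_maybe_promote module reproduced above doubles as a compact specification of pandas' dtype-promotion rules. As a quick illustration, here is a minimal sketch that exercises the same internal helper the tests import (pandas.core.dtypes.cast.maybe_promote is a private API, so this assumes a pandas version contemporary with the code above):

import numpy as np
from pandas.core.dtypes.cast import maybe_promote

# Overflowing int8 promotes to the next signed size, matching the
# ("int8", np.iinfo("int8").max + 1, "int16") row of the parametrized table.
dtype, fill_value = maybe_promote(np.dtype("int8"), np.iinfo("int8").max + 1)
print(dtype, fill_value)  # int16 128

# Filling any numpy dtype with a string falls back to object, as covered by
# test_maybe_promote_any_with_string.
dtype, fill_value = maybe_promote(np.dtype("int64"), "abc")
print(dtype, fill_value)  # object abc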
ankitshaw/DenGa | [
"92dfb0f3760c30dd9a32d650da92d5c3276099d1"
] | [
"denga/genda.py"
] | [
"\nimport denga.augment as au\nimport pandas as pd\n\nclass Genda():\n\n\tdef __init__(self,filepath): \n\t\tself.filepath = filepath\n\t\tself.dataset = None\n\t\ttry:\n\t\t\tself.dataset = pd.read_csv(self.filepath, header= None, error_bad_lines=False)\n\t\texcept:\n\t\t\traise Exception(\"ERROR: File Missing\") \n\t\tself.data = None\n\n\tdef generate(self):\n\t\tself.data = au.nlp(self.dataset)\n\n\tdef save(self,filename=\"genda.txt\"):\n\t\tif(self.data is None):\n\t\t\traise Exception(\"ERROR: New Dataset not yet generated.\")\n\n\t\tif not \".\" in filename:\n\t\t\traise Exception(\"ERROR: extension missing from file name.\")\n\t\telif filename.endswith(\".csv\"):\n\t\t\tdf = pd.DataFrame(self.data, columns=[\"New Sentences\"])\n\t\t\tdf.to_csv(filename, index=False)\n\t\telif filename.endswith(\".txt\"):\n\t\t\twith open(filename, \"w\") as output:\n\t\t\t\tfor line in self.data:\n\t\t\t\t\toutput.write(str(line)+\"\\n\")\n\t\telse:\n\t\t\traise Exception(\"ERROR: file type not supported use .txt or .csv file name.\") \t"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
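For context, a hypothetical usage sketch of the Genda class listed above (the import path follows the repository layout denga/genda.py, the file names are placeholders, and it assumes a pandas version that still accepts the error_bad_lines keyword used in __init__):

from denga.genda import Genda

genda = Genda("sentences.csv")   # loads the CSV via pandas.read_csv, else raises "ERROR: File Missing"
genda.generate()                 # runs denga.augment.nlp over the loaded dataset
genda.save("augmented.csv")      # .csv writes a one-column DataFrame; .txt writes one sentence per line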
ABernard27/PROJET-groupe-3 | [
"a9ab9d80c10724ded9e20751fda018a7ed05589b"
] | [
"Coberny/graph_min_cost/best_price_path.py"
] | [
"import pandas as pd\nimport networkx as nx\nfrom networkx.algorithms import dijkstra_path\nimport itertools\nimport time\nimport datetime as dt\n# import matplotlib.pyplot as plt\n\n\n# Retourne la liste de toutes les villes du dataframe\ndef GetListOfcolnames(data):\n listofColnames = list(data.columns)[1:]\n return listofColnames\n\n# Retourne la liste de tous les sommets des chemins possibles entre\n# la ville de départ (entrance) et la ville d'arrivée (outlet)\ndef GetListOfVertexPath(data, entrance, outlet):\n listofColnames = GetListOfcolnames(data)\n outlet_index = listofColnames.index(outlet)\n listOfVertexPath = listofColnames[0:outlet_index+1]\n return listOfVertexPath\n\n# Retourne la liste de toutes les sorties possibles entre\n# la ville de départ (entrance) et la ville d'arrivée (outlet)\ndef GetListOfPossibleExit(data, entrance, outlet):\n listOfVertexPath = GetListOfVertexPath(data, entrance, outlet)\n listOfExit = [e for e in listOfVertexPath if (e != entrance and e != outlet)]\n return listOfExit\n\n# Retourne la contrainte K qui correspond au nombre total de sorties\n# qu'on peut emprunter entre la ville de départ (entrance) et \n# la ville d'arrivée (outlet)\ndef GetKMaxConstraint(data, entrance, outlet):\n k = len(GetListOfPossibleExit(data, entrance, outlet))\n return k\n\n# Retourne la liste de tous les chemins qu'on peut emprunter en fonction\n# du nombre de sorties (nbr_exit) utilisé entre la ville de départ\n# et celle d'arrivée\ndef GetListOfPath(data, entrance, outlet, nbr_exit):\n listOfExit = GetListOfPossibleExit(data, entrance, outlet)\n listOfPath = list(itertools.combinations(listOfExit, nbr_exit))\n for i in range(len(listOfPath)):\n listOfPath[i] = (entrance,) + listOfPath[i] + (outlet,)\n return listOfPath\n\n# Création du graph représentant tous les itinéraires qu'on peut \n# emprunter (entre la ville de départ et celle d'arrivée)\n# en fonction du nombre de sorties (nbr_exit) utilisé \ndef CreateGraphOfPath(data, entrance, outlet, nbr_exit):\n G_nbr_exit = nx.DiGraph()\n cities = data.columns[0]\n if nbr_exit == 0:\n G_nbr_exit.add_nodes_from([entrance, outlet])\n row_index = int(data[data[cities] == entrance].index[0])\n col_index = data.columns.get_loc(outlet)\n G_nbr_exit.add_weighted_edges_from(\n [(entrance, outlet, data.iloc[row_index,col_index])]\n )\n else:\n listOfVertexPath = GetListOfVertexPath(data, entrance, outlet)\n G_nbr_exit.add_nodes_from(listOfVertexPath)\n listOfEdges = []\n listOfPath = GetListOfPath(data, entrance, outlet, nbr_exit)\n for tup in listOfPath:\n for elt in range(len(tup) - 1):\n row_index = int(data[data[cities] == tup[elt]].index[0])\n col_index = data.columns.get_loc(tup[elt+1])\n listOfEdges.append(\n (tup[elt], tup[elt + 1], data.iloc[row_index,col_index])\n )\n G_nbr_exit.add_weighted_edges_from(listOfEdges)\n return G_nbr_exit\n\n# Retourne la liste des noeuds du plus court chemin trouvé dans le graph G\ndef FindShortestPath(G, entrance, outlet):\n listOfSPNodes = dijkstra_path(G, entrance, outlet)\n return listOfSPNodes\n\n# Retourne la somme des poids du plus court chemin\ndef ShortestPathWeight(G, entrance, outlet):\n listOfSPNodes = FindShortestPath(G, entrance, outlet)\n w = 0\n for elt in range(len(listOfSPNodes)-1):\n d = G.get_edge_data(listOfSPNodes[elt], listOfSPNodes[elt+1])\n w += d['weight']\n return w\n\n# Cette fonction calcule la somme totale des poids du chemin\ndef weight(data, n_uplet):\n cities = data.columns[0]\n listOfWeights = []\n for i in range(len(n_uplet) - 1):\n row_index = 
int(data[data[cities] == n_uplet[i]].index[0])\n col_index = data.columns.get_loc(n_uplet[i+1])\n w = data.iloc[row_index, col_index]\n listOfWeights.append(w)\n return sum(listOfWeights)\n\n# Cette fonction retourne le chemin optimal dans un n-uplet \ndef FindBestPathForPriceV2(data, listOfTuple):\n tupWeights = []\n for tup in listOfTuple:\n tp_w = weight(data, tup)\n tupWeights.append(tp_w)\n min_w = min(tupWeights)\n min_w_index = tupWeights.index(min_w)\n bestPathForPrice = listOfTuple[min_w_index]\n return bestPathForPrice\n\n# Retourne le couple composé du chemin optimal et du prix final (minimal)\n# que l'on va payer en empruntant ce cheminpour aller de la ville de \n# départ à celle d'arrivée\n\ndef FindBestPathForPrice(data, entrance, outlet, k):\n \"\"\"Affiche la liste composé des sommets du chemin optimal (le chemin qui revient le moins cher entre la ville de départ et celle d'arrivée) et le prix total (minimal) que l'utilisateur va payer en empruntant ce chemin.\n \n :param dataframe data: Le dataframe donnant le prix du trajet direct entre 2 villes. Chaque case du dataframe correspond au prix que l'on va payer entre la ville associée à l'indice de la ligne et la ville associée à l'indice de la colonne dans le dataframe.\n\n .. warning::\n\n Attention! le dataframe doit avoir un format adéquat pour que l'algorithme fonctionne, une rubrique détaillant le format attendu pour le dataframe est disponible\n\n :param str entrance: La ville de départ\n :param str outlet: La ville de sortie\n :param int k: Contrainte du nombre de sorties maximales imposées par l'utilisateur\n \n :returns: Un couple. Le 1er element du couple est la liste des sommets du chemin optimal. Le 2ème élément du couple est le prix total (minimal) que l'utilisateur va payer en empruntant ce chemin.\n \n .. code:: \n \n Coberny.FindBestPathForPrice(data, entrance, outlet, k)\n \n \"\"\"\n if k > GetKMaxConstraint(data, entrance, outlet):\n ans = 'La contrainte k est supérieure au nombre maximal de sorties possibles'\n return ans\n else:\n listOfSP = []\n listOfSPWeight = []\n for i in range(k+1):\n G = CreateGraphOfPath(data, entrance, outlet, i)\n listOfSP.append(FindShortestPath(G, entrance, outlet))\n listOfSPWeight.append(ShortestPathWeight(G, entrance, outlet))\n best_price = min(listOfSPWeight)\n best_price_index = listOfSPWeight.index(best_price)\n bestPathForPrice = listOfSP[best_price_index]\n if len(bestPathForPrice) <= 2+k:\n return (bestPathForPrice, best_price)\n else:\n listOfTuple = GetListOfPath(data, entrance, outlet, k)\n bestTupleForPrice = FindBestPathForPriceV2(data, listOfTuple)\n best_price = weight(data, bestTupleForPrice)\n bestPathForPrice = list(bestTupleForPrice)\n return (bestPathForPrice, best_price)\n\n# Retourne le graph du chemin optimal ie le chemin qui revient le moins cher\n# entre la ville de départ et celle d'arrivée\n# Les sorties intermédiraires sont coloriées en orange\n# La ville de départ et d'arrivée sont coloriées en bleu\ndef CreateGraphOfBestPathForPrice(data, entrance, outlet, k):\n \"\"\"Trace le graph du chemin optimal ie le chemin qui revient le moins cher entre la ville de départ et celle d'arrivée.\n\n :param dataframe data: Le dataframe donnant le prix du trajet direct entre 2 villes. Chaque case du dataframe correspond au prix que l'on va payer entre la ville associée à l'indice de la ligne et la ville associée à l'indice de la colonne dans le dataframe.\n\n .. warning::\n\n Attention! 
le dataframe doit avoir un format adéquat pour que l'algorithme fonctionne, une rubrique détaillant le format attendu pour le dataframe est disponible.\n\n :param str entrance: La ville de départ\n :param str outlet: La ville de sortie\n :param int k: Contrainte du nombre de sorties maximales imposées par l'utilisateur\n\n :returns: Le graphe du chemin optimal entre la ville de départ et celle d'arrivée\n \n .. code:: \n \n Coberny.CreateGraphOfBestPathForPrice(data, entrance, outlet, k)\n \n \"\"\"\n \n if k > GetKMaxConstraint(data, entrance, outlet):\n ans = 'La contrainte k est supérieure au nombre maximal de sorties possibles'\n return ans\n else:\n cities = data.columns[0]\n listOfNodesColors = []\n listOfEdges = []\n # d_edges_labels = {}\n G_bestPath = nx.DiGraph()\n couple = FindBestPathForPrice(data, entrance, outlet, k)\n bestPathForPrice = couple[0]\n for node in bestPathForPrice:\n if (node != entrance) and (node != outlet):\n listOfNodesColors.append('tab:orange')\n else:\n listOfNodesColors.append('tab:blue')\n G_bestPath.add_nodes_from(bestPathForPrice)\n for vx in range(len(bestPathForPrice)-1):\n row_index = int(data[data[cities] == bestPathForPrice[vx]].index[0])\n col_index = data.columns.get_loc(bestPathForPrice[vx+1])\n listOfEdges.append(\n (bestPathForPrice[vx], bestPathForPrice[vx+1], data.iloc[row_index,col_index])\n )\n # d_edges_labels[(str(bestPathForPrice[vx]), str(bestPathForPrice[vx+1]))] = str(\n # data.iloc[row_index,col_index]\n # )\n G_bestPath.add_weighted_edges_from(listOfEdges)\n return nx.draw(G_bestPath, node_color = listOfNodesColors, with_labels = True)\n # plt.show()\n # nx.draw_networkx_edge_labels(G_bestPath, nx.spring_layout(G_bestPath, seed=3113794652),\n # edge_labels = d_edges_labels)\n\n\nif __name__ == '__main__':\n df_price = pd.read_csv('prix.csv')\n df_price = df_price.fillna(0)\n startTime = time.time()\n print('Couple meilleur chemin et prix: ', FindBestPathForPrice(df_price,\n 'Sete', 'Montgiscard', 5))\n CreateGraphOfBestPathForPrice(df_price, 'Sete', 'Montgiscard', 5)\n runTime = time.time() - startTime\n roundRunTime = str(dt.timedelta(seconds=runTime))\n print(\"Le temps d'execution du programme vaut: \", runTime, ' secondes.\\n cad '\n , roundRunTime, \" dans le format heures minutes secondes\")\n \n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
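The price-matrix layout expected by best_price_path.py is only implied by GetListOfcolnames and weight above, so here is a toy sketch of that inferred layout (the column and city names are invented; the first column's label does not matter because the code reads data.columns[0]):

import pandas as pd

# First column: origin city per row. Remaining columns: destination cities in
# road order. Each cell: direct price between the row city and the column city.
prices = pd.DataFrame({
    "city": ["A", "B", "C"],
    "B":    [2.0, 0.0, 0.0],
    "C":    [5.0, 2.0, 0.0],
})

# With this frame, Coberny's FindBestPathForPrice(prices, "A", "C", 1) would
# compare the direct A->C price (5.0) against A->B->C (2.0 + 2.0) and return
# (["A", "B", "C"], 4.0).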
sdementen/pandas | [
"e23e6f164209167c0fba0d32c862c5e75e6d4a8a",
"e23e6f164209167c0fba0d32c862c5e75e6d4a8a",
"e23e6f164209167c0fba0d32c862c5e75e6d4a8a"
] | [
"pandas/io/pytables.py",
"pandas/tests/indexes/common.py",
"pandas/io/tests/test_sql.py"
] | [
"\"\"\"\nHigh level interface to PyTables for reading and writing pandas data structures\nto disk\n\"\"\"\n\n# pylint: disable-msg=E1101,W0613,W0603\nfrom datetime import datetime, date\nimport time\nimport re\nimport copy\nimport itertools\nimport warnings\nimport os\n\nfrom pandas.types.common import (is_list_like,\n is_categorical_dtype,\n is_timedelta64_dtype,\n is_datetime64tz_dtype,\n is_datetime64_dtype,\n _ensure_object,\n _ensure_int64,\n _ensure_platform_int)\nfrom pandas.types.missing import array_equivalent\n\nimport numpy as np\n\nimport pandas as pd\nfrom pandas import (Series, DataFrame, Panel, Panel4D, Index,\n MultiIndex, Int64Index, isnull)\nfrom pandas.core import config\nfrom pandas.io.common import _stringify_path\nfrom pandas.sparse.api import SparseSeries, SparseDataFrame\nfrom pandas.sparse.array import BlockIndex, IntIndex\nfrom pandas.tseries.api import PeriodIndex, DatetimeIndex\nfrom pandas.tseries.tdi import TimedeltaIndex\nfrom pandas.core.base import StringMixin\nfrom pandas.formats.printing import adjoin, pprint_thing\nfrom pandas.core.common import _asarray_tuplesafe, PerformanceWarning\nfrom pandas.core.algorithms import match, unique\nfrom pandas.core.categorical import Categorical\nfrom pandas.core.internals import (BlockManager, make_block,\n _block2d_to_blocknd,\n _factor_indexer, _block_shape)\nfrom pandas.core.index import _ensure_index\nfrom pandas.tools.merge import concat\nfrom pandas import compat\nfrom pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter\nfrom pandas.core.config import get_option\nfrom pandas.computation.pytables import Expr, maybe_expression\n\nimport pandas.lib as lib\nimport pandas.algos as algos\nimport pandas.tslib as tslib\n\nfrom distutils.version import LooseVersion\n\n# versioning attribute\n_version = '0.15.2'\n\n# encoding\n# PY3 encoding if we don't specify\n_default_encoding = 'UTF-8'\n\n\ndef _ensure_decoded(s):\n \"\"\" if we have bytes, decode them to unicode \"\"\"\n if isinstance(s, np.bytes_):\n s = s.decode('UTF-8')\n return s\n\n\ndef _ensure_encoding(encoding):\n # set the encoding if we need\n if encoding is None:\n if PY3:\n encoding = _default_encoding\n return encoding\n\nTerm = Expr\n\n\ndef _ensure_term(where, scope_level):\n \"\"\"\n ensure that the where is a Term or a list of Term\n this makes sure that we are capturing the scope of variables\n that are passed\n create the terms here with a frame_level=2 (we are 2 levels down)\n \"\"\"\n\n # only consider list/tuple here as an ndarray is automaticaly a coordinate\n # list\n level = scope_level + 1\n if isinstance(where, (list, tuple)):\n wlist = []\n for w in filter(lambda x: x is not None, where):\n if not maybe_expression(w):\n wlist.append(w)\n else:\n wlist.append(Term(w, scope_level=level))\n where = wlist\n elif maybe_expression(where):\n where = Term(where, scope_level=level)\n return where\n\n\nclass PossibleDataLossError(Exception):\n pass\n\n\nclass ClosedFileError(Exception):\n pass\n\n\nclass IncompatibilityWarning(Warning):\n pass\n\nincompatibility_doc = \"\"\"\nwhere criteria is being ignored as this version [%s] is too old (or\nnot-defined), read the file in and write it out to a new file to upgrade (with\nthe copy_to method)\n\"\"\"\n\n\nclass AttributeConflictWarning(Warning):\n pass\n\nattribute_conflict_doc = \"\"\"\nthe [%s] attribute of the existing index is [%s] which conflicts with the new\n[%s], resetting the attribute to None\n\"\"\"\n\n\nclass DuplicateWarning(Warning):\n pass\n\nduplicate_doc = 
\"\"\"\nduplicate entries in table, taking most recently appended\n\"\"\"\n\nperformance_doc = \"\"\"\nyour performance may suffer as PyTables will pickle object types that it cannot\nmap directly to c-types [inferred_type->%s,key->%s] [items->%s]\n\"\"\"\n\n# formats\n_FORMAT_MAP = {\n u('f'): 'fixed',\n u('fixed'): 'fixed',\n u('t'): 'table',\n u('table'): 'table',\n}\n\nformat_deprecate_doc = \"\"\"\nthe table keyword has been deprecated\nuse the format='fixed(f)|table(t)' keyword instead\n fixed(f) : specifies the Fixed format\n and is the default for put operations\n table(t) : specifies the Table format\n and is the default for append operations\n\"\"\"\n\n# map object types\n_TYPE_MAP = {\n\n Series: u('series'),\n SparseSeries: u('sparse_series'),\n pd.TimeSeries: u('series'),\n DataFrame: u('frame'),\n SparseDataFrame: u('sparse_frame'),\n Panel: u('wide'),\n Panel4D: u('ndim'),\n}\n\n# storer class map\n_STORER_MAP = {\n u('TimeSeries'): 'LegacySeriesFixed',\n u('Series'): 'LegacySeriesFixed',\n u('DataFrame'): 'LegacyFrameFixed',\n u('DataMatrix'): 'LegacyFrameFixed',\n u('series'): 'SeriesFixed',\n u('sparse_series'): 'SparseSeriesFixed',\n u('frame'): 'FrameFixed',\n u('sparse_frame'): 'SparseFrameFixed',\n u('wide'): 'PanelFixed',\n}\n\n# table class map\n_TABLE_MAP = {\n u('generic_table'): 'GenericTable',\n u('appendable_series'): 'AppendableSeriesTable',\n u('appendable_multiseries'): 'AppendableMultiSeriesTable',\n u('appendable_frame'): 'AppendableFrameTable',\n u('appendable_multiframe'): 'AppendableMultiFrameTable',\n u('appendable_panel'): 'AppendablePanelTable',\n u('appendable_ndim'): 'AppendableNDimTable',\n u('worm'): 'WORMTable',\n u('legacy_frame'): 'LegacyFrameTable',\n u('legacy_panel'): 'LegacyPanelTable',\n}\n\n# axes map\n_AXES_MAP = {\n DataFrame: [0],\n Panel: [1, 2],\n Panel4D: [1, 2, 3],\n}\n\n# register our configuration options\ndropna_doc = \"\"\"\n: boolean\n drop ALL nan rows when appending to a table\n\"\"\"\nformat_doc = \"\"\"\n: format\n default format writing format, if None, then\n put will default to 'fixed' and append will default to 'table'\n\"\"\"\n\nwith config.config_prefix('io.hdf'):\n config.register_option('dropna_table', False, dropna_doc,\n validator=config.is_bool)\n config.register_option(\n 'default_format', None, format_doc,\n validator=config.is_one_of_factory(['fixed', 'table', None])\n )\n\n# oh the troubles to reduce import time\n_table_mod = None\n_table_file_open_policy_is_strict = False\n\n\ndef _tables():\n global _table_mod\n global _table_file_open_policy_is_strict\n if _table_mod is None:\n import tables\n _table_mod = tables\n\n # version requirements\n if LooseVersion(tables.__version__) < '3.0.0':\n raise ImportError(\"PyTables version >= 3.0.0 is required\")\n\n # set the file open policy\n # return the file open policy; this changes as of pytables 3.1\n # depending on the HDF5 version\n try:\n _table_file_open_policy_is_strict = (\n tables.file._FILE_OPEN_POLICY == 'strict')\n except:\n pass\n\n return _table_mod\n\n# interface to/from ###\n\n\ndef to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,\n append=None, **kwargs):\n \"\"\" store this object, close it if we opened it \"\"\"\n\n if append:\n f = lambda store: store.append(key, value, **kwargs)\n else:\n f = lambda store: store.put(key, value, **kwargs)\n\n path_or_buf = _stringify_path(path_or_buf)\n if isinstance(path_or_buf, string_types):\n with HDFStore(path_or_buf, mode=mode, complevel=complevel,\n complib=complib) as store:\n 
f(store)\n else:\n f(path_or_buf)\n\n\ndef read_hdf(path_or_buf, key=None, **kwargs):\n \"\"\" read from the store, close it if we opened it\n\n Retrieve pandas object stored in file, optionally based on where\n criteria\n\n Parameters\n ----------\n path_or_buf : path (string), buffer, or path object (pathlib.Path or\n py._path.local.LocalPath) to read from\n\n .. versionadded:: 0.19.0 support for pathlib, py.path.\n\n key : group identifier in the store. Can be omitted if the HDF file\n contains a single pandas object.\n where : list of Term (or convertable) objects, optional\n start : optional, integer (defaults to None), row number to start\n selection\n stop : optional, integer (defaults to None), row number to stop\n selection\n columns : optional, a list of columns that if not None, will limit the\n return columns\n iterator : optional, boolean, return an iterator, default False\n chunksize : optional, nrows to include in iteration, return an iterator\n\n Returns\n -------\n The selected object\n\n \"\"\"\n\n if kwargs.get('mode', 'a') not in ['r', 'r+', 'a']:\n raise ValueError('mode {0} is not allowed while performing a read. '\n 'Allowed modes are r, r+ and a.'\n .format(kwargs.get('mode')))\n # grab the scope\n if 'where' in kwargs:\n kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)\n\n path_or_buf = _stringify_path(path_or_buf)\n if isinstance(path_or_buf, string_types):\n\n try:\n exists = os.path.exists(path_or_buf)\n\n # if filepath is too long\n except (TypeError, ValueError):\n exists = False\n\n if not exists:\n raise IOError('File %s does not exist' % path_or_buf)\n\n # can't auto open/close if we are using an iterator\n # so delegate to the iterator\n store = HDFStore(path_or_buf, **kwargs)\n auto_close = True\n\n elif isinstance(path_or_buf, HDFStore):\n if not path_or_buf.is_open:\n raise IOError('The HDFStore must be open for reading.')\n\n store = path_or_buf\n auto_close = False\n\n else:\n raise NotImplementedError('Support for generic buffers has not been '\n 'implemented.')\n\n try:\n if key is None:\n groups = store.groups()\n if len(groups) == 0:\n raise ValueError('No dataset in HDF5 file.')\n candidate_only_group = groups[0]\n\n # For the HDF file to have only one dataset, all other groups\n # should then be metadata groups for that candidate group. 
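# Illustrative sketch for read_hdf() above, assuming the hypothetical
# 'data.h5'/'df' names from the earlier example. The key can be omitted when
# the file holds a single pandas object, 'where' only works on table-format
# nodes, and an already-open HDFStore may be passed instead of a path.
# >>> pd.read_hdf('data.h5')                       # key inferred (single dataset)
# >>> pd.read_hdf('data.h5', 'df', where='index > 1')
# >>> with pd.HDFStore('data.h5') as store:
# ...     pd.read_hdf(store, 'df')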
(This\n # assumes that the groups() method enumerates parent groups\n # before their children.)\n for group_to_check in groups[1:]:\n if not _is_metadata_of(group_to_check, candidate_only_group):\n raise ValueError('key must be provided when HDF5 file '\n 'contains multiple datasets.')\n key = candidate_only_group._v_pathname\n return store.select(key, auto_close=auto_close, **kwargs)\n except:\n # if there is an error, close the store\n try:\n store.close()\n except:\n pass\n\n raise\n\n\ndef _is_metadata_of(group, parent_group):\n \"\"\"Check if a given group is a metadata group for a given parent_group.\"\"\"\n if group._v_depth <= parent_group._v_depth:\n return False\n\n current = group\n while current._v_depth > 1:\n parent = current._v_parent\n if parent == parent_group and current._v_name == 'meta':\n return True\n current = current._v_parent\n return False\n\n\nclass HDFStore(StringMixin):\n\n \"\"\"\n dict-like IO interface for storing pandas objects in PyTables\n either Fixed or Table format.\n\n Parameters\n ----------\n path : string\n File path to HDF5 file\n mode : {'a', 'w', 'r', 'r+'}, default 'a'\n\n ``'r'``\n Read-only; no data can be modified.\n ``'w'``\n Write; a new file is created (an existing file with the same\n name would be deleted).\n ``'a'``\n Append; an existing file is opened for reading and writing,\n and if the file does not exist it is created.\n ``'r+'``\n It is similar to ``'a'``, but the file must already exist.\n complevel : int, 1-9, default 0\n If a complib is specified compression will be applied\n where possible\n complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None\n If complevel is > 0 apply compression to objects written\n in the store wherever possible\n fletcher32 : bool, default False\n If applying compression use the fletcher32 checksum\n\n Examples\n --------\n >>> from pandas import DataFrame\n >>> from numpy.random import randn\n >>> bar = DataFrame(randn(10, 4))\n >>> store = HDFStore('test.h5')\n >>> store['foo'] = bar # write to HDF5\n >>> bar = store['foo'] # retrieve\n >>> store.close()\n \"\"\"\n\n def __init__(self, path, mode=None, complevel=None, complib=None,\n fletcher32=False, **kwargs):\n try:\n import tables # noqa\n except ImportError as ex: # pragma: no cover\n raise ImportError('HDFStore requires PyTables, \"{ex}\" problem '\n 'importing'.format(ex=str(ex)))\n\n if complib not in (None, 'blosc', 'bzip2', 'lzo', 'zlib'):\n raise ValueError(\"complib only supports 'blosc', 'bzip2', lzo' \"\n \"or 'zlib' compression.\")\n\n self._path = path\n if mode is None:\n mode = 'a'\n self._mode = mode\n self._handle = None\n self._complevel = complevel\n self._complib = complib\n self._fletcher32 = fletcher32\n self._filters = None\n self.open(mode=mode, **kwargs)\n\n @property\n def root(self):\n \"\"\" return the root node \"\"\"\n self._check_if_open()\n return self._handle.root\n\n @property\n def filename(self):\n return self._path\n\n def __getitem__(self, key):\n return self.get(key)\n\n def __setitem__(self, key, value):\n self.put(key, value)\n\n def __delitem__(self, key):\n return self.remove(key)\n\n def __getattr__(self, name):\n \"\"\" allow attribute access to get stores \"\"\"\n self._check_if_open()\n try:\n return self.get(name)\n except:\n pass\n raise AttributeError(\"'%s' object has no attribute '%s'\" %\n (type(self).__name__, name))\n\n def __contains__(self, key):\n \"\"\" check for existance of this key\n can match the exact pathname or the pathnm w/o the leading '/'\n \"\"\"\n node = 
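# Illustrative sketch of constructing the HDFStore described above with
# store-wide compression; the file name is a hypothetical example. The
# complevel/complib/fletcher32 arguments feed the PyTables Filters created
# in open().
# >>> store = pd.HDFStore('compressed.h5', mode='a', complevel=9,
# ...                     complib='blosc', fletcher32=True)
# >>> store.put('df', df, format='table')
# >>> store.close()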
self.get_node(key)\n if node is not None:\n name = node._v_pathname\n if name == key or name[1:] == key:\n return True\n return False\n\n def __len__(self):\n return len(self.groups())\n\n def __unicode__(self):\n output = '%s\\nFile path: %s\\n' % (type(self), pprint_thing(self._path))\n if self.is_open:\n lkeys = sorted(list(self.keys()))\n if len(lkeys):\n keys = []\n values = []\n\n for k in lkeys:\n try:\n s = self.get_storer(k)\n if s is not None:\n keys.append(pprint_thing(s.pathname or k))\n values.append(\n pprint_thing(s or 'invalid_HDFStore node'))\n except Exception as detail:\n keys.append(k)\n values.append(\"[invalid_HDFStore node: %s]\"\n % pprint_thing(detail))\n\n output += adjoin(12, keys, values)\n else:\n output += 'Empty'\n else:\n output += \"File is CLOSED\"\n\n return output\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def keys(self):\n \"\"\"\n Return a (potentially unordered) list of the keys corresponding to the\n objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.\n have the leading '/'\n \"\"\"\n return [n._v_pathname for n in self.groups()]\n\n def __iter__(self):\n return iter(self.keys())\n\n def items(self):\n \"\"\"\n iterate on key->group\n \"\"\"\n for g in self.groups():\n yield g._v_pathname, g\n\n iteritems = items\n\n def open(self, mode='a', **kwargs):\n \"\"\"\n Open the file in the specified mode\n\n Parameters\n ----------\n mode : {'a', 'w', 'r', 'r+'}, default 'a'\n See HDFStore docstring or tables.open_file for info about modes\n \"\"\"\n tables = _tables()\n\n if self._mode != mode:\n\n # if we are changing a write mode to read, ok\n if self._mode in ['a', 'w'] and mode in ['r', 'r+']:\n pass\n elif mode in ['w']:\n\n # this would truncate, raise here\n if self.is_open:\n raise PossibleDataLossError(\n \"Re-opening the file [{0}] with mode [{1}] \"\n \"will delete the current file!\"\n .format(self._path, self._mode)\n )\n\n self._mode = mode\n\n # close and reopen the handle\n if self.is_open:\n self.close()\n\n if self._complib is not None:\n if self._complevel is None:\n self._complevel = 9\n self._filters = _tables().Filters(self._complevel,\n self._complib,\n fletcher32=self._fletcher32)\n\n try:\n self._handle = tables.open_file(self._path, self._mode, **kwargs)\n except (IOError) as e: # pragma: no cover\n if 'can not be written' in str(e):\n print('Opening %s in read-only mode' % self._path)\n self._handle = tables.open_file(self._path, 'r', **kwargs)\n else:\n raise\n\n except (ValueError) as e:\n\n # trap PyTables >= 3.1 FILE_OPEN_POLICY exception\n # to provide an updated message\n if 'FILE_OPEN_POLICY' in str(e):\n e = ValueError(\n \"PyTables [{version}] no longer supports opening multiple \"\n \"files\\n\"\n \"even in read-only mode on this HDF5 version \"\n \"[{hdf_version}]. 
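# Illustrative sketch of the dict-like protocol implemented above
# (__setitem__/__getitem__/__delitem__/__contains__/keys/items); 'df' is a
# hypothetical example key and `store` an open HDFStore.
# >>> store['df'] = df               # same as store.put('df', df)
# >>> 'df' in store, '/df' in store  # both spellings match
# >>> sorted(store.keys())           # absolute path names
# >>> for key, group in store.items():
# ...     print(key)
# >>> del store['df']                # same as store.remove('df')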
You can accept this\\n\"\n \"and not open the same file multiple times at once,\\n\"\n \"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 \"\n \"which allows\\n\"\n \"files to be opened multiple times at once\\n\"\n .format(version=tables.__version__,\n hdf_version=tables.get_hdf5_version()))\n\n raise e\n\n except (Exception) as e:\n\n # trying to read from a non-existant file causes an error which\n # is not part of IOError, make it one\n if self._mode == 'r' and 'Unable to open/create file' in str(e):\n raise IOError(str(e))\n raise\n\n def close(self):\n \"\"\"\n Close the PyTables file handle\n \"\"\"\n if self._handle is not None:\n self._handle.close()\n self._handle = None\n\n @property\n def is_open(self):\n \"\"\"\n return a boolean indicating whether the file is open\n \"\"\"\n if self._handle is None:\n return False\n return bool(self._handle.isopen)\n\n def flush(self, fsync=False):\n \"\"\"\n Force all buffered modifications to be written to disk.\n\n Parameters\n ----------\n fsync : bool (default False)\n call ``os.fsync()`` on the file handle to force writing to disk.\n\n Notes\n -----\n Without ``fsync=True``, flushing may not guarantee that the OS writes\n to disk. With fsync, the operation will block until the OS claims the\n file has been written; however, other caching layers may still\n interfere.\n \"\"\"\n if self._handle is not None:\n self._handle.flush()\n if fsync:\n try:\n os.fsync(self._handle.fileno())\n except:\n pass\n\n def get(self, key):\n \"\"\"\n Retrieve pandas object stored in file\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n obj : type of object stored in file\n \"\"\"\n group = self.get_node(key)\n if group is None:\n raise KeyError('No object named %s in the file' % key)\n return self._read_group(group)\n\n def select(self, key, where=None, start=None, stop=None, columns=None,\n iterator=False, chunksize=None, auto_close=False, **kwargs):\n \"\"\"\n Retrieve pandas object stored in file, optionally based on where\n criteria\n\n Parameters\n ----------\n key : object\n where : list of Term (or convertable) objects, optional\n start : integer (defaults to None), row number to start selection\n stop : integer (defaults to None), row number to stop selection\n columns : a list of columns that if not None, will limit the return\n columns\n iterator : boolean, return an iterator, default False\n chunksize : nrows to include in iteration, return an iterator\n auto_close : boolean, should automatically close the store when\n finished, default is False\n\n Returns\n -------\n The selected object\n\n \"\"\"\n group = self.get_node(key)\n if group is None:\n raise KeyError('No object named %s in the file' % key)\n\n # create the storer and axes\n where = _ensure_term(where, scope_level=1)\n s = self._create_storer(group)\n s.infer_axes()\n\n # function to call on iteration\n def func(_start, _stop, _where):\n return s.read(start=_start, stop=_stop,\n where=_where,\n columns=columns, **kwargs)\n\n # create the iterator\n it = TableIterator(self, s, func, where=where, nrows=s.nrows,\n start=start, stop=stop, iterator=iterator,\n chunksize=chunksize, auto_close=auto_close)\n\n return it.get_result()\n\n def select_as_coordinates(\n self, key, where=None, start=None, stop=None, **kwargs):\n \"\"\"\n return the selection as an Index\n\n Parameters\n ----------\n key : object\n where : list of Term (or convertable) objects, optional\n start : integer (defaults to None), row number to start selection\n stop : integer (defaults to 
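# Illustrative sketch for select()/select_as_coordinates() above, assuming an
# open HDFStore `store` and a DataFrame `df` with a numeric column 'A'
# (hypothetical names). Queries require table format; data_columns=True makes
# every column queryable.
# >>> store.append('df', df, data_columns=True)
# >>> store.select('df', where='A > 1', columns=['A'])
# >>> store.select('df', start=0, stop=2)          # row-number slicing
# >>> store.select_as_coordinates('df', 'A > 1')   # Index of matching row numbers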
None), row number to stop selection\n \"\"\"\n where = _ensure_term(where, scope_level=1)\n return self.get_storer(key).read_coordinates(where=where, start=start,\n stop=stop, **kwargs)\n\n def select_column(self, key, column, **kwargs):\n \"\"\"\n return a single column from the table. This is generally only useful to\n select an indexable\n\n Parameters\n ----------\n key : object\n column: the column of interest\n\n Exceptions\n ----------\n raises KeyError if the column is not found (or key is not a valid\n store)\n raises ValueError if the column can not be extracted individually (it\n is part of a data block)\n\n \"\"\"\n return self.get_storer(key).read_column(column=column, **kwargs)\n\n def select_as_multiple(self, keys, where=None, selector=None, columns=None,\n start=None, stop=None, iterator=False,\n chunksize=None, auto_close=False, **kwargs):\n \"\"\" Retrieve pandas objects from multiple tables\n\n Parameters\n ----------\n keys : a list of the tables\n selector : the table to apply the where criteria (defaults to keys[0]\n if not supplied)\n columns : the columns I want back\n start : integer (defaults to None), row number to start selection\n stop : integer (defaults to None), row number to stop selection\n iterator : boolean, return an iterator, default False\n chunksize : nrows to include in iteration, return an iterator\n\n Exceptions\n ----------\n raises KeyError if keys or selector is not found or keys is empty\n raises TypeError if keys is not a list or tuple\n raises ValueError if the tables are not ALL THE SAME DIMENSIONS\n \"\"\"\n\n # default to single select\n where = _ensure_term(where, scope_level=1)\n if isinstance(keys, (list, tuple)) and len(keys) == 1:\n keys = keys[0]\n if isinstance(keys, string_types):\n return self.select(key=keys, where=where, columns=columns,\n start=start, stop=stop, iterator=iterator,\n chunksize=chunksize, **kwargs)\n\n if not isinstance(keys, (list, tuple)):\n raise TypeError(\"keys must be a list/tuple\")\n\n if not len(keys):\n raise ValueError(\"keys must have a non-zero length\")\n\n if selector is None:\n selector = keys[0]\n\n # collect the tables\n tbls = [self.get_storer(k) for k in keys]\n s = self.get_storer(selector)\n\n # validate rows\n nrows = None\n for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):\n if t is None:\n raise KeyError(\"Invalid table [%s]\" % k)\n if not t.is_table:\n raise TypeError(\n \"object [%s] is not a table, and cannot be used in all \"\n \"select as multiple\" % t.pathname\n )\n\n if nrows is None:\n nrows = t.nrows\n elif t.nrows != nrows:\n raise ValueError(\n \"all tables must have exactly the same nrows!\")\n\n # axis is the concentation axes\n axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]\n\n def func(_start, _stop, _where):\n\n # retrieve the objs, _where is always passed as a set of\n # coordinates here\n objs = [t.read(where=_where, columns=columns, **kwargs)\n for t in tbls]\n\n # concat and return\n return concat(objs, axis=axis,\n verify_integrity=False).consolidate()\n\n # create the iterator\n it = TableIterator(self, s, func, where=where, nrows=nrows,\n start=start, stop=stop, iterator=iterator,\n chunksize=chunksize, auto_close=auto_close)\n\n return it.get_result(coordinates=True)\n\n def put(self, key, value, format=None, append=False, **kwargs):\n \"\"\"\n Store object in HDFStore\n\n Parameters\n ----------\n key : object\n value : {Series, DataFrame, Panel}\n format : 'fixed(f)|table(t)', default is 'fixed'\n fixed(f) : Fixed format\n Fast 
writing/reading. Not-appendable, nor searchable\n table(t) : Table format\n Write as a PyTables Table structure which may perform\n worse but allow more flexible operations like searching\n / selecting subsets of the data\n append : boolean, default False\n This will force Table format, append the input data to the\n existing.\n data_columns : list of columns to create as data columns, or True to\n use all columns. See\n `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa\n encoding : default None, provide an encoding for strings\n dropna : boolean, default False, do not write an ALL nan row to\n the store settable by the option 'io.hdf.dropna_table'\n \"\"\"\n if format is None:\n format = get_option(\"io.hdf.default_format\") or 'fixed'\n kwargs = self._validate_format(format, kwargs)\n self._write_to_group(key, value, append=append, **kwargs)\n\n def remove(self, key, where=None, start=None, stop=None):\n \"\"\"\n Remove pandas object partially by specifying the where condition\n\n Parameters\n ----------\n key : string\n Node to remove or delete rows from\n where : list of Term (or convertable) objects, optional\n start : integer (defaults to None), row number to start selection\n stop : integer (defaults to None), row number to stop selection\n\n Returns\n -------\n number of rows removed (or None if not a Table)\n\n Exceptions\n ----------\n raises KeyError if key is not a valid store\n\n \"\"\"\n where = _ensure_term(where, scope_level=1)\n try:\n s = self.get_storer(key)\n except:\n\n if where is not None:\n raise ValueError(\n \"trying to remove a node with a non-None where clause!\")\n\n # we are actually trying to remove a node (with children)\n s = self.get_node(key)\n if s is not None:\n s._f_remove(recursive=True)\n return None\n\n if s is None:\n raise KeyError('No object named %s in the file' % key)\n\n # remove the node\n if where is None and start is None and stop is None:\n s.group._f_remove(recursive=True)\n\n # delete from the table\n else:\n if not s.is_table:\n raise ValueError(\n 'can only remove with where on objects written as tables')\n return s.delete(where=where, start=start, stop=stop)\n\n def append(self, key, value, format=None, append=True, columns=None,\n dropna=None, **kwargs):\n \"\"\"\n Append to Table in file. Node must already exist and be Table\n format.\n\n Parameters\n ----------\n key : object\n value : {Series, DataFrame, Panel, Panel4D}\n format: 'table' is the default\n table(t) : table format\n Write as a PyTables Table structure which may perform\n worse but allow more flexible operations like searching\n / selecting subsets of the data\n append : boolean, default True, append the input data to the\n existing\n data_columns : list of columns, or True, default None\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. 
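# Illustrative sketch contrasting put(), append() and remove() above; the
# keys are hypothetical example names.
# >>> store.put('df_fixed', df)                    # 'fixed': fast, not appendable/queryable
# >>> store.put('df_table', df, format='table')    # 'table': appendable, supports where=
# >>> store.append('df_table', df)                 # add more rows to the same table
# >>> store.remove('df_table', where='index > 1')  # partial deletes need table format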
See `here\n <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.\n min_itemsize : dict of columns that specify minimum string sizes\n nan_rep : string to use as string nan represenation\n chunksize : size to chunk the writing\n expectedrows : expected TOTAL row size of this table\n encoding : default None, provide an encoding for strings\n dropna : boolean, default False, do not write an ALL nan row to\n the store settable by the option 'io.hdf.dropna_table'\n\n Notes\n -----\n Does *not* check if data being appended overlaps with existing\n data in the table, so be careful\n \"\"\"\n if columns is not None:\n raise TypeError(\"columns is not a supported keyword in append, \"\n \"try data_columns\")\n\n if dropna is None:\n dropna = get_option(\"io.hdf.dropna_table\")\n if format is None:\n format = get_option(\"io.hdf.default_format\") or 'table'\n kwargs = self._validate_format(format, kwargs)\n self._write_to_group(key, value, append=append, dropna=dropna,\n **kwargs)\n\n def append_to_multiple(self, d, value, selector, data_columns=None,\n axes=None, dropna=False, **kwargs):\n \"\"\"\n Append to multiple tables\n\n Parameters\n ----------\n d : a dict of table_name to table_columns, None is acceptable as the\n values of one node (this will get all the remaining columns)\n value : a pandas object\n selector : a string that designates the indexable table; all of its\n columns will be designed as data_columns, unless data_columns is\n passed, in which case these are used\n data_columns : list of columns to create as data columns, or True to\n use all columns\n dropna : if evaluates to True, drop rows from all tables if any single\n row in each table has all NaN. Default False.\n\n Notes\n -----\n axes parameter is currently not accepted\n\n \"\"\"\n if axes is not None:\n raise TypeError(\"axes is currently not accepted as a parameter to\"\n \" append_to_multiple; you can create the \"\n \"tables independently instead\")\n\n if not isinstance(d, dict):\n raise ValueError(\n \"append_to_multiple must have a dictionary specified as the \"\n \"way to split the value\"\n )\n\n if selector not in d:\n raise ValueError(\n \"append_to_multiple requires a selector that is in passed dict\"\n )\n\n # figure out the splitting axis (the non_index_axis)\n axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]\n\n # figure out how to split the value\n remain_key = None\n remain_values = []\n for k, v in d.items():\n if v is None:\n if remain_key is not None:\n raise ValueError(\n \"append_to_multiple can only have one value in d that \"\n \"is None\"\n )\n remain_key = k\n else:\n remain_values.extend(v)\n if remain_key is not None:\n ordered = value.axes[axis]\n ordd = ordered.difference(Index(remain_values))\n ordd = sorted(ordered.get_indexer(ordd))\n d[remain_key] = ordered.take(ordd)\n\n # data_columns\n if data_columns is None:\n data_columns = d[selector]\n\n # ensure rows are synchronized across the tables\n if dropna:\n idxs = (value[cols].dropna(how='all').index for cols in d.values())\n valid_index = next(idxs)\n for index in idxs:\n valid_index = valid_index.intersection(index)\n value = value.ix[valid_index]\n\n # append\n for k, v in d.items():\n dc = data_columns if k == selector else None\n\n # compute the val\n val = value.reindex_axis(v, axis=axis)\n\n self.append(k, val, data_columns=dc, **kwargs)\n\n def create_table_index(self, key, **kwargs):\n \"\"\" Create a pytables index on the table\n Parameters\n ----------\n key : object (the node 
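# Illustrative sketch for append_to_multiple()/select_as_multiple(), assuming
# a DataFrame `df` with columns 'A', 'B', 'C' (hypothetical names). Columns
# 'A'/'B' go to one table, the remaining columns (the None entry) to the
# other; the selector table supplies the data columns used for the query.
# >>> store.append_to_multiple(
# ...     {'df_ab': ['A', 'B'], 'df_rest': None}, df, selector='df_ab')
# >>> store.select_as_multiple(['df_ab', 'df_rest'], where='A > 1',
# ...                          selector='df_ab')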
to index)\n\n Exceptions\n ----------\n raises if the node is not a table\n\n \"\"\"\n\n # version requirements\n _tables()\n s = self.get_storer(key)\n if s is None:\n return\n\n if not s.is_table:\n raise TypeError(\n \"cannot create table index on a Fixed format store\")\n s.create_index(**kwargs)\n\n def groups(self):\n \"\"\"return a list of all the top-level nodes (that are not themselves a\n pandas storage object)\n \"\"\"\n _tables()\n self._check_if_open()\n return [\n g for g in self._handle.walk_nodes()\n if (getattr(g._v_attrs, 'pandas_type', None) or\n getattr(g, 'table', None) or\n (isinstance(g, _table_mod.table.Table) and\n g._v_name != u('table')))\n ]\n\n def get_node(self, key):\n \"\"\" return the node with the key or None if it does not exist \"\"\"\n self._check_if_open()\n try:\n if not key.startswith('/'):\n key = '/' + key\n return self._handle.get_node(self.root, key)\n except:\n return None\n\n def get_storer(self, key):\n \"\"\" return the storer object for a key, raise if not in the file \"\"\"\n group = self.get_node(key)\n if group is None:\n return None\n s = self._create_storer(group)\n s.infer_axes()\n return s\n\n def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,\n complevel=None, fletcher32=False, overwrite=True):\n \"\"\" copy the existing store to a new file, upgrading in place\n\n Parameters\n ----------\n propindexes: restore indexes in copied file (defaults to True)\n keys : list of keys to include in the copy (defaults to all)\n overwrite : overwrite (remove and replace) existing nodes in the\n new store (default is True)\n mode, complib, complevel, fletcher32 same as in HDFStore.__init__\n\n Returns\n -------\n open file handle of the new store\n\n \"\"\"\n new_store = HDFStore(\n file,\n mode=mode,\n complib=complib,\n complevel=complevel,\n fletcher32=fletcher32)\n if keys is None:\n keys = list(self.keys())\n if not isinstance(keys, (tuple, list)):\n keys = [keys]\n for k in keys:\n s = self.get_storer(k)\n if s is not None:\n\n if k in new_store:\n if overwrite:\n new_store.remove(k)\n\n data = self.select(k)\n if s.is_table:\n\n index = False\n if propindexes:\n index = [a.name for a in s.axes if a.is_indexed]\n new_store.append(\n k, data, index=index,\n data_columns=getattr(s, 'data_columns', None),\n encoding=s.encoding\n )\n else:\n new_store.put(k, data, encoding=s.encoding)\n\n return new_store\n\n # private methods ######\n def _check_if_open(self):\n if not self.is_open:\n raise ClosedFileError(\"{0} file is not open!\".format(self._path))\n\n def _validate_format(self, format, kwargs):\n \"\"\" validate / deprecate formats; return the new kwargs \"\"\"\n kwargs = kwargs.copy()\n\n # validate\n try:\n kwargs['format'] = _FORMAT_MAP[format.lower()]\n except:\n raise TypeError(\"invalid HDFStore format specified [{0}]\"\n .format(format))\n\n return kwargs\n\n def _create_storer(self, group, format=None, value=None, append=False,\n **kwargs):\n \"\"\" return a suitable class to operate \"\"\"\n\n def error(t):\n raise TypeError(\n \"cannot properly create the storer for: [%s] [group->%s,\"\n \"value->%s,format->%s,append->%s,kwargs->%s]\"\n % (t, group, type(value), format, append, kwargs)\n )\n\n pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))\n tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))\n\n # infer the pt from the passed value\n if pt is None:\n if value is None:\n\n _tables()\n if (getattr(group, 'table', None) or\n isinstance(group, _table_mod.table.Table)):\n pt = 
u('frame_table')\n tt = u('generic_table')\n else:\n raise TypeError(\n \"cannot create a storer if the object is not existing \"\n \"nor a value are passed\")\n else:\n\n try:\n pt = _TYPE_MAP[type(value)]\n except:\n error('_TYPE_MAP')\n\n # we are actually a table\n if format == 'table':\n pt += u('_table')\n\n # a storer node\n if u('table') not in pt:\n try:\n return globals()[_STORER_MAP[pt]](self, group, **kwargs)\n except:\n error('_STORER_MAP')\n\n # existing node (and must be a table)\n if tt is None:\n\n # if we are a writer, determin the tt\n if value is not None:\n\n if pt == u('series_table'):\n index = getattr(value, 'index', None)\n if index is not None:\n if index.nlevels == 1:\n tt = u('appendable_series')\n elif index.nlevels > 1:\n tt = u('appendable_multiseries')\n elif pt == u('frame_table'):\n index = getattr(value, 'index', None)\n if index is not None:\n if index.nlevels == 1:\n tt = u('appendable_frame')\n elif index.nlevels > 1:\n tt = u('appendable_multiframe')\n elif pt == u('wide_table'):\n tt = u('appendable_panel')\n elif pt == u('ndim_table'):\n tt = u('appendable_ndim')\n\n else:\n\n # distiguish between a frame/table\n tt = u('legacy_panel')\n try:\n fields = group.table._v_attrs.fields\n if len(fields) == 1 and fields[0] == u('value'):\n tt = u('legacy_frame')\n except:\n pass\n\n try:\n return globals()[_TABLE_MAP[tt]](self, group, **kwargs)\n except:\n error('_TABLE_MAP')\n\n def _write_to_group(self, key, value, format, index=True, append=False,\n complib=None, encoding=None, **kwargs):\n group = self.get_node(key)\n\n # remove the node if we are not appending\n if group is not None and not append:\n self._handle.remove_node(group, recursive=True)\n group = None\n\n # we don't want to store a table node at all if are object is 0-len\n # as there are not dtypes\n if getattr(value, 'empty', None) and (format == 'table' or append):\n return\n\n if group is None:\n paths = key.split('/')\n\n # recursively create the groups\n path = '/'\n for p in paths:\n if not len(p):\n continue\n new_path = path\n if not path.endswith('/'):\n new_path += '/'\n new_path += p\n group = self.get_node(new_path)\n if group is None:\n group = self._handle.create_group(path, p)\n path = new_path\n\n s = self._create_storer(group, format, value, append=append,\n encoding=encoding, **kwargs)\n if append:\n # raise if we are trying to append to a Fixed format,\n # or a table that exists (and we are putting)\n if (not s.is_table or\n (s.is_table and format == 'fixed' and s.is_exists)):\n raise ValueError('Can only append to Tables')\n if not s.is_exists:\n s.set_object_info()\n else:\n s.set_object_info()\n\n if not s.is_table and complib:\n raise ValueError(\n 'Compression not supported on Fixed format stores'\n )\n\n # write the object\n s.write(obj=value, append=append, complib=complib, **kwargs)\n\n if s.is_table and index:\n s.create_index(columns=index)\n\n def _read_group(self, group, **kwargs):\n s = self._create_storer(group)\n s.infer_axes()\n return s.read(**kwargs)\n\n\ndef get_store(path, **kwargs):\n \"\"\" Backwards compatible alias for ``HDFStore``\n \"\"\"\n return HDFStore(path, **kwargs)\n\n\nclass TableIterator(object):\n\n \"\"\" define the iteration interface on a table\n\n Parameters\n ----------\n\n store : the reference store\n s : the refered storer\n func : the function to execute the query\n where : the where of the query\n nrows : the rows to iterate on\n start : the passed start value (default is None)\n stop : the passed stop value (default is 
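# Illustrative sketch of TableIterator: passing chunksize= or iterator=True
# to select()/read_hdf() returns the iterator instead of the full result.
# 'df_table' is a hypothetical example key.
# >>> for chunk in store.select('df_table', chunksize=2):
# ...     print(len(chunk))
# >>> it = store.select('df_table', iterator=True)   # chunksize defaults to 100000 here
# >>> result = pd.concat(it)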
None)\n iterator : boolean, whether to use the default iterator\n chunksize : the passed chunking value (default is 50000)\n auto_close : boolean, automatically close the store at the end of\n iteration, default is False\n kwargs : the passed kwargs\n \"\"\"\n\n def __init__(self, store, s, func, where, nrows, start=None, stop=None,\n iterator=False, chunksize=None, auto_close=False):\n self.store = store\n self.s = s\n self.func = func\n self.where = where\n\n # set start/stop if they are not set if we are a table\n if self.s.is_table:\n if nrows is None:\n nrows = 0\n if start is None:\n start = 0\n if stop is None:\n stop = nrows\n stop = min(nrows, stop)\n\n self.nrows = nrows\n self.start = start\n self.stop = stop\n\n self.coordinates = None\n if iterator or chunksize is not None:\n if chunksize is None:\n chunksize = 100000\n self.chunksize = int(chunksize)\n else:\n self.chunksize = None\n\n self.auto_close = auto_close\n\n def __iter__(self):\n\n # iterate\n current = self.start\n while current < self.stop:\n\n stop = min(current + self.chunksize, self.stop)\n value = self.func(None, None, self.coordinates[current:stop])\n current = stop\n if value is None or not len(value):\n continue\n\n yield value\n\n self.close()\n\n def close(self):\n if self.auto_close:\n self.store.close()\n\n def get_result(self, coordinates=False):\n\n # return the actual iterator\n if self.chunksize is not None:\n if not self.s.is_table:\n raise TypeError(\n \"can only use an iterator or chunksize on a table\")\n\n self.coordinates = self.s.read_coordinates(where=self.where)\n\n return self\n\n # if specified read via coordinates (necessary for multiple selections\n if coordinates:\n where = self.s.read_coordinates(where=self.where)\n else:\n where = self.where\n\n # directly return the result\n results = self.func(self.start, self.stop, where)\n self.close()\n return results\n\n\nclass IndexCol(StringMixin):\n\n \"\"\" an index column description class\n\n Parameters\n ----------\n\n axis : axis which I reference\n values : the ndarray like converted values\n kind : a string description of this type\n typ : the pytables type\n pos : the position in the pytables\n\n \"\"\"\n is_an_indexable = True\n is_data_indexable = True\n _info_fields = ['freq', 'tz', 'index_name']\n\n def __init__(self, values=None, kind=None, typ=None, cname=None,\n itemsize=None, name=None, axis=None, kind_attr=None,\n pos=None, freq=None, tz=None, index_name=None, **kwargs):\n self.values = values\n self.kind = kind\n self.typ = typ\n self.itemsize = itemsize\n self.name = name\n self.cname = cname\n self.kind_attr = kind_attr\n self.axis = axis\n self.pos = pos\n self.freq = freq\n self.tz = tz\n self.index_name = index_name\n self.table = None\n self.meta = None\n self.metadata = None\n\n if name is not None:\n self.set_name(name, kind_attr)\n if pos is not None:\n self.set_pos(pos)\n\n def set_name(self, name, kind_attr=None):\n \"\"\" set the name of this indexer \"\"\"\n self.name = name\n self.kind_attr = kind_attr or \"%s_kind\" % name\n if self.cname is None:\n self.cname = name\n\n return self\n\n def set_axis(self, axis):\n \"\"\" set the axis over which I index \"\"\"\n self.axis = axis\n\n return self\n\n def set_pos(self, pos):\n \"\"\" set the position of this column in the Table \"\"\"\n self.pos = pos\n if pos is not None and self.typ is not None:\n self.typ._v_pos = pos\n return self\n\n def set_table(self, table):\n self.table = table\n return self\n\n def __unicode__(self):\n temp = tuple(\n 
map(pprint_thing,\n (self.name,\n self.cname,\n self.axis,\n self.pos,\n self.kind)))\n return \"name->%s,cname->%s,axis->%s,pos->%s,kind->%s\" % temp\n\n def __eq__(self, other):\n \"\"\" compare 2 col items \"\"\"\n return all([getattr(self, a, None) == getattr(other, a, None)\n for a in ['name', 'cname', 'axis', 'pos']])\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def is_indexed(self):\n \"\"\" return whether I am an indexed column \"\"\"\n try:\n return getattr(self.table.cols, self.cname).is_indexed\n except:\n False\n\n def copy(self):\n new_self = copy.copy(self)\n return new_self\n\n def infer(self, handler):\n \"\"\"infer this column from the table: create and return a new object\"\"\"\n table = handler.table\n new_self = self.copy()\n new_self.set_table(table)\n new_self.get_attr()\n new_self.read_metadata(handler)\n return new_self\n\n def convert(self, values, nan_rep, encoding):\n \"\"\" set the values from this selection: take = take ownership \"\"\"\n\n # values is a recarray\n if values.dtype.fields is not None:\n values = values[self.cname]\n\n values = _maybe_convert(values, self.kind, encoding)\n\n kwargs = dict()\n if self.freq is not None:\n kwargs['freq'] = _ensure_decoded(self.freq)\n if self.index_name is not None:\n kwargs['name'] = _ensure_decoded(self.index_name)\n try:\n self.values = Index(values, **kwargs)\n except:\n\n # if the output freq is different that what we recorded,\n # it should be None (see also 'doc example part 2')\n if 'freq' in kwargs:\n kwargs['freq'] = None\n self.values = Index(values, **kwargs)\n\n self.values = _set_tz(self.values, self.tz)\n\n return self\n\n def take_data(self):\n \"\"\" return the values & release the memory \"\"\"\n self.values, values = None, self.values\n return values\n\n @property\n def attrs(self):\n return self.table._v_attrs\n\n @property\n def description(self):\n return self.table.description\n\n @property\n def col(self):\n \"\"\" return my current col description \"\"\"\n return getattr(self.description, self.cname, None)\n\n @property\n def cvalues(self):\n \"\"\" return my cython values \"\"\"\n return self.values\n\n def __iter__(self):\n return iter(self.values)\n\n def maybe_set_size(self, min_itemsize=None, **kwargs):\n \"\"\" maybe set a string col itemsize:\n min_itemsize can be an interger or a dict with this columns name\n with an integer size \"\"\"\n if _ensure_decoded(self.kind) == u('string'):\n\n if isinstance(min_itemsize, dict):\n min_itemsize = min_itemsize.get(self.name)\n\n if min_itemsize is not None and self.typ.itemsize < min_itemsize:\n self.typ = _tables(\n ).StringCol(itemsize=min_itemsize, pos=self.pos)\n\n def validate(self, handler, append, **kwargs):\n self.validate_names()\n\n def validate_names(self):\n pass\n\n def validate_and_set(self, handler, append, **kwargs):\n self.set_table(handler.table)\n self.validate_col()\n self.validate_attr(append)\n self.validate_metadata(handler)\n self.write_metadata(handler)\n self.set_attr()\n\n def validate_col(self, itemsize=None):\n \"\"\" validate this column: return the compared against itemsize \"\"\"\n\n # validate this column for string truncation (or reset to the max size)\n if _ensure_decoded(self.kind) == u('string'):\n c = self.col\n if c is not None:\n if itemsize is None:\n itemsize = self.itemsize\n if c.itemsize < itemsize:\n raise ValueError(\n \"Trying to store a string with len [%s] in [%s] \"\n \"column but\\nthis column has a limit of [%s]!\\n\"\n \"Consider using min_itemsize to preset 
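# Illustrative sketch of the string-sizing rules enforced by
# maybe_set_size()/validate_col() above: the column width is fixed by the
# first write, and min_itemsize reserves extra room for later appends. The
# key and column names are hypothetical examples.
# >>> store.append('strings', pd.DataFrame({'s': ['short']}),
# ...              min_itemsize={'s': 30})
# >>> store.append('strings', pd.DataFrame({'s': ['a' * 25]}))  # still fits in 30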
the sizes on \"\n \"these columns\" % (itemsize, self.cname, c.itemsize))\n return c.itemsize\n\n return None\n\n def validate_attr(self, append):\n # check for backwards incompatibility\n if append:\n existing_kind = getattr(self.attrs, self.kind_attr, None)\n if existing_kind is not None and existing_kind != self.kind:\n raise TypeError(\"incompatible kind in col [%s - %s]\" %\n (existing_kind, self.kind))\n\n def update_info(self, info):\n \"\"\" set/update the info for this indexable with the key/value\n if there is a conflict raise/warn as needed \"\"\"\n\n for key in self._info_fields:\n\n value = getattr(self, key, None)\n idx = _get_info(info, self.name)\n\n existing_value = idx.get(key)\n if key in idx and value is not None and existing_value != value:\n\n # frequency/name just warn\n if key in ['freq', 'index_name']:\n ws = attribute_conflict_doc % (key, existing_value, value)\n warnings.warn(ws, AttributeConflictWarning, stacklevel=6)\n\n # reset\n idx[key] = None\n setattr(self, key, None)\n\n else:\n raise ValueError(\n \"invalid info for [%s] for [%s], existing_value [%s] \"\n \"conflicts with new value [%s]\"\n % (self.name, key, existing_value, value))\n else:\n if value is not None or existing_value is not None:\n idx[key] = value\n\n return self\n\n def set_info(self, info):\n \"\"\" set my state from the passed info \"\"\"\n idx = info.get(self.name)\n if idx is not None:\n self.__dict__.update(idx)\n\n def get_attr(self):\n \"\"\" set the kind for this colummn \"\"\"\n self.kind = getattr(self.attrs, self.kind_attr, None)\n\n def set_attr(self):\n \"\"\" set the kind for this colummn \"\"\"\n setattr(self.attrs, self.kind_attr, self.kind)\n\n def read_metadata(self, handler):\n \"\"\" retrieve the metadata for this columns \"\"\"\n self.metadata = handler.read_metadata(self.cname)\n\n def validate_metadata(self, handler):\n \"\"\" validate that kind=category does not change the categories \"\"\"\n if self.meta == 'category':\n new_metadata = self.metadata\n cur_metadata = handler.read_metadata(self.cname)\n if new_metadata is not None and cur_metadata is not None \\\n and not array_equivalent(new_metadata, cur_metadata):\n raise ValueError(\"cannot append a categorical with \"\n \"different categories to the existing\")\n\n def write_metadata(self, handler):\n \"\"\" set the meta data \"\"\"\n if self.metadata is not None:\n handler.write_metadata(self.cname, self.metadata)\n\n\nclass GenericIndexCol(IndexCol):\n\n \"\"\" an index which is not represented in the data of the table \"\"\"\n\n @property\n def is_indexed(self):\n return False\n\n def convert(self, values, nan_rep, encoding):\n \"\"\" set the values from this selection: take = take ownership \"\"\"\n\n self.values = Int64Index(np.arange(self.table.nrows))\n return self\n\n def get_attr(self):\n pass\n\n def set_attr(self):\n pass\n\n\nclass DataCol(IndexCol):\n\n \"\"\" a data holding column, by definition this is not indexable\n\n Parameters\n ----------\n\n data : the actual data\n cname : the column name in the table to hold the data (typically\n values)\n meta : a string description of the metadata\n metadata : the actual metadata\n \"\"\"\n is_an_indexable = False\n is_data_indexable = False\n _info_fields = ['tz', 'ordered']\n\n @classmethod\n def create_for_block(\n cls, i=None, name=None, cname=None, version=None, **kwargs):\n \"\"\" return a new datacol with the block i \"\"\"\n\n if cname is None:\n cname = name or 'values_block_%d' % i\n if name is None:\n name = cname\n\n # prior to 0.10.1, we 
named values blocks like: values_block_0 an the\n # name values_0\n try:\n if version[0] == 0 and version[1] <= 10 and version[2] == 0:\n m = re.search(\"values_block_(\\d+)\", name)\n if m:\n name = \"values_%s\" % m.groups()[0]\n except:\n pass\n\n return cls(name=name, cname=cname, **kwargs)\n\n def __init__(self, values=None, kind=None, typ=None,\n cname=None, data=None, meta=None, metadata=None,\n block=None, **kwargs):\n super(DataCol, self).__init__(values=values, kind=kind, typ=typ,\n cname=cname, **kwargs)\n self.dtype = None\n self.dtype_attr = u(\"%s_dtype\" % self.name)\n self.meta = meta\n self.meta_attr = u(\"%s_meta\" % self.name)\n self.set_data(data)\n self.set_metadata(metadata)\n\n def __unicode__(self):\n temp = tuple(\n map(pprint_thing,\n (self.name,\n self.cname,\n self.dtype,\n self.kind,\n self.shape)))\n return \"name->%s,cname->%s,dtype->%s,kind->%s,shape->%s\" % temp\n\n def __eq__(self, other):\n \"\"\" compare 2 col items \"\"\"\n return all([getattr(self, a, None) == getattr(other, a, None)\n for a in ['name', 'cname', 'dtype', 'pos']])\n\n def set_data(self, data, dtype=None):\n self.data = data\n if data is not None:\n if dtype is not None:\n self.dtype = dtype\n self.set_kind()\n elif self.dtype is None:\n self.dtype = data.dtype.name\n self.set_kind()\n\n def take_data(self):\n \"\"\" return the data & release the memory \"\"\"\n self.data, data = None, self.data\n return data\n\n def set_metadata(self, metadata):\n \"\"\" record the metadata \"\"\"\n if metadata is not None:\n metadata = np.array(metadata, copy=False).ravel()\n self.metadata = metadata\n\n def set_kind(self):\n # set my kind if we can\n\n if self.dtype is not None:\n dtype = _ensure_decoded(self.dtype)\n\n if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):\n self.kind = 'string'\n elif dtype.startswith(u('float')):\n self.kind = 'float'\n elif dtype.startswith(u('complex')):\n self.kind = 'complex'\n elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):\n self.kind = 'integer'\n elif dtype.startswith(u('date')):\n self.kind = 'datetime'\n elif dtype.startswith(u('timedelta')):\n self.kind = 'timedelta'\n elif dtype.startswith(u('bool')):\n self.kind = 'bool'\n else:\n raise AssertionError(\n \"cannot interpret dtype of [%s] in [%s]\" % (dtype, self))\n\n # set my typ if we need\n if self.typ is None:\n self.typ = getattr(self.description, self.cname, None)\n\n def set_atom(self, block, block_items, existing_col, min_itemsize,\n nan_rep, info, encoding=None, **kwargs):\n \"\"\" create and setup my atom from the block b \"\"\"\n\n self.values = list(block_items)\n\n # short-cut certain block types\n if block.is_categorical:\n return self.set_atom_categorical(block, items=block_items,\n info=info)\n elif block.is_datetimetz:\n return self.set_atom_datetime64tz(block, info=info)\n elif block.is_datetime:\n return self.set_atom_datetime64(block)\n elif block.is_timedelta:\n return self.set_atom_timedelta64(block)\n elif block.is_complex:\n return self.set_atom_complex(block)\n\n dtype = block.dtype.name\n inferred_type = lib.infer_dtype(block.values)\n\n if inferred_type == 'date':\n raise TypeError(\n \"[date] is not implemented as a table column\")\n elif inferred_type == 'datetime':\n # after 8260\n # this only would be hit for a mutli-timezone dtype\n # which is an error\n\n raise TypeError(\n \"too many timezones in this block, create separate \"\n \"data columns\"\n )\n elif inferred_type == 'unicode':\n raise TypeError(\n \"[unicode] is not implemented as a 
table column\")\n\n # this is basically a catchall; if say a datetime64 has nans then will\n # end up here ###\n elif inferred_type == 'string' or dtype == 'object':\n self.set_atom_string(\n block, block_items,\n existing_col,\n min_itemsize,\n nan_rep,\n encoding)\n\n # set as a data block\n else:\n self.set_atom_data(block)\n\n def get_atom_string(self, block, itemsize):\n return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])\n\n def set_atom_string(self, block, block_items, existing_col, min_itemsize,\n nan_rep, encoding):\n # fill nan items with myself, don't disturb the blocks by\n # trying to downcast\n block = block.fillna(nan_rep, downcast=False)\n if isinstance(block, list):\n block = block[0]\n data = block.values\n\n # see if we have a valid string type\n inferred_type = lib.infer_dtype(data.ravel())\n if inferred_type != 'string':\n\n # we cannot serialize this data, so report an exception on a column\n # by column basis\n for i, item in enumerate(block_items):\n\n col = block.iget(i)\n inferred_type = lib.infer_dtype(col.ravel())\n if inferred_type != 'string':\n raise TypeError(\n \"Cannot serialize the column [%s] because\\n\"\n \"its data contents are [%s] object dtype\"\n % (item, inferred_type)\n )\n\n # itemsize is the maximum length of a string (along any dimension)\n data_converted = _convert_string_array(data, encoding)\n itemsize = data_converted.itemsize\n\n # specified min_itemsize?\n if isinstance(min_itemsize, dict):\n min_itemsize = int(min_itemsize.get(\n self.name) or min_itemsize.get('values') or 0)\n itemsize = max(min_itemsize or 0, itemsize)\n\n # check for column in the values conflicts\n if existing_col is not None:\n eci = existing_col.validate_col(itemsize)\n if eci > itemsize:\n itemsize = eci\n\n self.itemsize = itemsize\n self.kind = 'string'\n self.typ = self.get_atom_string(block, itemsize)\n self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))\n\n def get_atom_coltype(self, kind=None):\n \"\"\" return the PyTables column class for this column \"\"\"\n if kind is None:\n kind = self.kind\n if self.kind.startswith('uint'):\n col_name = \"UInt%sCol\" % kind[4:]\n else:\n col_name = \"%sCol\" % kind.capitalize()\n\n return getattr(_tables(), col_name)\n\n def get_atom_data(self, block, kind=None):\n return self.get_atom_coltype(kind=kind)(shape=block.shape[0])\n\n def set_atom_complex(self, block):\n self.kind = block.dtype.name\n itemsize = int(self.kind.split('complex')[-1]) // 8\n self.typ = _tables().ComplexCol(\n itemsize=itemsize, shape=block.shape[0])\n self.set_data(block.values.astype(self.typ.type, copy=False))\n\n def set_atom_data(self, block):\n self.kind = block.dtype.name\n self.typ = self.get_atom_data(block)\n self.set_data(block.values.astype(self.typ.type, copy=False))\n\n def set_atom_categorical(self, block, items, info=None, values=None):\n # currently only supports a 1-D categorical\n # in a 1-D block\n\n values = block.values\n codes = values.codes\n self.kind = 'integer'\n self.dtype = codes.dtype.name\n if values.ndim > 1:\n raise NotImplementedError(\"only support 1-d categoricals\")\n if len(items) > 1:\n raise NotImplementedError(\"only support single block categoricals\")\n\n # write the codes; must be in a block shape\n self.ordered = values.ordered\n self.typ = self.get_atom_data(block, kind=codes.dtype.name)\n self.set_data(_block_shape(codes))\n\n # write the categories\n self.meta = 'category'\n self.set_metadata(block.values.categories)\n\n # update the info\n 
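# Illustrative sketch of what set_atom_categorical() above supports: a 1-d
# categorical stored in table format, with the integer codes written as the
# data and the categories kept as metadata. 'cats' is a hypothetical key.
# >>> cat_df = pd.DataFrame({'c': pd.Categorical(['a', 'b', 'a'])})
# >>> store.append('cats', cat_df)
# >>> store.select('cats').dtypes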
self.update_info(info)\n\n def get_atom_datetime64(self, block):\n return _tables().Int64Col(shape=block.shape[0])\n\n def set_atom_datetime64(self, block, values=None):\n self.kind = 'datetime64'\n self.typ = self.get_atom_datetime64(block)\n if values is None:\n values = block.values.view('i8')\n self.set_data(values, 'datetime64')\n\n def set_atom_datetime64tz(self, block, info, values=None):\n\n if values is None:\n values = block.values\n\n # convert this column to i8 in UTC, and save the tz\n values = values.asi8.reshape(block.shape)\n\n # store a converted timezone\n self.tz = _get_tz(block.values.tz)\n self.update_info(info)\n\n self.kind = 'datetime64'\n self.typ = self.get_atom_datetime64(block)\n self.set_data(values, 'datetime64')\n\n def get_atom_timedelta64(self, block):\n return _tables().Int64Col(shape=block.shape[0])\n\n def set_atom_timedelta64(self, block, values=None):\n self.kind = 'timedelta64'\n self.typ = self.get_atom_timedelta64(block)\n if values is None:\n values = block.values.view('i8')\n self.set_data(values, 'timedelta64')\n\n @property\n def shape(self):\n return getattr(self.data, 'shape', None)\n\n @property\n def cvalues(self):\n \"\"\" return my cython values \"\"\"\n return self.data\n\n def validate_attr(self, append):\n \"\"\"validate that we have the same order as the existing & same dtype\"\"\"\n if append:\n existing_fields = getattr(self.attrs, self.kind_attr, None)\n if (existing_fields is not None and\n existing_fields != list(self.values)):\n raise ValueError(\"appended items do not match existing items\"\n \" in table!\")\n\n existing_dtype = getattr(self.attrs, self.dtype_attr, None)\n if (existing_dtype is not None and\n existing_dtype != self.dtype):\n raise ValueError(\"appended items dtype do not match existing \"\n \"items dtype in table!\")\n\n def convert(self, values, nan_rep, encoding):\n \"\"\"set the data from this selection (and convert to the correct dtype\n if we can)\n \"\"\"\n\n # values is a recarray\n if values.dtype.fields is not None:\n values = values[self.cname]\n\n self.set_data(values)\n\n # use the meta if needed\n meta = _ensure_decoded(self.meta)\n\n # convert to the correct dtype\n if self.dtype is not None:\n dtype = _ensure_decoded(self.dtype)\n\n # reverse converts\n if dtype == u('datetime64'):\n\n # recreate with tz if indicated\n self.data = _set_tz(self.data, self.tz, coerce=True)\n\n elif dtype == u('timedelta64'):\n self.data = np.asarray(self.data, dtype='m8[ns]')\n elif dtype == u('date'):\n try:\n self.data = np.asarray(\n [date.fromordinal(v) for v in self.data], dtype=object)\n except ValueError:\n self.data = np.asarray(\n [date.fromtimestamp(v) for v in self.data],\n dtype=object)\n elif dtype == u('datetime'):\n self.data = np.asarray(\n [datetime.fromtimestamp(v) for v in self.data],\n dtype=object)\n\n elif meta == u('category'):\n\n # we have a categorical\n categories = self.metadata\n self.data = Categorical.from_codes(self.data.ravel(),\n categories=categories,\n ordered=self.ordered)\n\n else:\n\n try:\n self.data = self.data.astype(dtype, copy=False)\n except:\n self.data = self.data.astype('O', copy=False)\n\n # convert nans / decode\n if _ensure_decoded(self.kind) == u('string'):\n self.data = _unconvert_string_array(\n self.data, nan_rep=nan_rep, encoding=encoding)\n\n return self\n\n def get_attr(self):\n \"\"\" get the data for this colummn \"\"\"\n self.values = getattr(self.attrs, self.kind_attr, None)\n self.dtype = getattr(self.attrs, self.dtype_attr, None)\n self.meta = 
getattr(self.attrs, self.meta_attr, None)\n self.set_kind()\n\n def set_attr(self):\n \"\"\" set the data for this colummn \"\"\"\n setattr(self.attrs, self.kind_attr, self.values)\n setattr(self.attrs, self.meta_attr, self.meta)\n if self.dtype is not None:\n setattr(self.attrs, self.dtype_attr, self.dtype)\n\n\nclass DataIndexableCol(DataCol):\n\n \"\"\" represent a data column that can be indexed \"\"\"\n is_data_indexable = True\n\n def validate_names(self):\n if not Index(self.values).is_object():\n raise ValueError(\"cannot have non-object label DataIndexableCol\")\n\n def get_atom_string(self, block, itemsize):\n return _tables().StringCol(itemsize=itemsize)\n\n def get_atom_data(self, block, kind=None):\n return self.get_atom_coltype(kind=kind)()\n\n def get_atom_datetime64(self, block):\n return _tables().Int64Col()\n\n def get_atom_timedelta64(self, block):\n return _tables().Int64Col()\n\n\nclass GenericDataIndexableCol(DataIndexableCol):\n\n \"\"\" represent a generic pytables data column \"\"\"\n\n def get_attr(self):\n pass\n\n\nclass Fixed(StringMixin):\n\n \"\"\" represent an object in my store\n facilitate read/write of various types of objects\n this is an abstract base class\n\n Parameters\n ----------\n\n parent : my parent HDFStore\n group : the group node where the table resides\n \"\"\"\n pandas_kind = None\n obj_type = None\n ndim = None\n is_table = False\n\n def __init__(self, parent, group, encoding=None, **kwargs):\n self.parent = parent\n self.group = group\n self.encoding = _ensure_encoding(encoding)\n self.set_version()\n\n @property\n def is_old_version(self):\n return (self.version[0] <= 0 and self.version[1] <= 10 and\n self.version[2] < 1)\n\n def set_version(self):\n \"\"\" compute and set our version \"\"\"\n version = _ensure_decoded(\n getattr(self.group._v_attrs, 'pandas_version', None))\n try:\n self.version = tuple([int(x) for x in version.split('.')])\n if len(self.version) == 2:\n self.version = self.version + (0,)\n except:\n self.version = (0, 0, 0)\n\n @property\n def pandas_type(self):\n return _ensure_decoded(getattr(self.group._v_attrs,\n 'pandas_type', None))\n\n @property\n def format_type(self):\n return 'fixed'\n\n def __unicode__(self):\n \"\"\" return a pretty representation of myself \"\"\"\n self.infer_axes()\n s = self.shape\n if s is not None:\n if isinstance(s, (list, tuple)):\n s = \"[%s]\" % ','.join([pprint_thing(x) for x in s])\n return \"%-12.12s (shape->%s)\" % (self.pandas_type, s)\n return self.pandas_type\n\n def set_object_info(self):\n \"\"\" set my pandas type & version \"\"\"\n self.attrs.pandas_type = str(self.pandas_kind)\n self.attrs.pandas_version = str(_version)\n self.set_version()\n\n def copy(self):\n new_self = copy.copy(self)\n return new_self\n\n @property\n def storage_obj_type(self):\n return self.obj_type\n\n @property\n def shape(self):\n return self.nrows\n\n @property\n def pathname(self):\n return self.group._v_pathname\n\n @property\n def _handle(self):\n return self.parent._handle\n\n @property\n def _filters(self):\n return self.parent._filters\n\n @property\n def _complevel(self):\n return self.parent._complevel\n\n @property\n def _fletcher32(self):\n return self.parent._fletcher32\n\n @property\n def _complib(self):\n return self.parent._complib\n\n @property\n def attrs(self):\n return self.group._v_attrs\n\n def set_attrs(self):\n \"\"\" set our object attributes \"\"\"\n pass\n\n def get_attrs(self):\n \"\"\" get our object attributes \"\"\"\n pass\n\n @property\n def storable(self):\n 
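# Illustrative sketch of inspecting a storer (a Fixed/Table subclass) through
# HDFStore.get_storer(); the keys are hypothetical examples from the earlier
# sketches.
# >>> s = store.get_storer('df_table')
# >>> s.is_table, s.nrows, s.pathname
# >>> store.get_storer('df_fixed').format_type     # 'fixed'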
\"\"\" return my storable \"\"\"\n return self.group\n\n @property\n def is_exists(self):\n return False\n\n @property\n def nrows(self):\n return getattr(self.storable, 'nrows', None)\n\n def validate(self, other):\n \"\"\" validate against an existing storable \"\"\"\n if other is None:\n return\n return True\n\n def validate_version(self, where=None):\n \"\"\" are we trying to operate on an old version? \"\"\"\n return True\n\n def infer_axes(self):\n \"\"\" infer the axes of my storer\n return a boolean indicating if we have a valid storer or not \"\"\"\n\n s = self.storable\n if s is None:\n return False\n self.get_attrs()\n return True\n\n def read(self, **kwargs):\n raise NotImplementedError(\n \"cannot read on an abstract storer: subclasses should implement\")\n\n def write(self, **kwargs):\n raise NotImplementedError(\n \"cannot write on an abstract storer: sublcasses should implement\")\n\n def delete(self, where=None, start=None, stop=None, **kwargs):\n \"\"\"\n support fully deleting the node in its entirety (only) - where\n specification must be None\n \"\"\"\n if where is None and start is None and stop is None:\n self._handle.remove_node(self.group, recursive=True)\n return None\n\n raise TypeError(\"cannot delete on an abstract storer\")\n\n\nclass GenericFixed(Fixed):\n\n \"\"\" a generified fixed version \"\"\"\n _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}\n _reverse_index_map = dict([(v, k)\n for k, v in compat.iteritems(_index_type_map)])\n attributes = []\n\n # indexer helpders\n def _class_to_alias(self, cls):\n return self._index_type_map.get(cls, '')\n\n def _alias_to_class(self, alias):\n if isinstance(alias, type): # pragma: no cover\n # compat: for a short period of time master stored types\n return alias\n return self._reverse_index_map.get(alias, Index)\n\n def _get_index_factory(self, klass):\n if klass == DatetimeIndex:\n def f(values, freq=None, tz=None):\n return DatetimeIndex._simple_new(values, None, freq=freq,\n tz=tz)\n return f\n elif klass == PeriodIndex:\n def f(values, freq=None, tz=None):\n return PeriodIndex._simple_new(values, None, freq=freq)\n return f\n\n return klass\n\n def validate_read(self, kwargs):\n \"\"\"\n remove table keywords from kwargs and return\n raise if any keywords are passed which are not-None\n \"\"\"\n kwargs = copy.copy(kwargs)\n\n columns = kwargs.pop('columns', None)\n if columns is not None:\n raise TypeError(\"cannot pass a column specification when reading \"\n \"a Fixed format store. this store must be \"\n \"selected in its entirety\")\n where = kwargs.pop('where', None)\n if where is not None:\n raise TypeError(\"cannot pass a where specification when reading \"\n \"from a Fixed format store. 
this store must be \"\n \"selected in its entirety\")\n return kwargs\n\n @property\n def is_exists(self):\n return True\n\n def set_attrs(self):\n \"\"\" set our object attributes \"\"\"\n self.attrs.encoding = self.encoding\n\n def get_attrs(self):\n \"\"\" retrieve our attributes \"\"\"\n self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))\n for n in self.attributes:\n setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))\n\n def write(self, obj, **kwargs):\n self.set_attrs()\n\n def read_array(self, key, start=None, stop=None):\n \"\"\" read an array for the specified node (off of group \"\"\"\n import tables\n node = getattr(self.group, key)\n data = node[start:stop]\n attrs = node._v_attrs\n\n transposed = getattr(attrs, 'transposed', False)\n\n if isinstance(node, tables.VLArray):\n ret = data[0]\n else:\n dtype = getattr(attrs, 'value_type', None)\n shape = getattr(attrs, 'shape', None)\n\n if shape is not None:\n # length 0 axis\n ret = np.empty(shape, dtype=dtype)\n else:\n ret = data\n\n if dtype == u('datetime64'):\n\n # reconstruct a timezone if indicated\n ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)\n\n elif dtype == u('timedelta64'):\n ret = np.asarray(ret, dtype='m8[ns]')\n\n if transposed:\n return ret.T\n else:\n return ret\n\n def read_index(self, key, **kwargs):\n variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))\n\n if variety == u('multi'):\n return self.read_multi_index(key, **kwargs)\n elif variety == u('block'):\n return self.read_block_index(key, **kwargs)\n elif variety == u('sparseint'):\n return self.read_sparse_intindex(key, **kwargs)\n elif variety == u('regular'):\n _, index = self.read_index_node(getattr(self.group, key), **kwargs)\n return index\n else: # pragma: no cover\n raise TypeError('unrecognized index variety: %s' % variety)\n\n def write_index(self, key, index):\n if isinstance(index, MultiIndex):\n setattr(self.attrs, '%s_variety' % key, 'multi')\n self.write_multi_index(key, index)\n elif isinstance(index, BlockIndex):\n setattr(self.attrs, '%s_variety' % key, 'block')\n self.write_block_index(key, index)\n elif isinstance(index, IntIndex):\n setattr(self.attrs, '%s_variety' % key, 'sparseint')\n self.write_sparse_intindex(key, index)\n else:\n setattr(self.attrs, '%s_variety' % key, 'regular')\n converted = _convert_index(index, self.encoding,\n self.format_type).set_name('index')\n\n self.write_array(key, converted.values)\n\n node = getattr(self.group, key)\n node._v_attrs.kind = converted.kind\n node._v_attrs.name = index.name\n\n if isinstance(index, (DatetimeIndex, PeriodIndex)):\n node._v_attrs.index_class = self._class_to_alias(type(index))\n\n if hasattr(index, 'freq'):\n node._v_attrs.freq = index.freq\n\n if hasattr(index, 'tz') and index.tz is not None:\n node._v_attrs.tz = _get_tz(index.tz)\n\n def write_block_index(self, key, index):\n self.write_array('%s_blocs' % key, index.blocs)\n self.write_array('%s_blengths' % key, index.blengths)\n setattr(self.attrs, '%s_length' % key, index.length)\n\n def read_block_index(self, key, **kwargs):\n length = getattr(self.attrs, '%s_length' % key)\n blocs = self.read_array('%s_blocs' % key, **kwargs)\n blengths = self.read_array('%s_blengths' % key, **kwargs)\n return BlockIndex(length, blocs, blengths)\n\n def write_sparse_intindex(self, key, index):\n self.write_array('%s_indices' % key, index.indices)\n setattr(self.attrs, '%s_length' % key, index.length)\n\n def read_sparse_intindex(self, key, **kwargs):\n length = 
getattr(self.attrs, '%s_length' % key)\n indices = self.read_array('%s_indices' % key, **kwargs)\n return IntIndex(length, indices)\n\n def write_multi_index(self, key, index):\n setattr(self.attrs, '%s_nlevels' % key, index.nlevels)\n\n for i, (lev, lab, name) in enumerate(zip(index.levels,\n index.labels,\n index.names)):\n # write the level\n level_key = '%s_level%d' % (key, i)\n conv_level = _convert_index(lev, self.encoding,\n self.format_type).set_name(level_key)\n self.write_array(level_key, conv_level.values)\n node = getattr(self.group, level_key)\n node._v_attrs.kind = conv_level.kind\n node._v_attrs.name = name\n\n # write the name\n setattr(node._v_attrs, '%s_name%d' % (key, i), name)\n\n # write the labels\n label_key = '%s_label%d' % (key, i)\n self.write_array(label_key, lab)\n\n def read_multi_index(self, key, **kwargs):\n nlevels = getattr(self.attrs, '%s_nlevels' % key)\n\n levels = []\n labels = []\n names = []\n for i in range(nlevels):\n level_key = '%s_level%d' % (key, i)\n name, lev = self.read_index_node(getattr(self.group, level_key),\n **kwargs)\n levels.append(lev)\n names.append(name)\n\n label_key = '%s_label%d' % (key, i)\n lab = self.read_array(label_key, **kwargs)\n labels.append(lab)\n\n return MultiIndex(levels=levels, labels=labels, names=names,\n verify_integrity=True)\n\n def read_index_node(self, node, start=None, stop=None):\n data = node[start:stop]\n # If the index was an empty array write_array_empty() will\n # have written a sentinel. Here we relace it with the original.\n if ('shape' in node._v_attrs and\n self._is_empty_array(getattr(node._v_attrs, 'shape'))):\n data = np.empty(getattr(node._v_attrs, 'shape'),\n dtype=getattr(node._v_attrs, 'value_type'))\n kind = _ensure_decoded(node._v_attrs.kind)\n name = None\n\n if 'name' in node._v_attrs:\n name = node._v_attrs.name\n\n index_class = self._alias_to_class(getattr(node._v_attrs,\n 'index_class', ''))\n factory = self._get_index_factory(index_class)\n\n kwargs = {}\n if u('freq') in node._v_attrs:\n kwargs['freq'] = node._v_attrs['freq']\n\n if u('tz') in node._v_attrs:\n kwargs['tz'] = node._v_attrs['tz']\n\n if kind in (u('date'), u('datetime')):\n index = factory(_unconvert_index(data, kind,\n encoding=self.encoding),\n dtype=object, **kwargs)\n else:\n index = factory(_unconvert_index(data, kind,\n encoding=self.encoding), **kwargs)\n\n index.name = name\n\n return name, index\n\n def write_array_empty(self, key, value):\n \"\"\" write a 0-len array \"\"\"\n\n # ugly hack for length 0 axes\n arr = np.empty((1,) * value.ndim)\n self._handle.create_array(self.group, key, arr)\n getattr(self.group, key)._v_attrs.value_type = str(value.dtype)\n getattr(self.group, key)._v_attrs.shape = value.shape\n\n def _is_empty_array(self, shape):\n \"\"\"Returns true if any axis is zero length.\"\"\"\n return any(x == 0 for x in shape)\n\n def write_array(self, key, value, items=None):\n if key in self.group:\n self._handle.remove_node(self.group, key)\n\n # Transform needed to interface with pytables row/col notation\n empty_array = self._is_empty_array(value.shape)\n transposed = False\n\n if is_categorical_dtype(value):\n raise NotImplementedError('Cannot store a category dtype in '\n 'a HDF5 dataset that uses format='\n '\"fixed\". 
Use format=\"table\".')\n\n if not empty_array:\n value = value.T\n transposed = True\n\n if self._filters is not None:\n atom = None\n try:\n # get the atom for this datatype\n atom = _tables().Atom.from_dtype(value.dtype)\n except ValueError:\n pass\n\n if atom is not None:\n # create an empty chunked array and fill it from value\n if not empty_array:\n ca = self._handle.create_carray(self.group, key, atom,\n value.shape,\n filters=self._filters)\n ca[:] = value\n getattr(self.group, key)._v_attrs.transposed = transposed\n\n else:\n self.write_array_empty(key, value)\n\n return\n\n if value.dtype.type == np.object_:\n\n # infer the type, warn if we have a non-string type here (for\n # performance)\n inferred_type = lib.infer_dtype(value.ravel())\n if empty_array:\n pass\n elif inferred_type == 'string':\n pass\n else:\n try:\n items = list(items)\n except:\n pass\n ws = performance_doc % (inferred_type, key, items)\n warnings.warn(ws, PerformanceWarning, stacklevel=7)\n\n vlarr = self._handle.create_vlarray(self.group, key,\n _tables().ObjectAtom())\n vlarr.append(value)\n else:\n if empty_array:\n self.write_array_empty(key, value)\n else:\n if is_datetime64_dtype(value.dtype):\n self._handle.create_array(\n self.group, key, value.view('i8'))\n getattr(\n self.group, key)._v_attrs.value_type = 'datetime64'\n elif is_datetime64tz_dtype(value.dtype):\n # store as UTC\n # with a zone\n self._handle.create_array(self.group, key,\n value.asi8)\n\n node = getattr(self.group, key)\n node._v_attrs.tz = _get_tz(value.tz)\n node._v_attrs.value_type = 'datetime64'\n elif is_timedelta64_dtype(value.dtype):\n self._handle.create_array(\n self.group, key, value.view('i8'))\n getattr(\n self.group, key)._v_attrs.value_type = 'timedelta64'\n else:\n self._handle.create_array(self.group, key, value)\n\n getattr(self.group, key)._v_attrs.transposed = transposed\n\n\nclass LegacyFixed(GenericFixed):\n\n def read_index_legacy(self, key, start=None, stop=None):\n node = getattr(self.group, key)\n data = node[start:stop]\n kind = node._v_attrs.kind\n return _unconvert_index_legacy(data, kind, encoding=self.encoding)\n\n\nclass LegacySeriesFixed(LegacyFixed):\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n index = self.read_index_legacy('index')\n values = self.read_array('values')\n return Series(values, index=index)\n\n\nclass LegacyFrameFixed(LegacyFixed):\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n index = self.read_index_legacy('index')\n columns = self.read_index_legacy('columns')\n values = self.read_array('values')\n return DataFrame(values, index=index, columns=columns)\n\n\nclass SeriesFixed(GenericFixed):\n pandas_kind = u('series')\n attributes = ['name']\n\n @property\n def shape(self):\n try:\n return len(getattr(self.group, 'values')),\n except:\n return None\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n index = self.read_index('index', **kwargs)\n values = self.read_array('values', **kwargs)\n return Series(values, index=index, name=self.name)\n\n def write(self, obj, **kwargs):\n super(SeriesFixed, self).write(obj, **kwargs)\n self.write_index('index', obj.index)\n self.write_array('values', obj.values)\n self.attrs.name = obj.name\n\n\nclass SparseFixed(GenericFixed):\n\n def validate_read(self, kwargs):\n \"\"\"\n we don't support start, stop kwds in Sparse\n \"\"\"\n kwargs = super(SparseFixed, self).validate_read(kwargs)\n if 'start' in kwargs or 'stop' in kwargs:\n raise NotImplementedError(\"start and/or stop are 
not supported \"\n \"in fixed Sparse reading\")\n return kwargs\n\n\nclass SparseSeriesFixed(SparseFixed):\n pandas_kind = u('sparse_series')\n attributes = ['name', 'fill_value', 'kind']\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n index = self.read_index('index')\n sp_values = self.read_array('sp_values')\n sp_index = self.read_index('sp_index')\n return SparseSeries(sp_values, index=index, sparse_index=sp_index,\n kind=self.kind or u('block'),\n fill_value=self.fill_value,\n name=self.name)\n\n def write(self, obj, **kwargs):\n super(SparseSeriesFixed, self).write(obj, **kwargs)\n self.write_index('index', obj.index)\n self.write_index('sp_index', obj.sp_index)\n self.write_array('sp_values', obj.sp_values)\n self.attrs.name = obj.name\n self.attrs.fill_value = obj.fill_value\n self.attrs.kind = obj.kind\n\n\nclass SparseFrameFixed(SparseFixed):\n pandas_kind = u('sparse_frame')\n attributes = ['default_kind', 'default_fill_value']\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n columns = self.read_index('columns')\n sdict = {}\n for c in columns:\n key = 'sparse_series_%s' % c\n s = SparseSeriesFixed(self.parent, getattr(self.group, key))\n s.infer_axes()\n sdict[c] = s.read()\n return SparseDataFrame(sdict, columns=columns,\n default_kind=self.default_kind,\n default_fill_value=self.default_fill_value)\n\n def write(self, obj, **kwargs):\n \"\"\" write it as a collection of individual sparse series \"\"\"\n super(SparseFrameFixed, self).write(obj, **kwargs)\n for name, ss in compat.iteritems(obj):\n key = 'sparse_series_%s' % name\n if key not in self.group._v_children:\n node = self._handle.create_group(self.group, key)\n else:\n node = getattr(self.group, key)\n s = SparseSeriesFixed(self.parent, node)\n s.write(ss)\n self.attrs.default_fill_value = obj.default_fill_value\n self.attrs.default_kind = obj.default_kind\n self.write_index('columns', obj.columns)\n\n\nclass BlockManagerFixed(GenericFixed):\n attributes = ['ndim', 'nblocks']\n is_shape_reversed = False\n\n @property\n def shape(self):\n try:\n ndim = self.ndim\n\n # items\n items = 0\n for i in range(self.nblocks):\n node = getattr(self.group, 'block%d_items' % i)\n shape = getattr(node, 'shape', None)\n if shape is not None:\n items += shape[0]\n\n # data shape\n node = getattr(self.group, 'block0_values')\n shape = getattr(node, 'shape', None)\n if shape is not None:\n shape = list(shape[0:(ndim - 1)])\n else:\n shape = []\n\n shape.append(items)\n\n # hacky - this works for frames, but is reversed for panels\n if self.is_shape_reversed:\n shape = shape[::-1]\n\n return shape\n except:\n return None\n\n def read(self, start=None, stop=None, **kwargs):\n # start, stop applied to rows, so 0th axis only\n\n kwargs = self.validate_read(kwargs)\n select_axis = self.obj_type()._get_block_manager_axis(0)\n\n axes = []\n for i in range(self.ndim):\n\n _start, _stop = (start, stop) if i == select_axis else (None, None)\n ax = self.read_index('axis%d' % i, start=_start, stop=_stop)\n axes.append(ax)\n\n items = axes[0]\n blocks = []\n for i in range(self.nblocks):\n\n blk_items = self.read_index('block%d_items' % i)\n values = self.read_array('block%d_values' % i,\n start=_start, stop=_stop)\n blk = make_block(values,\n placement=items.get_indexer(blk_items))\n blocks.append(blk)\n\n return self.obj_type(BlockManager(blocks, axes))\n\n def write(self, obj, **kwargs):\n super(BlockManagerFixed, self).write(obj, **kwargs)\n data = obj._data\n if not data.is_consolidated():\n data = 
data.consolidate()\n\n self.attrs.ndim = data.ndim\n for i, ax in enumerate(data.axes):\n if i == 0:\n if not ax.is_unique:\n raise ValueError(\n \"Columns index has to be unique for fixed format\")\n self.write_index('axis%d' % i, ax)\n\n # Supporting mixed-type DataFrame objects...nontrivial\n self.attrs.nblocks = len(data.blocks)\n for i, blk in enumerate(data.blocks):\n # I have no idea why, but writing values before items fixed #2299\n blk_items = data.items.take(blk.mgr_locs)\n self.write_array('block%d_values' % i, blk.values, items=blk_items)\n self.write_index('block%d_items' % i, blk_items)\n\n\nclass FrameFixed(BlockManagerFixed):\n pandas_kind = u('frame')\n obj_type = DataFrame\n\n\nclass PanelFixed(BlockManagerFixed):\n pandas_kind = u('wide')\n obj_type = Panel\n is_shape_reversed = True\n\n def write(self, obj, **kwargs):\n obj._consolidate_inplace()\n return super(PanelFixed, self).write(obj, **kwargs)\n\n\nclass Table(Fixed):\n\n \"\"\" represent a table:\n facilitate read/write of various types of tables\n\n Attrs in Table Node\n -------------------\n These are attributes that are store in the main table node, they are\n necessary to recreate these tables when read back in.\n\n index_axes : a list of tuples of the (original indexing axis and\n index column)\n non_index_axes: a list of tuples of the (original index axis and\n columns on a non-indexing axis)\n values_axes : a list of the columns which comprise the data of this\n table\n data_columns : a list of the columns that we are allowing indexing\n (these become single columns in values_axes), or True to force all\n columns\n nan_rep : the string to use for nan representations for string\n objects\n levels : the names of levels\n metadata : the names of the metadata columns\n\n \"\"\"\n pandas_kind = u('wide_table')\n table_type = None\n levels = 1\n is_table = True\n is_shape_reversed = False\n\n def __init__(self, *args, **kwargs):\n super(Table, self).__init__(*args, **kwargs)\n self.index_axes = []\n self.non_index_axes = []\n self.values_axes = []\n self.data_columns = []\n self.metadata = []\n self.info = dict()\n self.nan_rep = None\n self.selection = None\n\n @property\n def table_type_short(self):\n return self.table_type.split('_')[0]\n\n @property\n def format_type(self):\n return 'table'\n\n def __unicode__(self):\n \"\"\" return a pretty representatgion of myself \"\"\"\n self.infer_axes()\n dc = \",dc->[%s]\" % ','.join(\n self.data_columns) if len(self.data_columns) else ''\n\n ver = ''\n if self.is_old_version:\n ver = \"[%s]\" % '.'.join([str(x) for x in self.version])\n\n return \"%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)\" % (\n self.pandas_type, ver, self.table_type_short, self.nrows,\n self.ncols, ','.join([a.name for a in self.index_axes]), dc\n )\n\n def __getitem__(self, c):\n \"\"\" return the axis for c \"\"\"\n for a in self.axes:\n if c == a.name:\n return a\n return None\n\n def validate(self, other):\n \"\"\" validate against an existing table \"\"\"\n if other is None:\n return\n\n if other.table_type != self.table_type:\n raise TypeError(\"incompatible table_type with existing [%s - %s]\" %\n (other.table_type, self.table_type))\n\n for c in ['index_axes', 'non_index_axes', 'values_axes']:\n sv = getattr(self, c, None)\n ov = getattr(other, c, None)\n if sv != ov:\n\n # show the error for the specific axes\n for i, sax in enumerate(sv):\n oax = ov[i]\n if sax != oax:\n raise ValueError(\n \"invalid combinate of [%s] on appending data [%s] \"\n \"vs current table [%s]\" 
% (c, sax, oax))\n\n # should never get here\n raise Exception(\n \"invalid combinate of [%s] on appending data [%s] vs \"\n \"current table [%s]\" % (c, sv, ov))\n\n @property\n def is_multi_index(self):\n \"\"\"the levels attribute is 1 or a list in the case of a multi-index\"\"\"\n return isinstance(self.levels, list)\n\n def validate_metadata(self, existing):\n \"\"\" create / validate metadata \"\"\"\n self.metadata = [\n c.name for c in self.values_axes if c.metadata is not None]\n\n def validate_multiindex(self, obj):\n \"\"\"validate that we can store the multi-index; reset and return the\n new object\n \"\"\"\n levels = [l if l is not None else \"level_{0}\".format(i)\n for i, l in enumerate(obj.index.names)]\n try:\n return obj.reset_index(), levels\n except ValueError:\n raise ValueError(\"duplicate names/columns in the multi-index when \"\n \"storing as a table\")\n\n @property\n def nrows_expected(self):\n \"\"\" based on our axes, compute the expected nrows \"\"\"\n return np.prod([i.cvalues.shape[0] for i in self.index_axes])\n\n @property\n def is_exists(self):\n \"\"\" has this table been created \"\"\"\n return u('table') in self.group\n\n @property\n def storable(self):\n return getattr(self.group, 'table', None)\n\n @property\n def table(self):\n \"\"\" return the table group (this is my storable) \"\"\"\n return self.storable\n\n @property\n def dtype(self):\n return self.table.dtype\n\n @property\n def description(self):\n return self.table.description\n\n @property\n def axes(self):\n return itertools.chain(self.index_axes, self.values_axes)\n\n @property\n def ncols(self):\n \"\"\" the number of total columns in the values axes \"\"\"\n return sum([len(a.values) for a in self.values_axes])\n\n @property\n def is_transposed(self):\n return False\n\n @property\n def data_orientation(self):\n \"\"\"return a tuple of my permutated axes, non_indexable at the front\"\"\"\n return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],\n [int(a.axis) for a in self.index_axes]))\n\n def queryables(self):\n \"\"\" return a dict of the kinds allowable columns for this object \"\"\"\n\n # compute the values_axes queryables\n return dict(\n [(a.cname, a) for a in self.index_axes] +\n [(self.storage_obj_type._AXIS_NAMES[axis], None)\n for axis, values in self.non_index_axes] +\n [(v.cname, v) for v in self.values_axes\n if v.name in set(self.data_columns)]\n )\n\n def index_cols(self):\n \"\"\" return a list of my index cols \"\"\"\n return [(i.axis, i.cname) for i in self.index_axes]\n\n def values_cols(self):\n \"\"\" return a list of my values cols \"\"\"\n return [i.cname for i in self.values_axes]\n\n def _get_metadata_path(self, key):\n \"\"\" return the metadata pathname for this key \"\"\"\n return \"{group}/meta/{key}/meta\".format(group=self.group._v_pathname,\n key=key)\n\n def write_metadata(self, key, values):\n \"\"\"\n write out a meta data array to the key as a fixed-format Series\n\n Parameters\n ----------\n key : string\n values : ndarray\n\n \"\"\"\n values = Series(values)\n self.parent.put(self._get_metadata_path(key), values, format='table',\n encoding=self.encoding, nan_rep=self.nan_rep)\n\n def read_metadata(self, key):\n \"\"\" return the meta data array for this key \"\"\"\n if getattr(getattr(self.group, 'meta', None), key, None) is not None:\n return self.parent.select(self._get_metadata_path(key))\n return None\n\n def set_info(self):\n \"\"\" update our table index info \"\"\"\n self.attrs.info = self.info\n\n def set_attrs(self):\n \"\"\" 
set our table type & indexables \"\"\"\n self.attrs.table_type = str(self.table_type)\n self.attrs.index_cols = self.index_cols()\n self.attrs.values_cols = self.values_cols()\n self.attrs.non_index_axes = self.non_index_axes\n self.attrs.data_columns = self.data_columns\n self.attrs.nan_rep = self.nan_rep\n self.attrs.encoding = self.encoding\n self.attrs.levels = self.levels\n self.attrs.metadata = self.metadata\n self.set_info()\n\n def get_attrs(self):\n \"\"\" retrieve our attributes \"\"\"\n self.non_index_axes = getattr(\n self.attrs, 'non_index_axes', None) or []\n self.data_columns = getattr(\n self.attrs, 'data_columns', None) or []\n self.info = getattr(\n self.attrs, 'info', None) or dict()\n self.nan_rep = getattr(self.attrs, 'nan_rep', None)\n self.encoding = _ensure_encoding(\n getattr(self.attrs, 'encoding', None))\n self.levels = getattr(\n self.attrs, 'levels', None) or []\n self.index_axes = [\n a.infer(self) for a in self.indexables if a.is_an_indexable\n ]\n self.values_axes = [\n a.infer(self) for a in self.indexables if not a.is_an_indexable\n ]\n self.metadata = getattr(\n self.attrs, 'metadata', None) or []\n\n def validate_version(self, where=None):\n \"\"\" are we trying to operate on an old version? \"\"\"\n if where is not None:\n if (self.version[0] <= 0 and self.version[1] <= 10 and\n self.version[2] < 1):\n ws = incompatibility_doc % '.'.join(\n [str(x) for x in self.version])\n warnings.warn(ws, IncompatibilityWarning)\n\n def validate_min_itemsize(self, min_itemsize):\n \"\"\"validate the min_itemisze doesn't contain items that are not in the\n axes this needs data_columns to be defined\n \"\"\"\n if min_itemsize is None:\n return\n if not isinstance(min_itemsize, dict):\n return\n\n q = self.queryables()\n for k, v in min_itemsize.items():\n\n # ok, apply generally\n if k == 'values':\n continue\n if k not in q:\n raise ValueError(\n \"min_itemsize has the key [%s] which is not an axis or \"\n \"data_column\" % k)\n\n @property\n def indexables(self):\n \"\"\" create/cache the indexables if they don't exist \"\"\"\n if self._indexables is None:\n\n self._indexables = []\n\n # index columns\n self._indexables.extend([\n IndexCol(name=name, axis=axis, pos=i)\n for i, (axis, name) in enumerate(self.attrs.index_cols)\n ])\n\n # values columns\n dc = set(self.data_columns)\n base_pos = len(self._indexables)\n\n def f(i, c):\n klass = DataCol\n if c in dc:\n klass = DataIndexableCol\n return klass.create_for_block(i=i, name=c, pos=base_pos + i,\n version=self.version)\n\n self._indexables.extend(\n [f(i, c) for i, c in enumerate(self.attrs.values_cols)])\n\n return self._indexables\n\n def create_index(self, columns=None, optlevel=None, kind=None):\n \"\"\"\n Create a pytables index on the specified columns\n note: cannot index Time64Col() or ComplexCol currently;\n PyTables must be >= 3.0\n\n Parameters\n ----------\n columns : False (don't create an index), True (create all columns\n index), None or list_like (the indexers to index)\n optlevel: optimization level (defaults to 6)\n kind : kind of index (defaults to 'medium')\n\n Exceptions\n ----------\n raises if the node is not a table\n\n \"\"\"\n\n if not self.infer_axes():\n return\n if columns is False:\n return\n\n # index all indexables and data_columns\n if columns is None or columns is True:\n columns = [a.cname for a in self.axes if a.is_data_indexable]\n if not isinstance(columns, (tuple, list)):\n columns = [columns]\n\n kw = dict()\n if optlevel is not None:\n kw['optlevel'] = optlevel\n if kind 
is not None:\n kw['kind'] = kind\n\n table = self.table\n for c in columns:\n v = getattr(table.cols, c, None)\n if v is not None:\n\n # remove the index if the kind/optlevel have changed\n if v.is_indexed:\n index = v.index\n cur_optlevel = index.optlevel\n cur_kind = index.kind\n\n if kind is not None and cur_kind != kind:\n v.remove_index()\n else:\n kw['kind'] = cur_kind\n\n if optlevel is not None and cur_optlevel != optlevel:\n v.remove_index()\n else:\n kw['optlevel'] = cur_optlevel\n\n # create the index\n if not v.is_indexed:\n if v.type.startswith('complex'):\n raise TypeError(\n 'Columns containing complex values can be stored '\n 'but cannot'\n ' be indexed when using table format. Either use '\n 'fixed format, set index=False, or do not include '\n 'the columns containing complex values to '\n 'data_columns when initializing the table.')\n v.create_index(**kw)\n\n def read_axes(self, where, **kwargs):\n \"\"\"create and return the axes sniffed from the table: return boolean\n for success\n \"\"\"\n\n # validate the version\n self.validate_version(where)\n\n # infer the data kind\n if not self.infer_axes():\n return False\n\n # create the selection\n self.selection = Selection(self, where=where, **kwargs)\n values = self.selection.select()\n\n # convert the data\n for a in self.axes:\n a.set_info(self.info)\n a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)\n\n return True\n\n def get_object(self, obj):\n \"\"\" return the data for this obj \"\"\"\n return obj\n\n def validate_data_columns(self, data_columns, min_itemsize):\n \"\"\"take the input data_columns and min_itemize and create a data\n columns spec\n \"\"\"\n\n if not len(self.non_index_axes):\n return []\n\n axis, axis_labels = self.non_index_axes[0]\n info = self.info.get(axis, dict())\n if info.get('type') == 'MultiIndex' and data_columns:\n raise ValueError(\"cannot use a multi-index on axis [{0}] with \"\n \"data_columns {1}\".format(axis, data_columns))\n\n # evaluate the passed data_columns, True == use all columns\n # take only valide axis labels\n if data_columns is True:\n data_columns = axis_labels\n elif data_columns is None:\n data_columns = []\n\n # if min_itemsize is a dict, add the keys (exclude 'values')\n if isinstance(min_itemsize, dict):\n\n existing_data_columns = set(data_columns)\n data_columns.extend([\n k for k in min_itemsize.keys()\n if k != 'values' and k not in existing_data_columns\n ])\n\n # return valid columns in the order of our axis\n return [c for c in data_columns if c in axis_labels]\n\n def create_axes(self, axes, obj, validate=True, nan_rep=None,\n data_columns=None, min_itemsize=None, **kwargs):\n \"\"\" create and return the axes\n leagcy tables create an indexable column, indexable index,\n non-indexable fields\n\n Parameters:\n -----------\n axes: a list of the axes in order to create (names or numbers of\n the axes)\n obj : the object to create axes on\n validate: validate the obj against an existing object already\n written\n min_itemsize: a dict of the min size for a column in bytes\n nan_rep : a values to use for string column nan_rep\n encoding : the encoding for string values\n data_columns : a list of columns that we want to create separate to\n allow indexing (or True will force all columns)\n\n \"\"\"\n\n # set the default axes if needed\n if axes is None:\n try:\n axes = _AXES_MAP[type(obj)]\n except:\n raise TypeError(\"cannot properly create the storer for: \"\n \"[group->%s,value->%s]\"\n % (self.group._v_name, type(obj)))\n\n # map axes to 
numbers\n axes = [obj._get_axis_number(a) for a in axes]\n\n # do we have an existing table (if so, use its axes & data_columns)\n if self.infer_axes():\n existing_table = self.copy()\n existing_table.infer_axes()\n axes = [a.axis for a in existing_table.index_axes]\n data_columns = existing_table.data_columns\n nan_rep = existing_table.nan_rep\n self.encoding = existing_table.encoding\n self.info = copy.copy(existing_table.info)\n else:\n existing_table = None\n\n # currently support on ndim-1 axes\n if len(axes) != self.ndim - 1:\n raise ValueError(\n \"currently only support ndim-1 indexers in an AppendableTable\")\n\n # create according to the new data\n self.non_index_axes = []\n self.data_columns = []\n\n # nan_representation\n if nan_rep is None:\n nan_rep = 'nan'\n\n self.nan_rep = nan_rep\n\n # create axes to index and non_index\n index_axes_map = dict()\n for i, a in enumerate(obj.axes):\n\n if i in axes:\n name = obj._AXIS_NAMES[i]\n index_axes_map[i] = _convert_index(\n a, self.encoding, self.format_type\n ).set_name(name).set_axis(i)\n else:\n\n # we might be able to change the axes on the appending data if\n # necessary\n append_axis = list(a)\n if existing_table is not None:\n indexer = len(self.non_index_axes)\n exist_axis = existing_table.non_index_axes[indexer][1]\n if append_axis != exist_axis:\n\n # ahah! -> reindex\n if sorted(append_axis) == sorted(exist_axis):\n append_axis = exist_axis\n\n # the non_index_axes info\n info = _get_info(self.info, i)\n info['names'] = list(a.names)\n info['type'] = a.__class__.__name__\n\n self.non_index_axes.append((i, append_axis))\n\n # set axis positions (based on the axes)\n self.index_axes = [\n index_axes_map[a].set_pos(j).update_info(self.info)\n for j, a in enumerate(axes)\n ]\n j = len(self.index_axes)\n\n # check for column conflicts\n if validate:\n for a in self.axes:\n a.maybe_set_size(min_itemsize=min_itemsize)\n\n # reindex by our non_index_axes & compute data_columns\n for a in self.non_index_axes:\n obj = _reindex_axis(obj, a[0], a[1])\n\n def get_blk_items(mgr, blocks):\n return [mgr.items.take(blk.mgr_locs) for blk in blocks]\n\n # figure out data_columns and get out blocks\n block_obj = self.get_object(obj).consolidate()\n blocks = block_obj._data.blocks\n blk_items = get_blk_items(block_obj._data, blocks)\n if len(self.non_index_axes):\n axis, axis_labels = self.non_index_axes[0]\n data_columns = self.validate_data_columns(\n data_columns, min_itemsize)\n if len(data_columns):\n mgr = block_obj.reindex_axis(\n Index(axis_labels).difference(Index(data_columns)),\n axis=axis\n )._data\n\n blocks = list(mgr.blocks)\n blk_items = get_blk_items(mgr, blocks)\n for c in data_columns:\n mgr = block_obj.reindex_axis([c], axis=axis)._data\n blocks.extend(mgr.blocks)\n blk_items.extend(get_blk_items(mgr, mgr.blocks))\n\n # reorder the blocks in the same order as the existing_table if we can\n if existing_table is not None:\n by_items = dict([(tuple(b_items.tolist()), (b, b_items))\n for b, b_items in zip(blocks, blk_items)])\n new_blocks = []\n new_blk_items = []\n for ea in existing_table.values_axes:\n items = tuple(ea.values)\n try:\n b, b_items = by_items.pop(items)\n new_blocks.append(b)\n new_blk_items.append(b_items)\n except:\n raise ValueError(\n \"cannot match existing table structure for [%s] on \"\n \"appending data\" % ','.join(pprint_thing(item) for\n item in items))\n blocks = new_blocks\n blk_items = new_blk_items\n\n # add my values\n self.values_axes = []\n for i, (b, b_items) in enumerate(zip(blocks, 
blk_items)):\n\n # shape of the data column are the indexable axes\n klass = DataCol\n name = None\n\n # we have a data_column\n if (data_columns and len(b_items) == 1 and\n b_items[0] in data_columns):\n klass = DataIndexableCol\n name = b_items[0]\n self.data_columns.append(name)\n\n # make sure that we match up the existing columns\n # if we have an existing table\n if existing_table is not None and validate:\n try:\n existing_col = existing_table.values_axes[i]\n except:\n raise ValueError(\"Incompatible appended table [%s] with \"\n \"existing table [%s]\"\n % (blocks, existing_table.values_axes))\n else:\n existing_col = None\n\n try:\n col = klass.create_for_block(\n i=i, name=name, version=self.version)\n col.set_atom(block=b, block_items=b_items,\n existing_col=existing_col,\n min_itemsize=min_itemsize,\n nan_rep=nan_rep,\n encoding=self.encoding,\n info=self.info,\n **kwargs)\n col.set_pos(j)\n\n self.values_axes.append(col)\n except (NotImplementedError, ValueError, TypeError) as e:\n raise e\n except Exception as detail:\n raise Exception(\n \"cannot find the correct atom type -> \"\n \"[dtype->%s,items->%s] %s\"\n % (b.dtype.name, b_items, str(detail))\n )\n j += 1\n\n # validate our min_itemsize\n self.validate_min_itemsize(min_itemsize)\n\n # validate our metadata\n self.validate_metadata(existing_table)\n\n # validate the axes if we have an existing table\n if validate:\n self.validate(existing_table)\n\n def process_axes(self, obj, columns=None):\n \"\"\" process axes filters \"\"\"\n\n # make a copy to avoid side effects\n if columns is not None:\n columns = list(columns)\n\n # make sure to include levels if we have them\n if columns is not None and self.is_multi_index:\n for n in self.levels:\n if n not in columns:\n columns.insert(0, n)\n\n # reorder by any non_index_axes & limit to the select columns\n for axis, labels in self.non_index_axes:\n obj = _reindex_axis(obj, axis, labels, columns)\n\n # apply the selection filters (but keep in the same order)\n if self.selection.filter is not None:\n for field, op, filt in self.selection.filter.format():\n\n def process_filter(field, filt):\n\n for axis_name in obj._AXIS_NAMES.values():\n axis_number = obj._get_axis_number(axis_name)\n axis_values = obj._get_axis(axis_name)\n\n # see if the field is the name of an axis\n if field == axis_name:\n\n # if we have a multi-index, then need to include\n # the levels\n if self.is_multi_index:\n filt = filt.union(Index(self.levels))\n\n takers = op(axis_values, filt)\n return obj.ix._getitem_axis(takers,\n axis=axis_number)\n\n # this might be the name of a file IN an axis\n elif field in axis_values:\n\n # we need to filter on this dimension\n values = _ensure_index(getattr(obj, field).values)\n filt = _ensure_index(filt)\n\n # hack until we support reversed dim flags\n if isinstance(obj, DataFrame):\n axis_number = 1 - axis_number\n takers = op(values, filt)\n return obj.ix._getitem_axis(takers,\n axis=axis_number)\n\n raise ValueError(\n \"cannot find the field [%s] for filtering!\" % field)\n\n obj = process_filter(field, filt)\n\n return obj\n\n def create_description(self, complib=None, complevel=None,\n fletcher32=False, expectedrows=None):\n \"\"\" create the description of the table from the axes & values \"\"\"\n\n # provided expected rows if its passed\n if expectedrows is None:\n expectedrows = max(self.nrows_expected, 10000)\n\n d = dict(name='table', expectedrows=expectedrows)\n\n # description from the axes & values\n d['description'] = dict([(a.cname, a.typ) for a in 
self.axes])\n\n if complib:\n if complevel is None:\n complevel = self._complevel or 9\n filters = _tables().Filters(\n complevel=complevel, complib=complib,\n fletcher32=fletcher32 or self._fletcher32)\n d['filters'] = filters\n elif self._filters is not None:\n d['filters'] = self._filters\n\n return d\n\n def read_coordinates(self, where=None, start=None, stop=None, **kwargs):\n \"\"\"select coordinates (row numbers) from a table; return the\n coordinates object\n \"\"\"\n\n # validate the version\n self.validate_version(where)\n\n # infer the data kind\n if not self.infer_axes():\n return False\n\n # create the selection\n self.selection = Selection(\n self, where=where, start=start, stop=stop, **kwargs)\n coords = self.selection.select_coords()\n if self.selection.filter is not None:\n for field, op, filt in self.selection.filter.format():\n data = self.read_column(\n field, start=coords.min(), stop=coords.max() + 1)\n coords = coords[\n op(data.iloc[coords - coords.min()], filt).values]\n\n return Index(coords)\n\n def read_column(self, column, where=None, start=None, stop=None, **kwargs):\n \"\"\"return a single column from the table, generally only indexables\n are interesting\n \"\"\"\n\n # validate the version\n self.validate_version()\n\n # infer the data kind\n if not self.infer_axes():\n return False\n\n if where is not None:\n raise TypeError(\"read_column does not currently accept a where \"\n \"clause\")\n\n # find the axes\n for a in self.axes:\n if column == a.name:\n\n if not a.is_data_indexable:\n raise ValueError(\n \"column [%s] can not be extracted individually; it is \"\n \"not data indexable\" % column)\n\n # column must be an indexable or a data column\n c = getattr(self.table.cols, column)\n a.set_info(self.info)\n return Series(_set_tz(a.convert(c[start:stop],\n nan_rep=self.nan_rep,\n encoding=self.encoding\n ).take_data(),\n a.tz, True), name=column)\n\n raise KeyError(\"column [%s] not found in the table\" % column)\n\n\nclass WORMTable(Table):\n\n \"\"\" a write-once read-many table: this format DOES NOT ALLOW appending to a\n table. writing is a one-time operation the data are stored in a format\n that allows for searching the data on disk\n \"\"\"\n table_type = u('worm')\n\n def read(self, **kwargs):\n \"\"\" read the indicies and the indexing array, calculate offset rows and\n return \"\"\"\n raise NotImplementedError(\"WORMTable needs to implement read\")\n\n def write(self, **kwargs):\n \"\"\" write in a format that we can search later on (but cannot append\n to): write out the indicies and the values using _write_array\n (e.g. 
a CArray) create an indexing table so that we can search\n \"\"\"\n raise NotImplementedError(\"WORKTable needs to implement write\")\n\n\nclass LegacyTable(Table):\n\n \"\"\" an appendable table: allow append/query/delete operations to a\n (possibily) already existing appendable table this table ALLOWS\n append (but doesn't require them), and stores the data in a format\n that can be easily searched\n\n \"\"\"\n _indexables = [\n IndexCol(name='index', axis=1, pos=0),\n IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),\n DataCol(name='fields', cname='values', kind_attr='fields', pos=2)\n ]\n table_type = u('legacy')\n ndim = 3\n\n def write(self, **kwargs):\n raise TypeError(\"write operations are not allowed on legacy tables!\")\n\n def read(self, where=None, columns=None, **kwargs):\n \"\"\"we have n indexable columns, with an arbitrary number of data\n axes\n \"\"\"\n\n if not self.read_axes(where=where, **kwargs):\n return None\n\n factors = [Categorical.from_array(\n a.values, ordered=True) for a in self.index_axes]\n levels = [f.categories for f in factors]\n N = [len(f.categories) for f in factors]\n labels = [f.codes for f in factors]\n\n # compute the key\n key = _factor_indexer(N[1:], labels)\n\n objs = []\n if len(unique(key)) == len(key):\n\n sorter, _ = algos.groupsort_indexer(\n _ensure_int64(key), np.prod(N))\n sorter = _ensure_platform_int(sorter)\n\n # create the objs\n for c in self.values_axes:\n\n # the data need to be sorted\n sorted_values = c.take_data().take(sorter, axis=0)\n if sorted_values.ndim == 1:\n sorted_values = sorted_values.reshape(\n (sorted_values.shape[0], 1))\n\n take_labels = [l.take(sorter) for l in labels]\n items = Index(c.values)\n block = _block2d_to_blocknd(\n values=sorted_values, placement=np.arange(len(items)),\n shape=tuple(N), labels=take_labels, ref_items=items)\n\n # create the object\n mgr = BlockManager([block], [items] + levels)\n obj = self.obj_type(mgr)\n\n # permute if needed\n if self.is_transposed:\n obj = obj.transpose(\n *tuple(Series(self.data_orientation).argsort()))\n\n objs.append(obj)\n\n else:\n warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)\n\n # reconstruct\n long_index = MultiIndex.from_arrays(\n [i.values for i in self.index_axes])\n\n for c in self.values_axes:\n lp = DataFrame(c.data, index=long_index, columns=c.values)\n\n # need a better algorithm\n tuple_index = long_index._tuple_index\n\n unique_tuples = lib.fast_unique(tuple_index.values)\n unique_tuples = _asarray_tuplesafe(unique_tuples)\n\n indexer = match(unique_tuples, tuple_index)\n indexer = _ensure_platform_int(indexer)\n\n new_index = long_index.take(indexer)\n new_values = lp.values.take(indexer, axis=0)\n\n lp = DataFrame(new_values, index=new_index, columns=lp.columns)\n objs.append(lp.to_panel())\n\n # create the composite object\n if len(objs) == 1:\n wp = objs[0]\n else:\n wp = concat(objs, axis=0, verify_integrity=False).consolidate()\n\n # apply the selection filters & axis orderings\n wp = self.process_axes(wp, columns=columns)\n\n return wp\n\n\nclass LegacyFrameTable(LegacyTable):\n\n \"\"\" support the legacy frame table \"\"\"\n pandas_kind = u('frame_table')\n table_type = u('legacy_frame')\n obj_type = Panel\n\n def read(self, *args, **kwargs):\n return super(LegacyFrameTable, self).read(*args, **kwargs)['value']\n\n\nclass LegacyPanelTable(LegacyTable):\n\n \"\"\" support the legacy panel table \"\"\"\n table_type = u('legacy_panel')\n obj_type = Panel\n\n\nclass AppendableTable(LegacyTable):\n\n \"\"\" 
suppor the new appendable table formats \"\"\"\n _indexables = None\n table_type = u('appendable')\n\n def write(self, obj, axes=None, append=False, complib=None,\n complevel=None, fletcher32=None, min_itemsize=None,\n chunksize=None, expectedrows=None, dropna=False, **kwargs):\n\n if not append and self.is_exists:\n self._handle.remove_node(self.group, 'table')\n\n # create the axes\n self.create_axes(axes=axes, obj=obj, validate=append,\n min_itemsize=min_itemsize,\n **kwargs)\n\n for a in self.axes:\n a.validate(self, append)\n\n if not self.is_exists:\n\n # create the table\n options = self.create_description(complib=complib,\n complevel=complevel,\n fletcher32=fletcher32,\n expectedrows=expectedrows)\n\n # set the table attributes\n self.set_attrs()\n\n # create the table\n self._handle.create_table(self.group, **options)\n else:\n pass\n # table = self.table\n\n # update my info\n self.set_info()\n\n # validate the axes and set the kinds\n for a in self.axes:\n a.validate_and_set(self, append)\n\n # add the rows\n self.write_data(chunksize, dropna=dropna)\n\n def write_data(self, chunksize, dropna=False):\n \"\"\" we form the data into a 2-d including indexes,values,mask\n write chunk-by-chunk \"\"\"\n\n names = self.dtype.names\n nrows = self.nrows_expected\n\n # if dropna==True, then drop ALL nan rows\n masks = []\n if dropna:\n\n for a in self.values_axes:\n\n # figure the mask: only do if we can successfully process this\n # column, otherwise ignore the mask\n mask = isnull(a.data).all(axis=0)\n if isinstance(mask, np.ndarray):\n masks.append(mask.astype('u1', copy=False))\n\n # consolidate masks\n if len(masks):\n mask = masks[0]\n for m in masks[1:]:\n mask = mask & m\n mask = mask.ravel()\n else:\n mask = None\n\n # broadcast the indexes if needed\n indexes = [a.cvalues for a in self.index_axes]\n nindexes = len(indexes)\n bindexes = []\n for i, idx in enumerate(indexes):\n\n # broadcast to all other indexes except myself\n if i > 0 and i < nindexes:\n repeater = np.prod(\n [indexes[bi].shape[0] for bi in range(0, i)])\n idx = np.tile(idx, repeater)\n\n if i < nindexes - 1:\n repeater = np.prod([indexes[bi].shape[0]\n for bi in range(i + 1, nindexes)])\n idx = np.repeat(idx, repeater)\n\n bindexes.append(idx)\n\n # transpose the values so first dimension is last\n # reshape the values if needed\n values = [a.take_data() for a in self.values_axes]\n values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))\n for v in values]\n bvalues = []\n for i, v in enumerate(values):\n new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape\n bvalues.append(values[i].reshape(new_shape))\n\n # write the chunks\n if chunksize is None:\n chunksize = 100000\n\n rows = np.empty(min(chunksize, nrows), dtype=self.dtype)\n chunks = int(nrows / chunksize) + 1\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, nrows)\n if start_i >= end_i:\n break\n\n self.write_data_chunk(\n rows,\n indexes=[a[start_i:end_i] for a in bindexes],\n mask=mask[start_i:end_i] if mask is not None else None,\n values=[v[start_i:end_i] for v in bvalues])\n\n def write_data_chunk(self, rows, indexes, mask, values):\n \"\"\"\n Parameters\n ----------\n rows : an empty memory space where we are putting the chunk\n indexes : an array of the indexes\n mask : an array of the masks\n values : an array of the values\n \"\"\"\n\n # 0 len\n for v in values:\n if not np.prod(v.shape):\n return\n\n try:\n nrows = indexes[0].shape[0]\n if nrows != len(rows):\n rows = np.empty(nrows, 
dtype=self.dtype)\n names = self.dtype.names\n nindexes = len(indexes)\n\n # indexes\n for i, idx in enumerate(indexes):\n rows[names[i]] = idx\n\n # values\n for i, v in enumerate(values):\n rows[names[i + nindexes]] = v\n\n # mask\n if mask is not None:\n m = ~mask.ravel().astype(bool, copy=False)\n if not m.all():\n rows = rows[m]\n\n except Exception as detail:\n raise Exception(\"cannot create row-data -> %s\" % detail)\n\n try:\n if len(rows):\n self.table.append(rows)\n self.table.flush()\n except Exception as detail:\n raise TypeError(\"tables cannot write this data -> %s\" % detail)\n\n def delete(self, where=None, start=None, stop=None, **kwargs):\n\n # delete all rows (and return the nrows)\n if where is None or not len(where):\n if start is None and stop is None:\n nrows = self.nrows\n self._handle.remove_node(self.group, recursive=True)\n else:\n # pytables<3.0 would remove a single row with stop=None\n if stop is None:\n stop = self.nrows\n nrows = self.table.remove_rows(start=start, stop=stop)\n self.table.flush()\n return nrows\n\n # infer the data kind\n if not self.infer_axes():\n return None\n\n # create the selection\n table = self.table\n self.selection = Selection(\n self, where, start=start, stop=stop, **kwargs)\n values = self.selection.select_coords()\n\n # delete the rows in reverse order\n l = Series(values).sort_values()\n ln = len(l)\n\n if ln:\n\n # construct groups of consecutive rows\n diff = l.diff()\n groups = list(diff[diff > 1].index)\n\n # 1 group\n if not len(groups):\n groups = [0]\n\n # final element\n if groups[-1] != ln:\n groups.append(ln)\n\n # initial element\n if groups[0] != 0:\n groups.insert(0, 0)\n\n # we must remove in reverse order!\n pg = groups.pop()\n for g in reversed(groups):\n rows = l.take(lrange(g, pg))\n table.remove_rows(start=rows[rows.index[0]\n ], stop=rows[rows.index[-1]] + 1)\n pg = g\n\n self.table.flush()\n\n # return the number of rows removed\n return ln\n\n\nclass AppendableFrameTable(AppendableTable):\n\n \"\"\" suppor the new appendable table formats \"\"\"\n pandas_kind = u('frame_table')\n table_type = u('appendable_frame')\n ndim = 2\n obj_type = DataFrame\n\n @property\n def is_transposed(self):\n return self.index_axes[0].axis == 1\n\n def get_object(self, obj):\n \"\"\" these are written transposed \"\"\"\n if self.is_transposed:\n obj = obj.T\n return obj\n\n def read(self, where=None, columns=None, **kwargs):\n\n if not self.read_axes(where=where, **kwargs):\n return None\n\n info = (self.info.get(self.non_index_axes[0][0], dict())\n if len(self.non_index_axes) else dict())\n index = self.index_axes[0].values\n frames = []\n for a in self.values_axes:\n\n # we could have a multi-index constructor here\n # _ensure_index doesn't recognized our list-of-tuples here\n if info.get('type') == 'MultiIndex':\n cols = MultiIndex.from_tuples(a.values)\n else:\n cols = Index(a.values)\n names = info.get('names')\n if names is not None:\n cols.set_names(names, inplace=True)\n\n if self.is_transposed:\n values = a.cvalues\n index_ = cols\n cols_ = Index(index, name=getattr(index, 'name', None))\n else:\n values = a.cvalues.T\n index_ = Index(index, name=getattr(index, 'name', None))\n cols_ = cols\n\n # if we have a DataIndexableCol, its shape will only be 1 dim\n if values.ndim == 1 and isinstance(values, np.ndarray):\n values = values.reshape((1, values.shape[0]))\n\n block = make_block(values, placement=np.arange(len(cols_)))\n mgr = BlockManager([block], [cols_, index_])\n frames.append(DataFrame(mgr))\n\n if 
len(frames) == 1:\n df = frames[0]\n else:\n df = concat(frames, axis=1)\n\n # apply the selection filters & axis orderings\n df = self.process_axes(df, columns=columns)\n\n return df\n\n\nclass AppendableSeriesTable(AppendableFrameTable):\n \"\"\" support the new appendable table formats \"\"\"\n pandas_kind = u('series_table')\n table_type = u('appendable_series')\n ndim = 2\n obj_type = Series\n storage_obj_type = DataFrame\n\n @property\n def is_transposed(self):\n return False\n\n def get_object(self, obj):\n return obj\n\n def write(self, obj, data_columns=None, **kwargs):\n \"\"\" we are going to write this as a frame table \"\"\"\n if not isinstance(obj, DataFrame):\n name = obj.name or 'values'\n obj = DataFrame({name: obj}, index=obj.index)\n obj.columns = [name]\n return super(AppendableSeriesTable, self).write(\n obj=obj, data_columns=obj.columns, **kwargs)\n\n def read(self, columns=None, **kwargs):\n\n is_multi_index = self.is_multi_index\n if columns is not None and is_multi_index:\n for n in self.levels:\n if n not in columns:\n columns.insert(0, n)\n s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)\n if is_multi_index:\n s.set_index(self.levels, inplace=True)\n\n s = s.iloc[:, 0]\n\n # remove the default name\n if s.name == 'values':\n s.name = None\n return s\n\n\nclass AppendableMultiSeriesTable(AppendableSeriesTable):\n \"\"\" support the new appendable table formats \"\"\"\n pandas_kind = u('series_table')\n table_type = u('appendable_multiseries')\n\n def write(self, obj, **kwargs):\n \"\"\" we are going to write this as a frame table \"\"\"\n name = obj.name or 'values'\n obj, self.levels = self.validate_multiindex(obj)\n cols = list(self.levels)\n cols.append(name)\n obj.columns = cols\n return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)\n\n\nclass GenericTable(AppendableFrameTable):\n \"\"\" a table that read/writes the generic pytables table format \"\"\"\n pandas_kind = u('frame_table')\n table_type = u('generic_table')\n ndim = 2\n obj_type = DataFrame\n\n @property\n def pandas_type(self):\n return self.pandas_kind\n\n @property\n def storable(self):\n return getattr(self.group, 'table', None) or self.group\n\n def get_attrs(self):\n \"\"\" retrieve our attributes \"\"\"\n self.non_index_axes = []\n self.nan_rep = None\n self.levels = []\n\n self.index_axes = [a.infer(self)\n for a in self.indexables if a.is_an_indexable]\n self.values_axes = [a.infer(self)\n for a in self.indexables if not a.is_an_indexable]\n self.data_columns = [a.name for a in self.values_axes]\n\n @property\n def indexables(self):\n \"\"\" create the indexables from the table description \"\"\"\n if self._indexables is None:\n\n d = self.description\n\n # the index columns is just a simple index\n self._indexables = [GenericIndexCol(name='index', axis=0)]\n\n for i, n in enumerate(d._v_names):\n\n dc = GenericDataIndexableCol(\n name=n, pos=i, values=[n], version=self.version)\n self._indexables.append(dc)\n\n return self._indexables\n\n def write(self, **kwargs):\n raise NotImplementedError(\"cannot write on an generic table\")\n\n\nclass AppendableMultiFrameTable(AppendableFrameTable):\n\n \"\"\" a frame with a multi-index \"\"\"\n table_type = u('appendable_multiframe')\n obj_type = DataFrame\n ndim = 2\n _re_levels = re.compile(\"^level_\\d+$\")\n\n @property\n def table_type_short(self):\n return u('appendable_multi')\n\n def write(self, obj, data_columns=None, **kwargs):\n if data_columns is None:\n data_columns = []\n elif data_columns is 
True:\n data_columns = obj.columns[:]\n obj, self.levels = self.validate_multiindex(obj)\n for n in self.levels:\n if n not in data_columns:\n data_columns.insert(0, n)\n return super(AppendableMultiFrameTable, self).write(\n obj=obj, data_columns=data_columns, **kwargs)\n\n def read(self, **kwargs):\n\n df = super(AppendableMultiFrameTable, self).read(**kwargs)\n df = df.set_index(self.levels)\n\n # remove names for 'level_%d'\n df.index = df.index.set_names([\n None if self._re_levels.search(l) else l for l in df.index.names\n ])\n\n return df\n\n\nclass AppendablePanelTable(AppendableTable):\n\n \"\"\" suppor the new appendable table formats \"\"\"\n table_type = u('appendable_panel')\n ndim = 3\n obj_type = Panel\n\n def get_object(self, obj):\n \"\"\" these are written transposed \"\"\"\n if self.is_transposed:\n obj = obj.transpose(*self.data_orientation)\n return obj\n\n @property\n def is_transposed(self):\n return self.data_orientation != tuple(range(self.ndim))\n\n\nclass AppendableNDimTable(AppendablePanelTable):\n\n \"\"\" suppor the new appendable table formats \"\"\"\n table_type = u('appendable_ndim')\n ndim = 4\n obj_type = Panel4D\n\n\ndef _reindex_axis(obj, axis, labels, other=None):\n ax = obj._get_axis(axis)\n labels = _ensure_index(labels)\n\n # try not to reindex even if other is provided\n # if it equals our current index\n if other is not None:\n other = _ensure_index(other)\n if (other is None or labels.equals(other)) and labels.equals(ax):\n return obj\n\n labels = _ensure_index(labels.unique())\n if other is not None:\n labels = labels & _ensure_index(other.unique())\n if not labels.equals(ax):\n slicer = [slice(None, None)] * obj.ndim\n slicer[axis] = labels\n obj = obj.loc[tuple(slicer)]\n return obj\n\n\ndef _get_info(info, name):\n \"\"\" get/create the info for this name \"\"\"\n try:\n idx = info[name]\n except:\n idx = info[name] = dict()\n return idx\n\n# tz to/from coercion\n\n\ndef _get_tz(tz):\n \"\"\" for a tz-aware type, return an encoded zone \"\"\"\n zone = tslib.get_timezone(tz)\n if zone is None:\n zone = tslib.tot_seconds(tz.utcoffset())\n return zone\n\n\ndef _set_tz(values, tz, preserve_UTC=False, coerce=False):\n \"\"\"\n coerce the values to a DatetimeIndex if tz is set\n preserve the input shape if possible\n\n Parameters\n ----------\n values : ndarray\n tz : string/pickled tz object\n preserve_UTC : boolean,\n preserve the UTC of the result\n coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray\n \"\"\"\n if tz is not None:\n name = getattr(values, 'name', None)\n values = values.ravel()\n tz = tslib.get_timezone(_ensure_decoded(tz))\n values = DatetimeIndex(values, name=name)\n if values.tz is None:\n values = values.tz_localize('UTC').tz_convert(tz)\n if preserve_UTC:\n if tz == 'UTC':\n values = list(values)\n elif coerce:\n values = np.asarray(values, dtype='M8[ns]')\n\n return values\n\n\ndef _convert_index(index, encoding=None, format_type=None):\n index_name = getattr(index, 'name', None)\n\n if isinstance(index, DatetimeIndex):\n converted = index.asi8\n return IndexCol(converted, 'datetime64', _tables().Int64Col(),\n freq=getattr(index, 'freq', None),\n tz=getattr(index, 'tz', None),\n index_name=index_name)\n elif isinstance(index, TimedeltaIndex):\n converted = index.asi8\n return IndexCol(converted, 'timedelta64', _tables().Int64Col(),\n freq=getattr(index, 'freq', None),\n index_name=index_name)\n elif isinstance(index, (Int64Index, PeriodIndex)):\n atom = _tables().Int64Col()\n # avoid to store ndarray of 
Period objects\n return IndexCol(index._values, 'integer', atom,\n freq=getattr(index, 'freq', None),\n index_name=index_name)\n\n if isinstance(index, MultiIndex):\n raise TypeError('MultiIndex not supported here!')\n\n inferred_type = lib.infer_dtype(index)\n\n values = np.asarray(index)\n\n if inferred_type == 'datetime64':\n converted = values.view('i8')\n return IndexCol(converted, 'datetime64', _tables().Int64Col(),\n freq=getattr(index, 'freq', None),\n tz=getattr(index, 'tz', None),\n index_name=index_name)\n elif inferred_type == 'timedelta64':\n converted = values.view('i8')\n return IndexCol(converted, 'timedelta64', _tables().Int64Col(),\n freq=getattr(index, 'freq', None),\n index_name=index_name)\n elif inferred_type == 'datetime':\n converted = np.asarray([(time.mktime(v.timetuple()) +\n v.microsecond / 1E6) for v in values],\n dtype=np.float64)\n return IndexCol(converted, 'datetime', _tables().Time64Col(),\n index_name=index_name)\n elif inferred_type == 'date':\n converted = np.asarray([v.toordinal() for v in values],\n dtype=np.int32)\n return IndexCol(converted, 'date', _tables().Time32Col(),\n index_name=index_name)\n elif inferred_type == 'string':\n # atom = _tables().ObjectAtom()\n # return np.asarray(values, dtype='O'), 'object', atom\n\n converted = _convert_string_array(values, encoding)\n itemsize = converted.dtype.itemsize\n return IndexCol(\n converted, 'string', _tables().StringCol(itemsize),\n itemsize=itemsize, index_name=index_name\n )\n elif inferred_type == 'unicode':\n if format_type == 'fixed':\n atom = _tables().ObjectAtom()\n return IndexCol(np.asarray(values, dtype='O'), 'object', atom,\n index_name=index_name)\n raise TypeError(\n \"[unicode] is not supported as a in index type for [{0}] formats\"\n .format(format_type)\n )\n\n elif inferred_type == 'integer':\n # take a guess for now, hope the values fit\n atom = _tables().Int64Col()\n return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,\n index_name=index_name)\n elif inferred_type == 'floating':\n atom = _tables().Float64Col()\n return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,\n index_name=index_name)\n else: # pragma: no cover\n atom = _tables().ObjectAtom()\n return IndexCol(np.asarray(values, dtype='O'), 'object', atom,\n index_name=index_name)\n\n\ndef _unconvert_index(data, kind, encoding=None):\n kind = _ensure_decoded(kind)\n if kind == u('datetime64'):\n index = DatetimeIndex(data)\n elif kind == u('timedelta64'):\n index = TimedeltaIndex(data)\n elif kind == u('datetime'):\n index = np.asarray([datetime.fromtimestamp(v) for v in data],\n dtype=object)\n elif kind == u('date'):\n try:\n index = np.asarray(\n [date.fromordinal(v) for v in data], dtype=object)\n except (ValueError):\n index = np.asarray(\n [date.fromtimestamp(v) for v in data], dtype=object)\n elif kind in (u('integer'), u('float')):\n index = np.asarray(data)\n elif kind in (u('string')):\n index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)\n elif kind == u('object'):\n index = np.asarray(data[0])\n else: # pragma: no cover\n raise ValueError('unrecognized index type %s' % kind)\n return index\n\n\ndef _unconvert_index_legacy(data, kind, legacy=False, encoding=None):\n kind = _ensure_decoded(kind)\n if kind == u('datetime'):\n index = lib.time64_to_datetime(data)\n elif kind in (u('integer')):\n index = np.asarray(data, dtype=object)\n elif kind in (u('string')):\n index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)\n else: # pragma: no cover\n 
raise ValueError('unrecognized index type %s' % kind)\n return index\n\n\ndef _convert_string_array(data, encoding, itemsize=None):\n \"\"\"\n we take a string-like that is object dtype and coerce to a fixed size\n string type\n\n Parameters\n ----------\n data : a numpy array of object dtype\n encoding : None or string-encoding\n itemsize : integer, optional, defaults to the max length of the strings\n\n Returns\n -------\n data in a fixed-length string dtype, encoded to bytes if needed\n \"\"\"\n\n # encode if needed\n if encoding is not None and len(data):\n data = Series(data.ravel()).str.encode(\n encoding).values.reshape(data.shape)\n\n # create the sized dtype\n if itemsize is None:\n itemsize = lib.max_len_string_array(_ensure_object(data.ravel()))\n\n data = np.asarray(data, dtype=\"S%d\" % itemsize)\n return data\n\n\ndef _unconvert_string_array(data, nan_rep=None, encoding=None):\n \"\"\"\n inverse of _convert_string_array\n\n Parameters\n ----------\n data : fixed length string dtyped array\n nan_rep : the storage repr of NaN, optional\n encoding : the encoding of the data, optional\n\n Returns\n -------\n an object array of the decoded data\n\n \"\"\"\n shape = data.shape\n data = np.asarray(data.ravel(), dtype=object)\n\n # guard against a None encoding in PY3 (because of a legacy\n # where the passed encoding is actually None)\n encoding = _ensure_encoding(encoding)\n if encoding is not None and len(data):\n\n itemsize = lib.max_len_string_array(_ensure_object(data))\n if compat.PY3:\n dtype = \"U{0}\".format(itemsize)\n else:\n dtype = \"S{0}\".format(itemsize)\n\n if isinstance(data[0], compat.binary_type):\n data = Series(data).str.decode(encoding).values\n else:\n data = data.astype(dtype, copy=False).astype(object, copy=False)\n\n if nan_rep is None:\n nan_rep = 'nan'\n\n data = lib.string_array_replace_from_nan_rep(data, nan_rep)\n return data.reshape(shape)\n\n\ndef _maybe_convert(values, val_kind, encoding):\n if _need_convert(val_kind):\n conv = _get_converter(val_kind, encoding)\n # conv = np.frompyfunc(conv, 1, 1)\n values = conv(values)\n return values\n\n\ndef _get_converter(kind, encoding):\n kind = _ensure_decoded(kind)\n if kind == 'datetime64':\n return lambda x: np.asarray(x, dtype='M8[ns]')\n elif kind == 'datetime':\n return lib.convert_timestamps\n elif kind == 'string':\n return lambda x: _unconvert_string_array(x, encoding=encoding)\n else: # pragma: no cover\n raise ValueError('invalid kind %s' % kind)\n\n\ndef _need_convert(kind):\n kind = _ensure_decoded(kind)\n if kind in (u('datetime'), u('datetime64'), u('string')):\n return True\n return False\n\n\nclass Selection(object):\n\n \"\"\"\n Carries out a selection operation on a tables.Table object.\n\n Parameters\n ----------\n table : a Table object\n where : list of Terms (or convertable to)\n start, stop: indicies to start and/or stop selection\n\n \"\"\"\n\n def __init__(self, table, where=None, start=None, stop=None, **kwargs):\n self.table = table\n self.where = where\n self.start = start\n self.stop = stop\n self.condition = None\n self.filter = None\n self.terms = None\n self.coordinates = None\n\n if is_list_like(where):\n\n # see if we have a passed coordinate like\n try:\n inferred = lib.infer_dtype(where)\n if inferred == 'integer' or inferred == 'boolean':\n where = np.asarray(where)\n if where.dtype == np.bool_:\n start, stop = self.start, self.stop\n if start is None:\n start = 0\n if stop is None:\n stop = self.table.nrows\n self.coordinates = np.arange(start, stop)[where]\n elif 
issubclass(where.dtype.type, np.integer):\n if ((self.start is not None and\n (where < self.start).any()) or\n (self.stop is not None and\n (where >= self.stop).any())):\n raise ValueError(\n \"where must have index locations >= start and \"\n \"< stop\"\n )\n self.coordinates = where\n\n except:\n pass\n\n if self.coordinates is None:\n\n self.terms = self.generate(where)\n\n # create the numexpr & the filter\n if self.terms is not None:\n self.condition, self.filter = self.terms.evaluate()\n\n def generate(self, where):\n \"\"\" where can be a : dict,list,tuple,string \"\"\"\n if where is None:\n return None\n\n q = self.table.queryables()\n try:\n return Expr(where, queryables=q, encoding=self.table.encoding)\n except NameError:\n # raise a nice message, suggesting that the user should use\n # data_columns\n raise ValueError(\n \"The passed where expression: {0}\\n\"\n \" contains an invalid variable reference\\n\"\n \" all of the variable refrences must be a \"\n \"reference to\\n\"\n \" an axis (e.g. 'index' or 'columns'), or a \"\n \"data_column\\n\"\n \" The currently defined references are: {1}\\n\"\n .format(where, ','.join(q.keys()))\n )\n\n def select(self):\n \"\"\"\n generate the selection\n \"\"\"\n if self.condition is not None:\n return self.table.table.read_where(self.condition.format(),\n start=self.start,\n stop=self.stop)\n elif self.coordinates is not None:\n return self.table.table.read_coordinates(self.coordinates)\n return self.table.table.read(start=self.start, stop=self.stop)\n\n def select_coords(self):\n \"\"\"\n generate the selection\n \"\"\"\n start, stop = self.start, self.stop\n nrows = self.table.nrows\n if start is None:\n start = 0\n elif start < 0:\n start += nrows\n if self.stop is None:\n stop = nrows\n elif stop < 0:\n stop += nrows\n\n if self.condition is not None:\n return self.table.table.get_where_list(self.condition.format(),\n start=start, stop=stop,\n sort=True)\n elif self.coordinates is not None:\n return self.coordinates\n\n return np.arange(start, stop)\n\n# utilities ###\n\n\ndef timeit(key, df, fn=None, remove=True, **kwargs):\n if fn is None:\n fn = 'timeit.h5'\n store = HDFStore(fn, mode='w')\n store.append(key, df, **kwargs)\n store.close()\n\n if remove:\n os.remove(fn)\n",
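The row above carries pandas' internal HDF5 plumbing: the index/string conversion helpers (`_convert_index`, `_convert_string_array`) and the `Selection` class that parses `where` expressions against a PyTables table. As a minimal, hedged sketch of how that machinery is reached through the public `HDFStore` API — assuming PyTables is installed and `demo.h5` is a throwaway path chosen for illustration:

import numpy as np
import pandas as pd

df = pd.DataFrame({"A": pd.date_range("2000-01-01", periods=5),
                   "B": np.arange(5.0)})

# Assumption: PyTables is available; 'demo.h5' is a scratch file for this sketch.
with pd.HDFStore("demo.h5", mode="w") as store:
    # Appending in 'table' format routes through the appendable-table classes;
    # data_columns=["B"] makes B queryable in a where expression.
    store.append("df", df, format="table", data_columns=["B"])
    # The where string is compiled into a numexpr condition by Selection/Expr.
    subset = store.select("df", where="B > 2")

print(subset)

In this flow the datetime column is stored through the i8 conversion done in `_convert_index`, and the `where` string is checked against the table's queryables, which is where the "invalid variable reference" ValueError in the code above originates.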
"# -*- coding: utf-8 -*-\n\nfrom pandas import compat\nfrom pandas.compat import PY3\n\nimport numpy as np\n\nfrom pandas import (Series, Index, Float64Index, Int64Index, RangeIndex,\n MultiIndex, CategoricalIndex, DatetimeIndex,\n TimedeltaIndex, PeriodIndex, notnull)\nfrom pandas.util.testing import assertRaisesRegexp\n\nimport pandas.util.testing as tm\n\nimport pandas as pd\n\n\nclass Base(object):\n \"\"\" base class for index sub-class tests \"\"\"\n _holder = None\n _compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']\n\n def setup_indices(self):\n for name, idx in self.indices.items():\n setattr(self, name, idx)\n\n def verify_pickle(self, index):\n unpickled = self.round_trip_pickle(index)\n self.assertTrue(index.equals(unpickled))\n\n def test_pickle_compat_construction(self):\n # this is testing for pickle compat\n if self._holder is None:\n return\n\n # need an object to create with\n self.assertRaises(TypeError, self._holder)\n\n def test_shift(self):\n\n # GH8083 test the base class for shift\n idx = self.create_index()\n self.assertRaises(NotImplementedError, idx.shift, 1)\n self.assertRaises(NotImplementedError, idx.shift, 1, 2)\n\n def test_create_index_existing_name(self):\n\n # GH11193, when an existing index is passed, and a new name is not\n # specified, the new index should inherit the previous object name\n expected = self.create_index()\n if not isinstance(expected, MultiIndex):\n expected.name = 'foo'\n result = pd.Index(expected)\n tm.assert_index_equal(result, expected)\n\n result = pd.Index(expected, name='bar')\n expected.name = 'bar'\n tm.assert_index_equal(result, expected)\n else:\n expected.names = ['foo', 'bar']\n result = pd.Index(expected)\n tm.assert_index_equal(\n result, Index(Index([('foo', 'one'), ('foo', 'two'),\n ('bar', 'one'), ('baz', 'two'),\n ('qux', 'one'), ('qux', 'two')],\n dtype='object'),\n names=['foo', 'bar']))\n\n result = pd.Index(expected, names=['A', 'B'])\n tm.assert_index_equal(\n result,\n Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),\n ('baz', 'two'), ('qux', 'one'), ('qux', 'two')],\n dtype='object'), names=['A', 'B']))\n\n def test_numeric_compat(self):\n\n idx = self.create_index()\n tm.assertRaisesRegexp(TypeError, \"cannot perform __mul__\",\n lambda: idx * 1)\n tm.assertRaisesRegexp(TypeError, \"cannot perform __mul__\",\n lambda: 1 * idx)\n\n div_err = \"cannot perform __truediv__\" if PY3 \\\n else \"cannot perform __div__\"\n tm.assertRaisesRegexp(TypeError, div_err, lambda: idx / 1)\n tm.assertRaisesRegexp(TypeError, div_err, lambda: 1 / idx)\n tm.assertRaisesRegexp(TypeError, \"cannot perform __floordiv__\",\n lambda: idx // 1)\n tm.assertRaisesRegexp(TypeError, \"cannot perform __floordiv__\",\n lambda: 1 // idx)\n\n def test_logical_compat(self):\n idx = self.create_index()\n tm.assertRaisesRegexp(TypeError, 'cannot perform all',\n lambda: idx.all())\n tm.assertRaisesRegexp(TypeError, 'cannot perform any',\n lambda: idx.any())\n\n def test_boolean_context_compat(self):\n\n # boolean context compat\n idx = self.create_index()\n\n def f():\n if idx:\n pass\n\n tm.assertRaisesRegexp(ValueError, 'The truth value of a', f)\n\n def test_reindex_base(self):\n idx = self.create_index()\n expected = np.arange(idx.size, dtype=np.intp)\n\n actual = idx.get_indexer(idx)\n tm.assert_numpy_array_equal(expected, actual)\n\n with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'):\n idx.get_indexer(idx, method='invalid')\n\n def test_ndarray_compat_properties(self):\n\n idx = self.create_index()\n 
self.assertTrue(idx.T.equals(idx))\n self.assertTrue(idx.transpose().equals(idx))\n\n values = idx.values\n for prop in self._compat_props:\n self.assertEqual(getattr(idx, prop), getattr(values, prop))\n\n # test for validity\n idx.nbytes\n idx.values.nbytes\n\n def test_repr_roundtrip(self):\n\n idx = self.create_index()\n tm.assert_index_equal(eval(repr(idx)), idx)\n\n def test_str(self):\n\n # test the string repr\n idx = self.create_index()\n idx.name = 'foo'\n self.assertTrue(\"'foo'\" in str(idx))\n self.assertTrue(idx.__class__.__name__ in str(idx))\n\n def test_dtype_str(self):\n for idx in self.indices.values():\n dtype = idx.dtype_str\n self.assertIsInstance(dtype, compat.string_types)\n self.assertEqual(dtype, str(idx.dtype))\n\n def test_repr_max_seq_item_setting(self):\n # GH10182\n idx = self.create_index()\n idx = idx.repeat(50)\n with pd.option_context(\"display.max_seq_items\", None):\n repr(idx)\n self.assertFalse('...' in str(idx))\n\n def test_wrong_number_names(self):\n def testit(ind):\n ind.names = [\"apple\", \"banana\", \"carrot\"]\n\n for ind in self.indices.values():\n assertRaisesRegexp(ValueError, \"^Length\", testit, ind)\n\n def test_set_name_methods(self):\n new_name = \"This is the new name for this index\"\n for ind in self.indices.values():\n\n # don't tests a MultiIndex here (as its tested separated)\n if isinstance(ind, MultiIndex):\n continue\n\n original_name = ind.name\n new_ind = ind.set_names([new_name])\n self.assertEqual(new_ind.name, new_name)\n self.assertEqual(ind.name, original_name)\n res = ind.rename(new_name, inplace=True)\n\n # should return None\n self.assertIsNone(res)\n self.assertEqual(ind.name, new_name)\n self.assertEqual(ind.names, [new_name])\n # with assertRaisesRegexp(TypeError, \"list-like\"):\n # # should still fail even if it would be the right length\n # ind.set_names(\"a\")\n with assertRaisesRegexp(ValueError, \"Level must be None\"):\n ind.set_names(\"a\", level=0)\n\n # rename in place just leaves tuples and other containers alone\n name = ('A', 'B')\n ind.rename(name, inplace=True)\n self.assertEqual(ind.name, name)\n self.assertEqual(ind.names, [name])\n\n def test_hash_error(self):\n for ind in self.indices.values():\n with tm.assertRaisesRegexp(TypeError, \"unhashable type: %r\" %\n type(ind).__name__):\n hash(ind)\n\n def test_copy_name(self):\n # Check that \"name\" argument passed at initialization is honoured\n # GH12309\n for name, index in compat.iteritems(self.indices):\n if isinstance(index, MultiIndex):\n continue\n\n first = index.__class__(index, copy=True, name='mario')\n second = first.__class__(first, copy=False)\n\n # Even though \"copy=False\", we want a new object.\n self.assertIsNot(first, second)\n # Not using tm.assert_index_equal() since names differ:\n self.assertTrue(index.equals(first))\n\n self.assertEqual(first.name, 'mario')\n self.assertEqual(second.name, 'mario')\n\n s1 = Series(2, index=first)\n s2 = Series(3, index=second[:-1])\n if not isinstance(index, CategoricalIndex): # See GH13365\n s3 = s1 * s2\n self.assertEqual(s3.index.name, 'mario')\n\n def test_ensure_copied_data(self):\n # Check the \"copy\" argument of each Index.__new__ is honoured\n # GH12309\n for name, index in compat.iteritems(self.indices):\n init_kwargs = {}\n if isinstance(index, PeriodIndex):\n # Needs \"freq\" specification:\n init_kwargs['freq'] = index.freq\n elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):\n # RangeIndex cannot be initialized from data\n # MultiIndex and CategoricalIndex are 
tested separately\n continue\n\n index_type = index.__class__\n result = index_type(index.values, copy=True, **init_kwargs)\n tm.assert_index_equal(index, result)\n tm.assert_numpy_array_equal(index.values, result.values,\n check_same='copy')\n\n if not isinstance(index, PeriodIndex):\n result = index_type(index.values, copy=False, **init_kwargs)\n tm.assert_numpy_array_equal(index.values, result.values,\n check_same='same')\n tm.assert_numpy_array_equal(index._values, result._values,\n check_same='same')\n else:\n # .values an object array of Period, thus copied\n result = index_type(ordinal=index.asi8, copy=False,\n **init_kwargs)\n tm.assert_numpy_array_equal(index._values, result._values,\n check_same='same')\n\n def test_copy_and_deepcopy(self):\n from copy import copy, deepcopy\n\n for ind in self.indices.values():\n\n # don't tests a MultiIndex here (as its tested separated)\n if isinstance(ind, MultiIndex):\n continue\n\n for func in (copy, deepcopy):\n idx_copy = func(ind)\n self.assertIsNot(idx_copy, ind)\n self.assertTrue(idx_copy.equals(ind))\n\n new_copy = ind.copy(deep=True, name=\"banana\")\n self.assertEqual(new_copy.name, \"banana\")\n\n def test_duplicates(self):\n for ind in self.indices.values():\n\n if not len(ind):\n continue\n if isinstance(ind, MultiIndex):\n continue\n idx = self._holder([ind[0]] * 5)\n self.assertFalse(idx.is_unique)\n self.assertTrue(idx.has_duplicates)\n\n # GH 10115\n # preserve names\n idx.name = 'foo'\n result = idx.drop_duplicates()\n self.assertEqual(result.name, 'foo')\n self.assert_index_equal(result, Index([ind[0]], name='foo'))\n\n def test_get_unique_index(self):\n for ind in self.indices.values():\n\n # MultiIndex tested separately\n if not len(ind) or isinstance(ind, MultiIndex):\n continue\n\n idx = ind[[0] * 5]\n idx_unique = ind[[0]]\n # We test against `idx_unique`, so first we make sure it's unique\n # and doesn't contain nans.\n self.assertTrue(idx_unique.is_unique)\n try:\n self.assertFalse(idx_unique.hasnans)\n except NotImplementedError:\n pass\n\n for dropna in [False, True]:\n result = idx._get_unique_index(dropna=dropna)\n self.assert_index_equal(result, idx_unique)\n\n # nans:\n\n if not ind._can_hold_na:\n continue\n\n vals = ind.values[[0] * 5]\n vals[0] = np.nan\n vals_unique = vals[:2]\n idx_nan = ind._shallow_copy(vals)\n idx_unique_nan = ind._shallow_copy(vals_unique)\n self.assertTrue(idx_unique_nan.is_unique)\n\n for dropna, expected in zip([False, True],\n [idx_unique_nan, idx_unique]):\n for i in [idx_nan, idx_unique_nan]:\n result = i._get_unique_index(dropna=dropna)\n self.assert_index_equal(result, expected)\n\n def test_sort(self):\n for ind in self.indices.values():\n self.assertRaises(TypeError, ind.sort)\n\n def test_order(self):\n for ind in self.indices.values():\n # 9816 deprecated\n with tm.assert_produces_warning(FutureWarning):\n ind.order()\n\n def test_mutability(self):\n for ind in self.indices.values():\n if not len(ind):\n continue\n self.assertRaises(TypeError, ind.__setitem__, 0, ind[0])\n\n def test_view(self):\n for ind in self.indices.values():\n i_view = ind.view()\n self.assertEqual(i_view.name, ind.name)\n\n def test_compat(self):\n for ind in self.indices.values():\n self.assertEqual(ind.tolist(), list(ind))\n\n def test_argsort(self):\n for k, ind in self.indices.items():\n\n # separately tested\n if k in ['catIndex']:\n continue\n\n result = ind.argsort()\n expected = np.array(ind).argsort()\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n def 
test_numpy_argsort(self):\n for k, ind in self.indices.items():\n result = np.argsort(ind)\n expected = ind.argsort()\n tm.assert_numpy_array_equal(result, expected)\n\n # these are the only two types that perform\n # pandas compatibility input validation - the\n # rest already perform separate (or no) such\n # validation via their 'values' attribute as\n # defined in pandas/indexes/base.py - they\n # cannot be changed at the moment due to\n # backwards compatibility concerns\n if isinstance(type(ind), (CategoricalIndex, RangeIndex)):\n msg = \"the 'axis' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg,\n np.argsort, ind, axis=1)\n\n msg = \"the 'kind' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.argsort,\n ind, kind='mergesort')\n\n msg = \"the 'order' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.argsort,\n ind, order=('a', 'b'))\n\n def test_pickle(self):\n for ind in self.indices.values():\n self.verify_pickle(ind)\n ind.name = 'foo'\n self.verify_pickle(ind)\n\n def test_take(self):\n indexer = [4, 3, 0, 2]\n for k, ind in self.indices.items():\n\n # separate\n if k in ['boolIndex', 'tuples', 'empty']:\n continue\n\n result = ind.take(indexer)\n expected = ind[indexer]\n self.assertTrue(result.equals(expected))\n\n if not isinstance(ind,\n (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n # GH 10791\n with tm.assertRaises(AttributeError):\n ind.freq\n\n def test_take_invalid_kwargs(self):\n idx = self.create_index()\n indices = [1, 2]\n\n msg = \"take\\(\\) got an unexpected keyword argument 'foo'\"\n tm.assertRaisesRegexp(TypeError, msg, idx.take,\n indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, idx.take,\n indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, idx.take,\n indices, mode='clip')\n\n def test_repeat(self):\n rep = 2\n i = self.create_index()\n expected = pd.Index(i.values.repeat(rep), name=i.name)\n tm.assert_index_equal(i.repeat(rep), expected)\n\n i = self.create_index()\n rep = np.arange(len(i))\n expected = pd.Index(i.values.repeat(rep), name=i.name)\n tm.assert_index_equal(i.repeat(rep), expected)\n\n def test_numpy_repeat(self):\n rep = 2\n i = self.create_index()\n expected = i.repeat(rep)\n tm.assert_index_equal(np.repeat(i, rep), expected)\n\n msg = \"the 'axis' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.repeat,\n i, rep, axis=0)\n\n def test_where(self):\n i = self.create_index()\n result = i.where(notnull(i))\n expected = i\n tm.assert_index_equal(result, expected)\n\n i2 = i.copy()\n i2 = pd.Index([np.nan, np.nan] + i[2:].tolist())\n result = i.where(notnull(i2))\n expected = i2\n tm.assert_index_equal(result, expected)\n\n def test_setops_errorcases(self):\n for name, idx in compat.iteritems(self.indices):\n # # non-iterable input\n cases = [0.5, 'xxx']\n methods = [idx.intersection, idx.union, idx.difference,\n idx.symmetric_difference]\n\n for method in methods:\n for case in cases:\n assertRaisesRegexp(TypeError,\n \"Input must be Index or array-like\",\n method, case)\n\n def test_intersection_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[:5]\n second = idx[:3]\n intersect = first.intersection(second)\n\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n self.assertTrue(tm.equalContents(intersect, second))\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n 
for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n result = first.intersection(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.intersection(case)\n self.assertTrue(tm.equalContents(result, second))\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assertRaisesRegexp(TypeError, msg):\n result = first.intersection([1, 2, 3])\n\n def test_union_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[3:]\n second = idx[:5]\n everything = idx\n union = first.union(second)\n self.assertTrue(tm.equalContents(union, everything))\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n result = first.union(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.union(case)\n self.assertTrue(tm.equalContents(result, everything))\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assertRaisesRegexp(TypeError, msg):\n result = first.union([1, 2, 3])\n\n def test_difference_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[2:]\n second = idx[:4]\n answer = idx[4:]\n result = first.difference(second)\n\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n self.assertTrue(tm.equalContents(result, answer))\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n result = first.difference(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):\n self.assertEqual(result.__class__, answer.__class__)\n tm.assert_numpy_array_equal(result.asi8, answer.asi8)\n else:\n result = first.difference(case)\n self.assertTrue(tm.equalContents(result, answer))\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assertRaisesRegexp(TypeError, msg):\n result = first.difference([1, 2, 3])\n\n def test_symmetric_difference(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[1:]\n second = idx[:-1]\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n answer = idx[[0, -1]]\n result = first.symmetric_difference(second)\n self.assertTrue(tm.equalContents(result, answer))\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n result = first.symmetric_difference(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.symmetric_difference(case)\n self.assertTrue(tm.equalContents(result, answer))\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assertRaisesRegexp(TypeError, msg):\n result = first.symmetric_difference([1, 2, 3])\n\n # 12591 deprecated\n with tm.assert_produces_warning(FutureWarning):\n first.sym_diff(second)\n\n def test_insert_base(self):\n\n for name, idx in compat.iteritems(self.indices):\n result = 
idx[1:4]\n\n if not len(idx):\n continue\n\n # test 0th element\n self.assertTrue(idx[0:4].equals(result.insert(0, idx[0])))\n\n def test_delete_base(self):\n\n for name, idx in compat.iteritems(self.indices):\n\n if not len(idx):\n continue\n\n if isinstance(idx, RangeIndex):\n # tested in class\n continue\n\n expected = idx[1:]\n result = idx.delete(0)\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.name, expected.name)\n\n expected = idx[:-1]\n result = idx.delete(-1)\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.name, expected.name)\n\n with tm.assertRaises((IndexError, ValueError)):\n # either depending on numpy version\n result = idx.delete(len(idx))\n\n def test_equals_op(self):\n # GH9947, GH10637\n index_a = self.create_index()\n if isinstance(index_a, PeriodIndex):\n return\n\n n = len(index_a)\n index_b = index_a[0:-1]\n index_c = index_a[0:-1].append(index_a[-2:-1])\n index_d = index_a[0:1]\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == index_b\n expected1 = np.array([True] * n)\n expected2 = np.array([True] * (n - 1) + [False])\n tm.assert_numpy_array_equal(index_a == index_a, expected1)\n tm.assert_numpy_array_equal(index_a == index_c, expected2)\n\n # test comparisons with numpy arrays\n array_a = np.array(index_a)\n array_b = np.array(index_a[0:-1])\n array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))\n array_d = np.array(index_a[0:1])\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == array_b\n tm.assert_numpy_array_equal(index_a == array_a, expected1)\n tm.assert_numpy_array_equal(index_a == array_c, expected2)\n\n # test comparisons with Series\n series_a = Series(array_a)\n series_b = Series(array_b)\n series_c = Series(array_c)\n series_d = Series(array_d)\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == series_b\n tm.assert_numpy_array_equal(index_a == series_a, expected1)\n tm.assert_numpy_array_equal(index_a == series_c, expected2)\n\n # cases where length is 1 for one of them\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == index_d\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == series_d\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == array_d\n with tm.assertRaisesRegexp(ValueError, \"Series lengths must match\"):\n series_a == series_d\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n series_a == array_d\n\n # comparing with a scalar should broadcast; note that we are excluding\n # MultiIndex because in this case each item in the index is a tuple of\n # length 2, and therefore is considered an array of length 2 in the\n # comparison instead of a scalar\n if not isinstance(index_a, MultiIndex):\n expected3 = np.array([False] * (len(index_a) - 2) + [True, False])\n # assuming the 2nd to last item is unique in the data\n item = index_a[-2]\n tm.assert_numpy_array_equal(index_a == item, expected3)\n tm.assert_series_equal(series_a == item, Series(expected3))\n\n def test_numpy_ufuncs(self):\n # test ufuncs of numpy 1.9.2. 
see:\n # http://docs.scipy.org/doc/numpy/reference/ufuncs.html\n\n # some functions are skipped because it may return different result\n # for unicode input depending on numpy version\n\n for name, idx in compat.iteritems(self.indices):\n for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,\n np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,\n np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,\n np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,\n np.rad2deg]:\n if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):\n # raise TypeError or ValueError (PeriodIndex)\n # PeriodIndex behavior should be changed in future version\n with tm.assertRaises(Exception):\n with np.errstate(all='ignore'):\n func(idx)\n elif isinstance(idx, (Float64Index, Int64Index)):\n # coerces to float (e.g. np.sin)\n with np.errstate(all='ignore'):\n result = func(idx)\n exp = Index(func(idx.values), name=idx.name)\n self.assert_index_equal(result, exp)\n self.assertIsInstance(result, pd.Float64Index)\n else:\n # raise AttributeError or TypeError\n if len(idx) == 0:\n continue\n else:\n with tm.assertRaises(Exception):\n with np.errstate(all='ignore'):\n func(idx)\n\n for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:\n if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):\n # raise TypeError or ValueError (PeriodIndex)\n with tm.assertRaises(Exception):\n func(idx)\n elif isinstance(idx, (Float64Index, Int64Index)):\n # results in bool array\n result = func(idx)\n exp = func(idx.values)\n self.assertIsInstance(result, np.ndarray)\n tm.assertNotIsInstance(result, Index)\n else:\n if len(idx) == 0:\n continue\n else:\n with tm.assertRaises(Exception):\n func(idx)\n\n def test_hasnans_isnans(self):\n # GH 11343, added tests for hasnans / isnans\n for name, index in self.indices.items():\n if isinstance(index, MultiIndex):\n pass\n else:\n idx = index.copy()\n\n # cases in indices doesn't include NaN\n expected = np.array([False] * len(idx), dtype=bool)\n self.assert_numpy_array_equal(idx._isnan, expected)\n self.assertFalse(idx.hasnans)\n\n idx = index.copy()\n values = idx.values\n\n if len(index) == 0:\n continue\n elif isinstance(index, pd.tseries.base.DatetimeIndexOpsMixin):\n values[1] = pd.tslib.iNaT\n elif isinstance(index, Int64Index):\n continue\n else:\n values[1] = np.nan\n\n if isinstance(index, PeriodIndex):\n idx = index.__class__(values, freq=index.freq)\n else:\n idx = index.__class__(values)\n\n expected = np.array([False] * len(idx), dtype=bool)\n expected[1] = True\n self.assert_numpy_array_equal(idx._isnan, expected)\n self.assertTrue(idx.hasnans)\n\n def test_fillna(self):\n # GH 11343\n for name, index in self.indices.items():\n if len(index) == 0:\n pass\n elif isinstance(index, MultiIndex):\n idx = index.copy()\n msg = \"isnull is not defined for MultiIndex\"\n with self.assertRaisesRegexp(NotImplementedError, msg):\n idx.fillna(idx[0])\n else:\n idx = index.copy()\n result = idx.fillna(idx[0])\n self.assert_index_equal(result, idx)\n self.assertFalse(result is idx)\n\n msg = \"'value' must be a scalar, passed: \"\n with self.assertRaisesRegexp(TypeError, msg):\n idx.fillna([idx[0]])\n\n idx = index.copy()\n values = idx.values\n\n if isinstance(index, pd.tseries.base.DatetimeIndexOpsMixin):\n values[1] = pd.tslib.iNaT\n elif isinstance(index, Int64Index):\n continue\n else:\n values[1] = np.nan\n\n if isinstance(index, PeriodIndex):\n idx = index.__class__(values, freq=index.freq)\n else:\n idx = index.__class__(values)\n\n expected = np.array([False] * len(idx), 
dtype=bool)\n expected[1] = True\n self.assert_numpy_array_equal(idx._isnan, expected)\n self.assertTrue(idx.hasnans)\n",
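The row above is the shared `Base` class of index tests (pickle round-trips, set operations, argsort/take, hasnans/fillna, and comparison semantics). A short self-contained sketch — plain public pandas API, not taken from that test suite — of the behaviours those tests assert:

import numpy as np
import pandas as pd

# hasnans / fillna, as exercised by test_hasnans_isnans and test_fillna
idx = pd.Index([1.0, 2.0, 3.0, np.nan])
assert idx.hasnans
assert idx.fillna(0.0).equals(pd.Index([1.0, 2.0, 3.0, 0.0]))

# set operations, as in test_intersection_base / test_union_base /
# test_symmetric_difference (sorted() keeps the check order-insensitive)
a, b = pd.Index([1, 2, 3, 4]), pd.Index([3, 4, 5])
assert sorted(a.intersection(b)) == [3, 4]
assert sorted(a.union(b)) == [1, 2, 3, 4, 5]
assert sorted(a.symmetric_difference(b)) == [1, 2, 5]

# elementwise comparison broadcasts a scalar, as in test_equals_op
assert (a == 3).tolist() == [False, False, True, False]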
"\"\"\"SQL io tests\n\nThe SQL tests are broken down in different classes:\n\n- `PandasSQLTest`: base class with common methods for all test classes\n- Tests for the public API (only tests with sqlite3)\n - `_TestSQLApi` base class\n - `TestSQLApi`: test the public API with sqlalchemy engine\n - `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI\n connection\n- Tests for the different SQL flavors (flavor specific type conversions)\n - Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with\n common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy\n Connection object. The different tested flavors (sqlite3, MySQL,\n PostgreSQL) derive from the base class\n - Tests for the fallback mode (`TestSQLiteFallback`)\n\n\"\"\"\n\nfrom __future__ import print_function\nimport unittest\nimport sqlite3\nimport csv\nimport os\nimport sys\n\nimport nose\nimport warnings\nimport numpy as np\nimport pandas as pd\n\nfrom datetime import datetime, date, time\n\nfrom pandas.types.common import (is_object_dtype, is_datetime64_dtype,\n is_datetime64tz_dtype)\nfrom pandas import DataFrame, Series, Index, MultiIndex, isnull, concat\nfrom pandas import date_range, to_datetime, to_timedelta, Timestamp\nimport pandas.compat as compat\nfrom pandas.compat import StringIO, range, lrange, string_types\nfrom pandas.core.datetools import format as date_format\n\nimport pandas.io.sql as sql\nfrom pandas.io.sql import read_sql_table, read_sql_query\nimport pandas.util.testing as tm\n\n\ntry:\n import sqlalchemy\n import sqlalchemy.schema\n import sqlalchemy.sql.sqltypes as sqltypes\n from sqlalchemy.ext import declarative\n from sqlalchemy.orm import session as sa_session\n SQLALCHEMY_INSTALLED = True\nexcept ImportError:\n SQLALCHEMY_INSTALLED = False\n\nSQL_STRINGS = {\n 'create_iris': {\n 'sqlite': \"\"\"CREATE TABLE iris (\n \"SepalLength\" REAL,\n \"SepalWidth\" REAL,\n \"PetalLength\" REAL,\n \"PetalWidth\" REAL,\n \"Name\" TEXT\n )\"\"\",\n 'mysql': \"\"\"CREATE TABLE iris (\n `SepalLength` DOUBLE,\n `SepalWidth` DOUBLE,\n `PetalLength` DOUBLE,\n `PetalWidth` DOUBLE,\n `Name` VARCHAR(200)\n )\"\"\",\n 'postgresql': \"\"\"CREATE TABLE iris (\n \"SepalLength\" DOUBLE PRECISION,\n \"SepalWidth\" DOUBLE PRECISION,\n \"PetalLength\" DOUBLE PRECISION,\n \"PetalWidth\" DOUBLE PRECISION,\n \"Name\" VARCHAR(200)\n )\"\"\"\n },\n 'insert_iris': {\n 'sqlite': \"\"\"INSERT INTO iris VALUES(?, ?, ?, ?, ?)\"\"\",\n 'mysql': \"\"\"INSERT INTO iris VALUES(%s, %s, %s, %s, \"%s\");\"\"\",\n 'postgresql': \"\"\"INSERT INTO iris VALUES(%s, %s, %s, %s, %s);\"\"\"\n },\n 'create_test_types': {\n 'sqlite': \"\"\"CREATE TABLE types_test_data (\n \"TextCol\" TEXT,\n \"DateCol\" TEXT,\n \"IntDateCol\" INTEGER,\n \"FloatCol\" REAL,\n \"IntCol\" INTEGER,\n \"BoolCol\" INTEGER,\n \"IntColWithNull\" INTEGER,\n \"BoolColWithNull\" INTEGER\n )\"\"\",\n 'mysql': \"\"\"CREATE TABLE types_test_data (\n `TextCol` TEXT,\n `DateCol` DATETIME,\n `IntDateCol` INTEGER,\n `FloatCol` DOUBLE,\n `IntCol` INTEGER,\n `BoolCol` BOOLEAN,\n `IntColWithNull` INTEGER,\n `BoolColWithNull` BOOLEAN\n )\"\"\",\n 'postgresql': \"\"\"CREATE TABLE types_test_data (\n \"TextCol\" TEXT,\n \"DateCol\" TIMESTAMP,\n \"DateColWithTz\" TIMESTAMP WITH TIME ZONE,\n \"IntDateCol\" INTEGER,\n \"FloatCol\" DOUBLE PRECISION,\n \"IntCol\" INTEGER,\n \"BoolCol\" BOOLEAN,\n \"IntColWithNull\" INTEGER,\n \"BoolColWithNull\" BOOLEAN\n )\"\"\"\n },\n 'insert_test_types': {\n 'sqlite': {\n 'query': \"\"\"\n INSERT INTO types_test_data\n VALUES(?, ?, ?, 
?, ?, ?, ?, ?)\n \"\"\",\n 'fields': (\n 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',\n 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'\n )\n },\n 'mysql': {\n 'query': \"\"\"\n INSERT INTO types_test_data\n VALUES(\"%s\", %s, %s, %s, %s, %s, %s, %s)\n \"\"\",\n 'fields': (\n 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',\n 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'\n )\n },\n 'postgresql': {\n 'query': \"\"\"\n INSERT INTO types_test_data\n VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\",\n 'fields': (\n 'TextCol', 'DateCol', 'DateColWithTz',\n 'IntDateCol', 'FloatCol',\n 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'\n )\n },\n },\n 'read_parameters': {\n 'sqlite': \"SELECT * FROM iris WHERE Name=? AND SepalLength=?\",\n 'mysql': 'SELECT * FROM iris WHERE `Name`=\"%s\" AND `SepalLength`=%s',\n 'postgresql': 'SELECT * FROM iris WHERE \"Name\"=%s AND \"SepalLength\"=%s'\n },\n 'read_named_parameters': {\n 'sqlite': \"\"\"\n SELECT * FROM iris WHERE Name=:name AND SepalLength=:length\n \"\"\",\n 'mysql': \"\"\"\n SELECT * FROM iris WHERE\n `Name`=\"%(name)s\" AND `SepalLength`=%(length)s\n \"\"\",\n 'postgresql': \"\"\"\n SELECT * FROM iris WHERE\n \"Name\"=%(name)s AND \"SepalLength\"=%(length)s\n \"\"\"\n },\n 'create_view': {\n 'sqlite': \"\"\"\n CREATE VIEW iris_view AS\n SELECT * FROM iris\n \"\"\"\n }\n}\n\n\nclass MixInBase(object):\n\n def tearDown(self):\n for tbl in self._get_all_tables():\n self.drop_table(tbl)\n self._close_conn()\n\n\nclass MySQLMixIn(MixInBase):\n\n def drop_table(self, table_name):\n cur = self.conn.cursor()\n cur.execute(\"DROP TABLE IF EXISTS %s\" %\n sql._get_valid_mysql_name(table_name))\n self.conn.commit()\n\n def _get_all_tables(self):\n cur = self.conn.cursor()\n cur.execute('SHOW TABLES')\n return [table[0] for table in cur.fetchall()]\n\n def _close_conn(self):\n from pymysql.err import Error\n try:\n self.conn.close()\n except Error:\n pass\n\n\nclass SQLiteMixIn(MixInBase):\n\n def drop_table(self, table_name):\n self.conn.execute(\"DROP TABLE IF EXISTS %s\" %\n sql._get_valid_sqlite_name(table_name))\n self.conn.commit()\n\n def _get_all_tables(self):\n c = self.conn.execute(\n \"SELECT name FROM sqlite_master WHERE type='table'\")\n return [table[0] for table in c.fetchall()]\n\n def _close_conn(self):\n self.conn.close()\n\n\nclass SQLAlchemyMixIn(MixInBase):\n\n def drop_table(self, table_name):\n sql.SQLDatabase(self.conn).drop_table(table_name)\n\n def _get_all_tables(self):\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n table_list = meta.tables.keys()\n return table_list\n\n def _close_conn(self):\n pass\n\n\nclass PandasSQLTest(unittest.TestCase):\n \"\"\"\n Base class with common private methods for SQLAlchemy and fallback cases.\n\n \"\"\"\n\n def _get_exec(self):\n if hasattr(self.conn, 'execute'):\n return self.conn\n else:\n return self.conn.cursor()\n\n def _load_iris_data(self):\n import io\n iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')\n\n self.drop_table('iris')\n self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])\n\n with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:\n r = csv.reader(iris_csv)\n next(r) # skip header row\n ins = SQL_STRINGS['insert_iris'][self.flavor]\n\n for row in r:\n self._get_exec().execute(ins, row)\n\n def _load_iris_view(self):\n self.drop_table('iris_view')\n self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])\n\n def _check_iris_loaded_frame(self, iris_frame):\n pytype = 
iris_frame.dtypes[0].type\n row = iris_frame.iloc[0]\n\n self.assertTrue(\n issubclass(pytype, np.floating), 'Loaded frame has incorrect type')\n tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])\n\n def _load_test1_data(self):\n columns = ['index', 'A', 'B', 'C', 'D']\n data = [(\n '2000-01-03 00:00:00', 0.980268513777, 3.68573087906,\n -0.364216805298, -1.15973806169),\n ('2000-01-04 00:00:00', 1.04791624281, -\n 0.0412318367011, -0.16181208307, 0.212549316967),\n ('2000-01-05 00:00:00', 0.498580885705,\n 0.731167677815, -0.537677223318, 1.34627041952),\n ('2000-01-06 00:00:00', 1.12020151869, 1.56762092543,\n 0.00364077397681, 0.67525259227)]\n\n self.test_frame1 = DataFrame(data, columns=columns)\n\n def _load_test2_data(self):\n df = DataFrame(dict(A=[4, 1, 3, 6],\n B=['asd', 'gsq', 'ylt', 'jkl'],\n C=[1.1, 3.1, 6.9, 5.3],\n D=[False, True, True, False],\n E=['1990-11-22', '1991-10-26',\n '1993-11-26', '1995-12-12']))\n df['E'] = to_datetime(df['E'])\n\n self.test_frame2 = df\n\n def _load_test3_data(self):\n columns = ['index', 'A', 'B']\n data = [(\n '2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),\n ('2000-01-04 00:00:00', -29, -0.0412318367011),\n ('2000-01-05 00:00:00', 20000, 0.731167677815),\n ('2000-01-06 00:00:00', -290867, 1.56762092543)]\n\n self.test_frame3 = DataFrame(data, columns=columns)\n\n def _load_raw_sql(self):\n self.drop_table('types_test_data')\n self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])\n ins = SQL_STRINGS['insert_test_types'][self.flavor]\n\n data = [\n {\n 'TextCol': 'first',\n 'DateCol': '2000-01-03 00:00:00',\n 'DateColWithTz': '2000-01-01 00:00:00-08:00',\n 'IntDateCol': 535852800,\n 'FloatCol': 10.10,\n 'IntCol': 1,\n 'BoolCol': False,\n 'IntColWithNull': 1,\n 'BoolColWithNull': False,\n },\n {\n 'TextCol': 'first',\n 'DateCol': '2000-01-04 00:00:00',\n 'DateColWithTz': '2000-06-01 00:00:00-07:00',\n 'IntDateCol': 1356998400,\n 'FloatCol': 10.10,\n 'IntCol': 1,\n 'BoolCol': False,\n 'IntColWithNull': None,\n 'BoolColWithNull': None,\n },\n ]\n\n for d in data:\n self._get_exec().execute(\n ins['query'],\n [d[field] for field in ins['fields']]\n )\n\n def _count_rows(self, table_name):\n result = self._get_exec().execute(\n \"SELECT count(*) AS count_1 FROM %s\" % table_name).fetchone()\n return result[0]\n\n def _read_sql_iris(self):\n iris_frame = self.pandasSQL.read_query(\"SELECT * FROM iris\")\n self._check_iris_loaded_frame(iris_frame)\n\n def _read_sql_iris_parameter(self):\n query = SQL_STRINGS['read_parameters'][self.flavor]\n params = ['Iris-setosa', 5.1]\n iris_frame = self.pandasSQL.read_query(query, params=params)\n self._check_iris_loaded_frame(iris_frame)\n\n def _read_sql_iris_named_parameter(self):\n query = SQL_STRINGS['read_named_parameters'][self.flavor]\n params = {'name': 'Iris-setosa', 'length': 5.1}\n iris_frame = self.pandasSQL.read_query(query, params=params)\n self._check_iris_loaded_frame(iris_frame)\n\n def _to_sql(self):\n self.drop_table('test_frame1')\n\n self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')\n self.assertTrue(self.pandasSQL.has_table(\n 'test_frame1'), 'Table not written to DB')\n\n # Nuke table\n self.drop_table('test_frame1')\n\n def _to_sql_empty(self):\n self.drop_table('test_frame1')\n self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')\n\n def _to_sql_fail(self):\n self.drop_table('test_frame1')\n\n self.pandasSQL.to_sql(\n self.test_frame1, 'test_frame1', if_exists='fail')\n self.assertTrue(self.pandasSQL.has_table(\n 'test_frame1'), 'Table 
not written to DB')\n\n self.assertRaises(ValueError, self.pandasSQL.to_sql,\n self.test_frame1, 'test_frame1', if_exists='fail')\n\n self.drop_table('test_frame1')\n\n def _to_sql_replace(self):\n self.drop_table('test_frame1')\n\n self.pandasSQL.to_sql(\n self.test_frame1, 'test_frame1', if_exists='fail')\n # Add to table again\n self.pandasSQL.to_sql(\n self.test_frame1, 'test_frame1', if_exists='replace')\n self.assertTrue(self.pandasSQL.has_table(\n 'test_frame1'), 'Table not written to DB')\n\n num_entries = len(self.test_frame1)\n num_rows = self._count_rows('test_frame1')\n\n self.assertEqual(\n num_rows, num_entries, \"not the same number of rows as entries\")\n\n self.drop_table('test_frame1')\n\n def _to_sql_append(self):\n # Nuke table just in case\n self.drop_table('test_frame1')\n\n self.pandasSQL.to_sql(\n self.test_frame1, 'test_frame1', if_exists='fail')\n\n # Add to table again\n self.pandasSQL.to_sql(\n self.test_frame1, 'test_frame1', if_exists='append')\n self.assertTrue(self.pandasSQL.has_table(\n 'test_frame1'), 'Table not written to DB')\n\n num_entries = 2 * len(self.test_frame1)\n num_rows = self._count_rows('test_frame1')\n\n self.assertEqual(\n num_rows, num_entries, \"not the same number of rows as entries\")\n\n self.drop_table('test_frame1')\n\n def _roundtrip(self):\n self.drop_table('test_frame_roundtrip')\n self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')\n result = self.pandasSQL.read_query(\n 'SELECT * FROM test_frame_roundtrip')\n\n result.set_index('level_0', inplace=True)\n # result.index.astype(int)\n\n result.index.name = None\n\n tm.assert_frame_equal(result, self.test_frame1)\n\n def _execute_sql(self):\n # drop_sql = \"DROP TABLE IF EXISTS test\" # should already be done\n iris_results = self.pandasSQL.execute(\"SELECT * FROM iris\")\n row = iris_results.fetchone()\n tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])\n\n def _to_sql_save_index(self):\n df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],\n columns=['A', 'B', 'C'], index=['A'])\n self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')\n ix_cols = self._get_index_columns('test_to_sql_saves_index')\n self.assertEqual(ix_cols, [['A', ], ])\n\n def _transaction_test(self):\n self.pandasSQL.execute(\"CREATE TABLE test_trans (A INT, B TEXT)\")\n\n ins_sql = \"INSERT INTO test_trans (A,B) VALUES (1, 'blah')\"\n\n # Make sure when transaction is rolled back, no rows get inserted\n try:\n with self.pandasSQL.run_transaction() as trans:\n trans.execute(ins_sql)\n raise Exception('error')\n except:\n # ignore raised exception\n pass\n res = self.pandasSQL.read_query('SELECT * FROM test_trans')\n self.assertEqual(len(res), 0)\n\n # Make sure when transaction is committed, rows do get inserted\n with self.pandasSQL.run_transaction() as trans:\n trans.execute(ins_sql)\n res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')\n self.assertEqual(len(res2), 1)\n\n\n# -----------------------------------------------------------------------------\n# -- Testing the public API\n\nclass _TestSQLApi(PandasSQLTest):\n\n \"\"\"\n Base class to test the public API.\n\n From this two classes are derived to run these tests for both the\n sqlalchemy mode (`TestSQLApi`) and the fallback mode\n (`TestSQLiteFallbackApi`). These tests are run with sqlite3. 
Specific\n tests for the different sql flavours are included in `_TestSQLAlchemy`.\n\n Notes:\n flavor can always be passed even in SQLAlchemy mode,\n should be correctly ignored.\n\n we don't use drop_table because that isn't part of the public api\n\n \"\"\"\n flavor = 'sqlite'\n mode = None\n\n def setUp(self):\n self.conn = self.connect()\n self._load_iris_data()\n self._load_iris_view()\n self._load_test1_data()\n self._load_test2_data()\n self._load_test3_data()\n self._load_raw_sql()\n\n def test_read_sql_iris(self):\n iris_frame = sql.read_sql_query(\n \"SELECT * FROM iris\", self.conn)\n self._check_iris_loaded_frame(iris_frame)\n\n def test_read_sql_view(self):\n iris_frame = sql.read_sql_query(\n \"SELECT * FROM iris_view\", self.conn)\n self._check_iris_loaded_frame(iris_frame)\n\n def test_to_sql(self):\n sql.to_sql(self.test_frame1, 'test_frame1', self.conn)\n self.assertTrue(\n sql.has_table('test_frame1', self.conn),\n 'Table not written to DB')\n\n def test_to_sql_fail(self):\n sql.to_sql(self.test_frame1, 'test_frame2',\n self.conn, if_exists='fail')\n self.assertTrue(\n sql.has_table('test_frame2', self.conn),\n 'Table not written to DB')\n\n self.assertRaises(ValueError, sql.to_sql, self.test_frame1,\n 'test_frame2', self.conn, if_exists='fail')\n\n def test_to_sql_replace(self):\n sql.to_sql(self.test_frame1, 'test_frame3',\n self.conn, if_exists='fail')\n # Add to table again\n sql.to_sql(self.test_frame1, 'test_frame3',\n self.conn, if_exists='replace')\n self.assertTrue(\n sql.has_table('test_frame3', self.conn),\n 'Table not written to DB')\n\n num_entries = len(self.test_frame1)\n num_rows = self._count_rows('test_frame3')\n\n self.assertEqual(\n num_rows, num_entries, \"not the same number of rows as entries\")\n\n def test_to_sql_append(self):\n sql.to_sql(self.test_frame1, 'test_frame4',\n self.conn, if_exists='fail')\n\n # Add to table again\n sql.to_sql(self.test_frame1, 'test_frame4',\n self.conn, if_exists='append')\n self.assertTrue(\n sql.has_table('test_frame4', self.conn),\n 'Table not written to DB')\n\n num_entries = 2 * len(self.test_frame1)\n num_rows = self._count_rows('test_frame4')\n\n self.assertEqual(\n num_rows, num_entries, \"not the same number of rows as entries\")\n\n def test_to_sql_type_mapping(self):\n sql.to_sql(self.test_frame3, 'test_frame5', self.conn, index=False)\n result = sql.read_sql(\"SELECT * FROM test_frame5\", self.conn)\n\n tm.assert_frame_equal(self.test_frame3, result)\n\n def test_to_sql_series(self):\n s = Series(np.arange(5, dtype='int64'), name='series')\n sql.to_sql(s, \"test_series\", self.conn, index=False)\n s2 = sql.read_sql_query(\"SELECT * FROM test_series\", self.conn)\n tm.assert_frame_equal(s.to_frame(), s2)\n\n def test_to_sql_panel(self):\n panel = tm.makePanel()\n self.assertRaises(NotImplementedError, sql.to_sql, panel,\n 'test_panel', self.conn)\n\n def test_roundtrip(self):\n sql.to_sql(self.test_frame1, 'test_frame_roundtrip',\n con=self.conn)\n result = sql.read_sql_query(\n 'SELECT * FROM test_frame_roundtrip',\n con=self.conn)\n\n # HACK!\n result.index = self.test_frame1.index\n result.set_index('level_0', inplace=True)\n result.index.astype(int)\n result.index.name = None\n tm.assert_frame_equal(result, self.test_frame1)\n\n def test_roundtrip_chunksize(self):\n sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,\n index=False, chunksize=2)\n result = sql.read_sql_query(\n 'SELECT * FROM test_frame_roundtrip',\n con=self.conn)\n tm.assert_frame_equal(result, self.test_frame1)\n\n 
def test_execute_sql(self):\n # drop_sql = \"DROP TABLE IF EXISTS test\" # should already be done\n iris_results = sql.execute(\"SELECT * FROM iris\", con=self.conn)\n row = iris_results.fetchone()\n tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])\n\n def test_date_parsing(self):\n # Test date parsing in read_sq\n # No Parsing\n df = sql.read_sql_query(\"SELECT * FROM types_test_data\", self.conn)\n self.assertFalse(\n issubclass(df.DateCol.dtype.type, np.datetime64),\n \"DateCol loaded with incorrect type\")\n\n df = sql.read_sql_query(\"SELECT * FROM types_test_data\", self.conn,\n parse_dates=['DateCol'])\n self.assertTrue(\n issubclass(df.DateCol.dtype.type, np.datetime64),\n \"DateCol loaded with incorrect type\")\n\n df = sql.read_sql_query(\"SELECT * FROM types_test_data\", self.conn,\n parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})\n self.assertTrue(\n issubclass(df.DateCol.dtype.type, np.datetime64),\n \"DateCol loaded with incorrect type\")\n\n df = sql.read_sql_query(\"SELECT * FROM types_test_data\", self.conn,\n parse_dates=['IntDateCol'])\n\n self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),\n \"IntDateCol loaded with incorrect type\")\n\n df = sql.read_sql_query(\"SELECT * FROM types_test_data\", self.conn,\n parse_dates={'IntDateCol': 's'})\n\n self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),\n \"IntDateCol loaded with incorrect type\")\n\n def test_date_and_index(self):\n # Test case where same column appears in parse_date and index_col\n\n df = sql.read_sql_query(\"SELECT * FROM types_test_data\", self.conn,\n index_col='DateCol',\n parse_dates=['DateCol', 'IntDateCol'])\n\n self.assertTrue(issubclass(df.index.dtype.type, np.datetime64),\n \"DateCol loaded with incorrect type\")\n\n self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),\n \"IntDateCol loaded with incorrect type\")\n\n def test_timedelta(self):\n\n # see #6921\n df = to_timedelta(\n Series(['00:00:01', '00:00:03'], name='foo')).to_frame()\n with tm.assert_produces_warning(UserWarning):\n df.to_sql('test_timedelta', self.conn)\n result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)\n tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))\n\n def test_complex(self):\n df = DataFrame({'a': [1 + 1j, 2j]})\n # Complex data type should raise error\n self.assertRaises(ValueError, df.to_sql, 'test_complex', self.conn)\n\n def test_to_sql_index_label(self):\n temp_frame = DataFrame({'col1': range(4)})\n\n # no index name, defaults to 'index'\n sql.to_sql(temp_frame, 'test_index_label', self.conn)\n frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)\n self.assertEqual(frame.columns[0], 'index')\n\n # specifying index_label\n sql.to_sql(temp_frame, 'test_index_label', self.conn,\n if_exists='replace', index_label='other_label')\n frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)\n self.assertEqual(frame.columns[0], 'other_label',\n \"Specified index_label not written to database\")\n\n # using the index name\n temp_frame.index.name = 'index_name'\n sql.to_sql(temp_frame, 'test_index_label', self.conn,\n if_exists='replace')\n frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)\n self.assertEqual(frame.columns[0], 'index_name',\n \"Index name not written to database\")\n\n # has index name, but specifying index_label\n sql.to_sql(temp_frame, 'test_index_label', self.conn,\n if_exists='replace', index_label='other_label')\n frame = sql.read_sql_query('SELECT * FROM 
test_index_label', self.conn)\n self.assertEqual(frame.columns[0], 'other_label',\n \"Specified index_label not written to database\")\n\n def test_to_sql_index_label_multiindex(self):\n temp_frame = DataFrame({'col1': range(4)},\n index=MultiIndex.from_product(\n [('A0', 'A1'), ('B0', 'B1')]))\n\n # no index name, defaults to 'level_0' and 'level_1'\n sql.to_sql(temp_frame, 'test_index_label', self.conn)\n frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)\n self.assertEqual(frame.columns[0], 'level_0')\n self.assertEqual(frame.columns[1], 'level_1')\n\n # specifying index_label\n sql.to_sql(temp_frame, 'test_index_label', self.conn,\n if_exists='replace', index_label=['A', 'B'])\n frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)\n self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],\n \"Specified index_labels not written to database\")\n\n # using the index name\n temp_frame.index.names = ['A', 'B']\n sql.to_sql(temp_frame, 'test_index_label', self.conn,\n if_exists='replace')\n frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)\n self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],\n \"Index names not written to database\")\n\n # has index name, but specifying index_label\n sql.to_sql(temp_frame, 'test_index_label', self.conn,\n if_exists='replace', index_label=['C', 'D'])\n frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)\n self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'],\n \"Specified index_labels not written to database\")\n\n # wrong length of index_label\n self.assertRaises(ValueError, sql.to_sql, temp_frame,\n 'test_index_label', self.conn, if_exists='replace',\n index_label='C')\n\n def test_multiindex_roundtrip(self):\n df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],\n columns=['A', 'B', 'C'], index=['A', 'B'])\n\n df.to_sql('test_multiindex_roundtrip', self.conn)\n result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',\n self.conn, index_col=['A', 'B'])\n tm.assert_frame_equal(df, result, check_index_type=True)\n\n def test_integer_col_names(self):\n df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])\n sql.to_sql(df, \"test_frame_integer_col_names\", self.conn,\n if_exists='replace')\n\n def test_get_schema(self):\n create_sql = sql.get_schema(self.test_frame1, 'test', con=self.conn)\n self.assertTrue('CREATE' in create_sql)\n\n def test_get_schema_dtypes(self):\n float_frame = DataFrame({'a': [1.1, 1.2], 'b': [2.1, 2.2]})\n dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'\n create_sql = sql.get_schema(float_frame, 'test',\n con=self.conn, dtype={'b': dtype})\n self.assertTrue('CREATE' in create_sql)\n self.assertTrue('INTEGER' in create_sql)\n\n def test_get_schema_keys(self):\n frame = DataFrame({'Col1': [1.1, 1.2], 'Col2': [2.1, 2.2]})\n create_sql = sql.get_schema(frame, 'test', con=self.conn, keys='Col1')\n constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY (\"Col1\")'\n self.assertTrue(constraint_sentence in create_sql)\n\n # multiple columns as key (GH10385)\n create_sql = sql.get_schema(self.test_frame1, 'test',\n con=self.conn, keys=['A', 'B'])\n constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY (\"A\", \"B\")'\n self.assertTrue(constraint_sentence in create_sql)\n\n def test_chunksize_read(self):\n df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))\n df.to_sql('test_chunksize', self.conn, index=False)\n\n # reading the query in one time\n res1 = sql.read_sql_query(\"select * from 
test_chunksize\", self.conn)\n\n # reading the query in chunks with read_sql_query\n res2 = DataFrame()\n i = 0\n sizes = [5, 5, 5, 5, 2]\n\n for chunk in sql.read_sql_query(\"select * from test_chunksize\",\n self.conn, chunksize=5):\n res2 = concat([res2, chunk], ignore_index=True)\n self.assertEqual(len(chunk), sizes[i])\n i += 1\n\n tm.assert_frame_equal(res1, res2)\n\n # reading the query in chunks with read_sql_query\n if self.mode == 'sqlalchemy':\n res3 = DataFrame()\n i = 0\n sizes = [5, 5, 5, 5, 2]\n\n for chunk in sql.read_sql_table(\"test_chunksize\", self.conn,\n chunksize=5):\n res3 = concat([res3, chunk], ignore_index=True)\n self.assertEqual(len(chunk), sizes[i])\n i += 1\n\n tm.assert_frame_equal(res1, res3)\n\n def test_categorical(self):\n # GH8624\n # test that categorical gets written correctly as dense column\n df = DataFrame(\n {'person_id': [1, 2, 3],\n 'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})\n df2 = df.copy()\n df2['person_name'] = df2['person_name'].astype('category')\n\n df2.to_sql('test_categorical', self.conn, index=False)\n res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)\n\n tm.assert_frame_equal(res, df)\n\n def test_unicode_column_name(self):\n # GH 11431\n df = DataFrame([[1, 2], [3, 4]], columns=[u'\\xe9', u'b'])\n df.to_sql('test_unicode', self.conn, index=False)\n\n\nclass TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):\n \"\"\"\n Test the public API as it would be used directly\n\n Tests for `read_sql_table` are included here, as this is specific for the\n sqlalchemy mode.\n\n \"\"\"\n flavor = 'sqlite'\n mode = 'sqlalchemy'\n\n def connect(self):\n if SQLALCHEMY_INSTALLED:\n return sqlalchemy.create_engine('sqlite:///:memory:')\n else:\n raise nose.SkipTest('SQLAlchemy not installed')\n\n def test_read_table_columns(self):\n # test columns argument in read_table\n sql.to_sql(self.test_frame1, 'test_frame', self.conn)\n\n cols = ['A', 'B']\n result = sql.read_sql_table('test_frame', self.conn, columns=cols)\n self.assertEqual(result.columns.tolist(), cols,\n \"Columns not correctly selected\")\n\n def test_read_table_index_col(self):\n # test columns argument in read_table\n sql.to_sql(self.test_frame1, 'test_frame', self.conn)\n\n result = sql.read_sql_table('test_frame', self.conn, index_col=\"index\")\n self.assertEqual(result.index.names, [\"index\"],\n \"index_col not correctly set\")\n\n result = sql.read_sql_table(\n 'test_frame', self.conn, index_col=[\"A\", \"B\"])\n self.assertEqual(result.index.names, [\"A\", \"B\"],\n \"index_col not correctly set\")\n\n result = sql.read_sql_table('test_frame', self.conn,\n index_col=[\"A\", \"B\"],\n columns=[\"C\", \"D\"])\n self.assertEqual(result.index.names, [\"A\", \"B\"],\n \"index_col not correctly set\")\n self.assertEqual(result.columns.tolist(), [\"C\", \"D\"],\n \"columns not set correctly whith index_col\")\n\n def test_read_sql_delegate(self):\n iris_frame1 = sql.read_sql_query(\n \"SELECT * FROM iris\", self.conn)\n iris_frame2 = sql.read_sql(\n \"SELECT * FROM iris\", self.conn)\n tm.assert_frame_equal(iris_frame1, iris_frame2)\n\n iris_frame1 = sql.read_sql_table('iris', self.conn)\n iris_frame2 = sql.read_sql('iris', self.conn)\n tm.assert_frame_equal(iris_frame1, iris_frame2)\n\n def test_not_reflect_all_tables(self):\n # create invalid table\n qry = \"\"\"CREATE TABLE invalid (x INTEGER, y UNKNOWN);\"\"\"\n self.conn.execute(qry)\n qry = \"\"\"CREATE TABLE other_table (x INTEGER, y INTEGER);\"\"\"\n self.conn.execute(qry)\n\n with 
warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # Trigger a warning.\n sql.read_sql_table('other_table', self.conn)\n sql.read_sql_query('SELECT * FROM other_table', self.conn)\n # Verify some things\n self.assertEqual(len(w), 0, \"Warning triggered for other table\")\n\n def test_warning_case_insensitive_table_name(self):\n # see GH7815.\n # We can't test that this warning is triggered, a the database\n # configuration would have to be altered. But here we test that\n # the warning is certainly NOT triggered in a normal case.\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # This should not trigger a Warning\n self.test_frame1.to_sql('CaseSensitive', self.conn)\n # Verify some things\n self.assertEqual(\n len(w), 0, \"Warning triggered for writing a table\")\n\n def _get_index_columns(self, tbl_name):\n from sqlalchemy.engine import reflection\n insp = reflection.Inspector.from_engine(self.conn)\n ixs = insp.get_indexes('test_index_saved')\n ixs = [i['column_names'] for i in ixs]\n return ixs\n\n def test_sqlalchemy_type_mapping(self):\n\n # Test Timestamp objects (no datetime64 because of timezone) (GH9085)\n df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],\n utc=True)})\n db = sql.SQLDatabase(self.conn)\n table = sql.SQLTable(\"test_type\", db, frame=df)\n self.assertTrue(isinstance(\n table.table.c['time'].type, sqltypes.DateTime))\n\n def test_to_sql_read_sql_with_database_uri(self):\n\n # Test read_sql and .to_sql method with a database URI (GH10654)\n test_frame1 = self.test_frame1\n # db_uri = 'sqlite:///:memory:' # raises\n # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near\n # \"iris\": syntax error [SQL: 'iris']\n with tm.ensure_clean() as name:\n db_uri = 'sqlite:///' + name\n table = 'iris'\n test_frame1.to_sql(table, db_uri, if_exists='replace', index=False)\n test_frame2 = sql.read_sql(table, db_uri)\n test_frame3 = sql.read_sql_table(table, db_uri)\n query = 'SELECT * FROM iris'\n test_frame4 = sql.read_sql_query(query, db_uri)\n tm.assert_frame_equal(test_frame1, test_frame2)\n tm.assert_frame_equal(test_frame1, test_frame3)\n tm.assert_frame_equal(test_frame1, test_frame4)\n\n def _make_iris_table_metadata(self):\n sa = sqlalchemy\n metadata = sa.MetaData()\n iris = sa.Table('iris', metadata,\n sa.Column('SepalLength', sa.REAL),\n sa.Column('SepalWidth', sa.REAL),\n sa.Column('PetalLength', sa.REAL),\n sa.Column('PetalWidth', sa.REAL),\n sa.Column('Name', sa.TEXT)\n )\n\n return iris\n\n def test_query_by_text_obj(self):\n # WIP : GH10846\n name_text = sqlalchemy.text('select * from iris where name=:name')\n iris_df = sql.read_sql(name_text, self.conn, params={\n 'name': 'Iris-versicolor'})\n all_names = set(iris_df['Name'])\n self.assertEqual(all_names, set(['Iris-versicolor']))\n\n def test_query_by_select_obj(self):\n # WIP : GH10846\n iris = self._make_iris_table_metadata()\n\n name_select = sqlalchemy.select([iris]).where(\n iris.c.Name == sqlalchemy.bindparam('name'))\n iris_df = sql.read_sql(name_select, self.conn,\n params={'name': 'Iris-setosa'})\n all_names = set(iris_df['Name'])\n self.assertEqual(all_names, set(['Iris-setosa']))\n\n\nclass _EngineToConnMixin(object):\n \"\"\"\n A mixin that causes setup_connect to create a conn rather than an engine.\n \"\"\"\n\n def setUp(self):\n super(_EngineToConnMixin, self).setUp()\n engine = self.conn\n conn = 
engine.connect()\n self.__tx = conn.begin()\n self.pandasSQL = sql.SQLDatabase(conn)\n self.__engine = engine\n self.conn = conn\n\n def tearDown(self):\n self.__tx.rollback()\n self.conn.close()\n self.conn = self.__engine\n self.pandasSQL = sql.SQLDatabase(self.__engine)\n super(_EngineToConnMixin, self).tearDown()\n\n\nclass TestSQLApiConn(_EngineToConnMixin, TestSQLApi):\n pass\n\n\nclass TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):\n \"\"\"\n Test the public sqlite connection fallback API\n\n \"\"\"\n flavor = 'sqlite'\n mode = 'fallback'\n\n def connect(self, database=\":memory:\"):\n return sqlite3.connect(database)\n\n def test_sql_open_close(self):\n # Test if the IO in the database still work if the connection closed\n # between the writing and reading (as in many real situations).\n\n with tm.ensure_clean() as name:\n\n conn = self.connect(name)\n sql.to_sql(self.test_frame3, \"test_frame3_legacy\",\n conn, index=False)\n conn.close()\n\n conn = self.connect(name)\n result = sql.read_sql_query(\"SELECT * FROM test_frame3_legacy;\",\n conn)\n conn.close()\n\n tm.assert_frame_equal(self.test_frame3, result)\n\n def test_con_string_import_error(self):\n if not SQLALCHEMY_INSTALLED:\n conn = 'mysql://root@localhost/pandas_nosetest'\n self.assertRaises(ImportError, sql.read_sql, \"SELECT * FROM iris\",\n conn)\n else:\n raise nose.SkipTest('SQLAlchemy is installed')\n\n def test_read_sql_delegate(self):\n iris_frame1 = sql.read_sql_query(\"SELECT * FROM iris\", self.conn)\n iris_frame2 = sql.read_sql(\"SELECT * FROM iris\", self.conn)\n tm.assert_frame_equal(iris_frame1, iris_frame2)\n\n self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)\n\n def test_safe_names_warning(self):\n # GH 6798\n df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space\n # warns on create table with spaces in names\n with tm.assert_produces_warning():\n sql.to_sql(df, \"test_frame3_legacy\", self.conn, index=False)\n\n def test_get_schema2(self):\n # without providing a connection object (available for backwards comp)\n create_sql = sql.get_schema(self.test_frame1, 'test')\n self.assertTrue('CREATE' in create_sql)\n\n def _get_sqlite_column_type(self, schema, column):\n\n for col in schema.split('\\n'):\n if col.split()[0].strip('\"\"') == column:\n return col.split()[1]\n raise ValueError('Column %s not found' % (column))\n\n def test_sqlite_type_mapping(self):\n\n # Test Timestamp objects (no datetime64 because of timezone) (GH9085)\n df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],\n utc=True)})\n db = sql.SQLiteDatabase(self.conn)\n table = sql.SQLiteTable(\"test_type\", db, frame=df)\n schema = table.sql_schema()\n self.assertEqual(self._get_sqlite_column_type(schema, 'time'),\n \"TIMESTAMP\")\n\n\n# -----------------------------------------------------------------------------\n# -- Database flavor specific tests\n\n\nclass _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):\n \"\"\"\n Base class for testing the sqlalchemy backend.\n\n Subclasses for specific database types are created below. 
Tests that\n deviate for each flavor are overwritten there.\n\n \"\"\"\n flavor = None\n\n @classmethod\n def setUpClass(cls):\n cls.setup_import()\n cls.setup_driver()\n\n # test connection\n try:\n conn = cls.connect()\n conn.connect()\n except sqlalchemy.exc.OperationalError:\n msg = \"{0} - can't connect to {1} server\".format(cls, cls.flavor)\n raise nose.SkipTest(msg)\n\n def setUp(self):\n self.setup_connect()\n\n self._load_iris_data()\n self._load_raw_sql()\n self._load_test1_data()\n\n @classmethod\n def setup_import(cls):\n # Skip this test if SQLAlchemy not available\n if not SQLALCHEMY_INSTALLED:\n raise nose.SkipTest('SQLAlchemy not installed')\n\n @classmethod\n def setup_driver(cls):\n raise NotImplementedError()\n\n @classmethod\n def connect(cls):\n raise NotImplementedError()\n\n def setup_connect(self):\n try:\n self.conn = self.connect()\n self.pandasSQL = sql.SQLDatabase(self.conn)\n # to test if connection can be made:\n self.conn.connect()\n except sqlalchemy.exc.OperationalError:\n raise nose.SkipTest(\n \"Can't connect to {0} server\".format(self.flavor))\n\n def test_aread_sql(self):\n self._read_sql_iris()\n\n def test_read_sql_parameter(self):\n self._read_sql_iris_parameter()\n\n def test_read_sql_named_parameter(self):\n self._read_sql_iris_named_parameter()\n\n def test_to_sql(self):\n self._to_sql()\n\n def test_to_sql_empty(self):\n self._to_sql_empty()\n\n def test_to_sql_fail(self):\n self._to_sql_fail()\n\n def test_to_sql_replace(self):\n self._to_sql_replace()\n\n def test_to_sql_append(self):\n self._to_sql_append()\n\n def test_create_table(self):\n temp_conn = self.connect()\n temp_frame = DataFrame(\n {'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})\n\n pandasSQL = sql.SQLDatabase(temp_conn)\n pandasSQL.to_sql(temp_frame, 'temp_frame')\n\n self.assertTrue(\n temp_conn.has_table('temp_frame'), 'Table not written to DB')\n\n def test_drop_table(self):\n temp_conn = self.connect()\n\n temp_frame = DataFrame(\n {'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})\n\n pandasSQL = sql.SQLDatabase(temp_conn)\n pandasSQL.to_sql(temp_frame, 'temp_frame')\n\n self.assertTrue(\n temp_conn.has_table('temp_frame'), 'Table not written to DB')\n\n pandasSQL.drop_table('temp_frame')\n\n self.assertFalse(\n temp_conn.has_table('temp_frame'), 'Table not deleted from DB')\n\n def test_roundtrip(self):\n self._roundtrip()\n\n def test_execute_sql(self):\n self._execute_sql()\n\n def test_read_table(self):\n iris_frame = sql.read_sql_table(\"iris\", con=self.conn)\n self._check_iris_loaded_frame(iris_frame)\n\n def test_read_table_columns(self):\n iris_frame = sql.read_sql_table(\n \"iris\", con=self.conn, columns=['SepalLength', 'SepalLength'])\n tm.equalContents(\n iris_frame.columns.values, ['SepalLength', 'SepalLength'])\n\n def test_read_table_absent(self):\n self.assertRaises(\n ValueError, sql.read_sql_table, \"this_doesnt_exist\", con=self.conn)\n\n def test_default_type_conversion(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),\n \"FloatCol loaded with incorrect type\")\n self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),\n \"IntCol loaded with incorrect type\")\n self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),\n \"BoolCol loaded with incorrect type\")\n\n # Int column with NA values stays as float\n self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),\n \"IntColWithNull loaded with incorrect type\")\n # Bool column with NA values 
becomes object\n self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),\n \"BoolColWithNull loaded with incorrect type\")\n\n def test_bigint(self):\n # int64 should be converted to BigInteger, GH7433\n df = DataFrame(data={'i64': [2**62]})\n df.to_sql('test_bigint', self.conn, index=False)\n result = sql.read_sql_table('test_bigint', self.conn)\n\n tm.assert_frame_equal(df, result)\n\n def test_default_date_load(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n # IMPORTANT - sqlite has no native date type, so shouldn't parse, but\n # MySQL SHOULD be converted.\n self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),\n \"DateCol loaded with incorrect type\")\n\n def test_datetime_with_timezone(self):\n # edge case that converts postgresql datetime with time zone types\n # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok\n # but should be more natural, so coerce to datetime64[ns] for now\n\n def check(col):\n # check that a column is either datetime64[ns]\n # or datetime64[ns, UTC]\n if is_datetime64_dtype(col.dtype):\n\n # \"2000-01-01 00:00:00-08:00\" should convert to\n # \"2000-01-01 08:00:00\"\n self.assertEqual(col[0], Timestamp('2000-01-01 08:00:00'))\n\n # \"2000-06-01 00:00:00-07:00\" should convert to\n # \"2000-06-01 07:00:00\"\n self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00'))\n\n elif is_datetime64tz_dtype(col.dtype):\n self.assertTrue(str(col.dt.tz) == 'UTC')\n\n # \"2000-01-01 00:00:00-08:00\" should convert to\n # \"2000-01-01 08:00:00\"\n self.assertEqual(col[0], Timestamp(\n '2000-01-01 08:00:00', tz='UTC'))\n\n # \"2000-06-01 00:00:00-07:00\" should convert to\n # \"2000-06-01 07:00:00\"\n self.assertEqual(col[1], Timestamp(\n '2000-06-01 07:00:00', tz='UTC'))\n\n else:\n raise AssertionError(\"DateCol loaded with incorrect type \"\n \"-> {0}\".format(col.dtype))\n\n # GH11216\n df = pd.read_sql_query(\"select * from types_test_data\", self.conn)\n if not hasattr(df, 'DateColWithTz'):\n raise nose.SkipTest(\"no column with datetime with time zone\")\n\n # this is parsed on Travis (linux), but not on macosx for some reason\n # even with the same versions of psycopg2 & sqlalchemy, possibly a\n # Postgrsql server version difference\n col = df.DateColWithTz\n self.assertTrue(is_object_dtype(col.dtype) or\n is_datetime64_dtype(col.dtype) or\n is_datetime64tz_dtype(col.dtype),\n \"DateCol loaded with incorrect type -> {0}\"\n .format(col.dtype))\n\n df = pd.read_sql_query(\"select * from types_test_data\",\n self.conn, parse_dates=['DateColWithTz'])\n if not hasattr(df, 'DateColWithTz'):\n raise nose.SkipTest(\"no column with datetime with time zone\")\n check(df.DateColWithTz)\n\n df = pd.concat(list(pd.read_sql_query(\"select * from types_test_data\",\n self.conn, chunksize=1)),\n ignore_index=True)\n col = df.DateColWithTz\n self.assertTrue(is_datetime64tz_dtype(col.dtype),\n \"DateCol loaded with incorrect type -> {0}\"\n .format(col.dtype))\n self.assertTrue(str(col.dt.tz) == 'UTC')\n expected = sql.read_sql_table(\"types_test_data\", self.conn)\n tm.assert_series_equal(df.DateColWithTz,\n expected.DateColWithTz\n .astype('datetime64[ns, UTC]'))\n\n # xref #7139\n # this might or might not be converted depending on the postgres driver\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n check(df.DateColWithTz)\n\n def test_date_parsing(self):\n # No Parsing\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n df = sql.read_sql_table(\"types_test_data\", self.conn,\n 
parse_dates=['DateCol'])\n self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),\n \"DateCol loaded with incorrect type\")\n\n df = sql.read_sql_table(\"types_test_data\", self.conn,\n parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})\n self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),\n \"DateCol loaded with incorrect type\")\n\n df = sql.read_sql_table(\"types_test_data\", self.conn, parse_dates={\n 'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})\n self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),\n \"IntDateCol loaded with incorrect type\")\n\n df = sql.read_sql_table(\n \"types_test_data\", self.conn, parse_dates=['IntDateCol'])\n self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),\n \"IntDateCol loaded with incorrect type\")\n\n df = sql.read_sql_table(\n \"types_test_data\", self.conn, parse_dates={'IntDateCol': 's'})\n self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),\n \"IntDateCol loaded with incorrect type\")\n\n df = sql.read_sql_table(\"types_test_data\", self.conn,\n parse_dates={'IntDateCol': {'unit': 's'}})\n self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),\n \"IntDateCol loaded with incorrect type\")\n\n def test_datetime(self):\n df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),\n 'B': np.arange(3.0)})\n df.to_sql('test_datetime', self.conn)\n\n # with read_table -> type information from schema used\n result = sql.read_sql_table('test_datetime', self.conn)\n result = result.drop('index', axis=1)\n tm.assert_frame_equal(result, df)\n\n # with read_sql -> no type information -> sqlite has no native\n result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)\n result = result.drop('index', axis=1)\n if self.flavor == 'sqlite':\n self.assertTrue(isinstance(result.loc[0, 'A'], string_types))\n result['A'] = to_datetime(result['A'])\n tm.assert_frame_equal(result, df)\n else:\n tm.assert_frame_equal(result, df)\n\n def test_datetime_NaT(self):\n df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),\n 'B': np.arange(3.0)})\n df.loc[1, 'A'] = np.nan\n df.to_sql('test_datetime', self.conn, index=False)\n\n # with read_table -> type information from schema used\n result = sql.read_sql_table('test_datetime', self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql -> no type information -> sqlite has no native\n result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)\n if self.flavor == 'sqlite':\n self.assertTrue(isinstance(result.loc[0, 'A'], string_types))\n result['A'] = to_datetime(result['A'], errors='coerce')\n tm.assert_frame_equal(result, df)\n else:\n tm.assert_frame_equal(result, df)\n\n def test_datetime_date(self):\n # test support for datetime.date\n df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=[\"a\"])\n df.to_sql('test_date', self.conn, index=False)\n res = read_sql_table('test_date', self.conn)\n # comes back as datetime64\n tm.assert_series_equal(res['a'], to_datetime(df['a']))\n\n def test_datetime_time(self):\n # test support for datetime.time\n df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=[\"a\"])\n df.to_sql('test_time', self.conn, index=False)\n res = read_sql_table('test_time', self.conn)\n tm.assert_frame_equal(res, df)\n\n # GH8341\n # first, use the fallback to have the sqlite adapter put in place\n sqlite_conn = TestSQLiteFallback.connect()\n sql.to_sql(df, \"test_time2\", sqlite_conn, index=False)\n res = sql.read_sql_query(\"SELECT * FROM test_time2\", sqlite_conn)\n ref 
= df.applymap(lambda _: _.strftime(\"%H:%M:%S.%f\"))\n tm.assert_frame_equal(ref, res) # check if adapter is in place\n # then test if sqlalchemy is unaffected by the sqlite adapter\n sql.to_sql(df, \"test_time3\", self.conn, index=False)\n if self.flavor == 'sqlite':\n res = sql.read_sql_query(\"SELECT * FROM test_time3\", self.conn)\n ref = df.applymap(lambda _: _.strftime(\"%H:%M:%S.%f\"))\n tm.assert_frame_equal(ref, res)\n res = sql.read_sql_table(\"test_time3\", self.conn)\n tm.assert_frame_equal(df, res)\n\n def test_mixed_dtype_insert(self):\n # see GH6509\n s1 = Series(2**25 + 1, dtype=np.int32)\n s2 = Series(0.0, dtype=np.float32)\n df = DataFrame({'s1': s1, 's2': s2})\n\n # write and read again\n df.to_sql(\"test_read_write\", self.conn, index=False)\n df2 = sql.read_sql_table(\"test_read_write\", self.conn)\n\n tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)\n\n def test_nan_numeric(self):\n # NaNs in numeric float column\n df = DataFrame({'A': [0, 1, 2], 'B': [0.2, np.nan, 5.6]})\n df.to_sql('test_nan', self.conn, index=False)\n\n # with read_table\n result = sql.read_sql_table('test_nan', self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql\n result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)\n tm.assert_frame_equal(result, df)\n\n def test_nan_fullcolumn(self):\n # full NaN column (numeric float column)\n df = DataFrame({'A': [0, 1, 2], 'B': [np.nan, np.nan, np.nan]})\n df.to_sql('test_nan', self.conn, index=False)\n\n # with read_table\n result = sql.read_sql_table('test_nan', self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql -> not type info from table -> stays None\n df['B'] = df['B'].astype('object')\n df['B'] = None\n result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)\n tm.assert_frame_equal(result, df)\n\n def test_nan_string(self):\n # NaNs in string column\n df = DataFrame({'A': [0, 1, 2], 'B': ['a', 'b', np.nan]})\n df.to_sql('test_nan', self.conn, index=False)\n\n # NaNs are coming back as None\n df.loc[2, 'B'] = None\n\n # with read_table\n result = sql.read_sql_table('test_nan', self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql\n result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)\n tm.assert_frame_equal(result, df)\n\n def _get_index_columns(self, tbl_name):\n from sqlalchemy.engine import reflection\n insp = reflection.Inspector.from_engine(self.conn)\n ixs = insp.get_indexes(tbl_name)\n ixs = [i['column_names'] for i in ixs]\n return ixs\n\n def test_to_sql_save_index(self):\n self._to_sql_save_index()\n\n def test_transactions(self):\n self._transaction_test()\n\n def test_get_schema_create_table(self):\n # Use a dataframe without a bool column, since MySQL converts bool to\n # TINYINT (which read_sql_table returns as an int and causes a dtype\n # mismatch)\n\n self._load_test3_data()\n tbl = 'test_get_schema_create_table'\n create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)\n blank_test_df = self.test_frame3.iloc[:0]\n\n self.drop_table(tbl)\n self.conn.execute(create_sql)\n returned_df = sql.read_sql_table(tbl, self.conn)\n tm.assert_frame_equal(returned_df, blank_test_df,\n check_index_type=False)\n self.drop_table(tbl)\n\n def test_dtype(self):\n cols = ['A', 'B']\n data = [(0.8, True),\n (0.9, None)]\n df = DataFrame(data, columns=cols)\n df.to_sql('dtype_test', self.conn)\n df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n sqltype = 
meta.tables['dtype_test2'].columns['B'].type\n self.assertTrue(isinstance(sqltype, sqlalchemy.TEXT))\n self.assertRaises(ValueError, df.to_sql,\n 'error', self.conn, dtype={'B': str})\n\n # GH9083\n df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})\n meta.reflect()\n sqltype = meta.tables['dtype_test3'].columns['B'].type\n self.assertTrue(isinstance(sqltype, sqlalchemy.String))\n self.assertEqual(sqltype.length, 10)\n\n # single dtype\n df.to_sql('single_dtype_test', self.conn, dtype=sqlalchemy.TEXT)\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n sqltypea = meta.tables['single_dtype_test'].columns['A'].type\n sqltypeb = meta.tables['single_dtype_test'].columns['B'].type\n self.assertTrue(isinstance(sqltypea, sqlalchemy.TEXT))\n self.assertTrue(isinstance(sqltypeb, sqlalchemy.TEXT))\n\n def test_notnull_dtype(self):\n cols = {'Bool': Series([True, None]),\n 'Date': Series([datetime(2012, 5, 1), None]),\n 'Int': Series([1, None], dtype='object'),\n 'Float': Series([1.1, None])\n }\n df = DataFrame(cols)\n\n tbl = 'notnull_dtype_test'\n df.to_sql(tbl, self.conn)\n returned_df = sql.read_sql_table(tbl, self.conn) # noqa\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n if self.flavor == 'mysql':\n my_type = sqltypes.Integer\n else:\n my_type = sqltypes.Boolean\n\n col_dict = meta.tables[tbl].columns\n\n self.assertTrue(isinstance(col_dict['Bool'].type, my_type))\n self.assertTrue(isinstance(col_dict['Date'].type, sqltypes.DateTime))\n self.assertTrue(isinstance(col_dict['Int'].type, sqltypes.Integer))\n self.assertTrue(isinstance(col_dict['Float'].type, sqltypes.Float))\n\n def test_double_precision(self):\n V = 1.23456789101112131415\n\n df = DataFrame({'f32': Series([V, ], dtype='float32'),\n 'f64': Series([V, ], dtype='float64'),\n 'f64_as_f32': Series([V, ], dtype='float64'),\n 'i32': Series([5, ], dtype='int32'),\n 'i64': Series([5, ], dtype='int64'),\n })\n\n df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',\n dtype={'f64_as_f32': sqlalchemy.Float(precision=23)})\n res = sql.read_sql_table('test_dtypes', self.conn)\n\n # check precision of float64\n self.assertEqual(np.round(df['f64'].iloc[0], 14),\n np.round(res['f64'].iloc[0], 14))\n\n # check sql types\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n col_dict = meta.tables['test_dtypes'].columns\n self.assertEqual(str(col_dict['f32'].type),\n str(col_dict['f64_as_f32'].type))\n self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float))\n self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float))\n self.assertTrue(isinstance(col_dict['i32'].type, sqltypes.Integer))\n self.assertTrue(isinstance(col_dict['i64'].type, sqltypes.BigInteger))\n\n def test_connectable_issue_example(self):\n # This tests the example raised in issue\n # https://github.com/pydata/pandas/issues/10104\n\n def foo(connection):\n query = 'SELECT test_foo_data FROM test_foo_data'\n return sql.read_sql_query(query, con=connection)\n\n def bar(connection, data):\n data.to_sql(name='test_foo_data',\n con=connection, if_exists='append')\n\n def main(connectable):\n with connectable.connect() as conn:\n with conn.begin():\n foo_data = conn.run_callable(foo)\n conn.run_callable(bar, foo_data)\n\n DataFrame({'test_foo_data': [0, 1, 2]}).to_sql(\n 'test_foo_data', self.conn)\n main(self.conn)\n\n def test_temporary_table(self):\n test_data = u'Hello, World!'\n expected = DataFrame({'spam': [test_data]})\n Base = declarative.declarative_base()\n\n class 
Temporary(Base):\n __tablename__ = 'temp_test'\n __table_args__ = {'prefixes': ['TEMPORARY']}\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)\n\n Session = sa_session.sessionmaker(bind=self.conn)\n session = Session()\n with session.transaction:\n conn = session.connection()\n Temporary.__table__.create(conn)\n session.add(Temporary(spam=test_data))\n session.flush()\n df = sql.read_sql_query(\n sql=sqlalchemy.select([Temporary.spam]),\n con=conn,\n )\n\n tm.assert_frame_equal(df, expected)\n\n\nclass _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):\n\n def test_transactions(self):\n raise nose.SkipTest(\n \"Nested transactions rollbacks don't work with Pandas\")\n\n\nclass _TestSQLiteAlchemy(object):\n \"\"\"\n Test the sqlalchemy backend against an in-memory sqlite database.\n\n \"\"\"\n flavor = 'sqlite'\n\n @classmethod\n def connect(cls):\n return sqlalchemy.create_engine('sqlite:///:memory:')\n\n @classmethod\n def setup_driver(cls):\n # sqlite3 is built-in\n cls.driver = None\n\n def test_default_type_conversion(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),\n \"FloatCol loaded with incorrect type\")\n self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),\n \"IntCol loaded with incorrect type\")\n # sqlite has no boolean type, so integer type is returned\n self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),\n \"BoolCol loaded with incorrect type\")\n\n # Int column with NA values stays as float\n self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),\n \"IntColWithNull loaded with incorrect type\")\n # Non-native Bool column with NA values stays as float\n self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),\n \"BoolColWithNull loaded with incorrect type\")\n\n def test_default_date_load(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n # IMPORTANT - sqlite has no native date type, so shouldn't parse, but\n self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64),\n \"DateCol loaded with incorrect type\")\n\n def test_bigint_warning(self):\n # test no warning for BIGINT (to support int64) is raised (GH7433)\n df = DataFrame({'a': [1, 2]}, dtype='int64')\n df.to_sql('test_bigintwarning', self.conn, index=False)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n sql.read_sql_table('test_bigintwarning', self.conn)\n self.assertEqual(len(w), 0, \"Warning triggered for other table\")\n\n\nclass _TestMySQLAlchemy(object):\n \"\"\"\n Test the sqlalchemy backend against an MySQL database.\n\n \"\"\"\n flavor = 'mysql'\n\n @classmethod\n def connect(cls):\n url = 'mysql+{driver}://root@localhost/pandas_nosetest'\n return sqlalchemy.create_engine(url.format(driver=cls.driver))\n\n @classmethod\n def setup_driver(cls):\n try:\n import pymysql # noqa\n cls.driver = 'pymysql'\n except ImportError:\n raise nose.SkipTest('pymysql not installed')\n\n def test_default_type_conversion(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),\n \"FloatCol loaded with incorrect type\")\n self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),\n \"IntCol loaded with incorrect type\")\n # MySQL has no real BOOL type (it's an alias for TINYINT)\n self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),\n \"BoolCol loaded with 
incorrect type\")\n\n # Int column with NA values stays as float\n self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),\n \"IntColWithNull loaded with incorrect type\")\n # Bool column with NA = int column with NA values => becomes float\n self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),\n \"BoolColWithNull loaded with incorrect type\")\n\n def test_read_procedure(self):\n # see GH7324. Although it is more an api test, it is added to the\n # mysql tests as sqlite does not have stored procedures\n df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})\n df.to_sql('test_procedure', self.conn, index=False)\n\n proc = \"\"\"DROP PROCEDURE IF EXISTS get_testdb;\n\n CREATE PROCEDURE get_testdb ()\n\n BEGIN\n SELECT * FROM test_procedure;\n END\"\"\"\n\n connection = self.conn.connect()\n trans = connection.begin()\n try:\n r1 = connection.execute(proc) # noqa\n trans.commit()\n except:\n trans.rollback()\n raise\n\n res1 = sql.read_sql_query(\"CALL get_testdb();\", self.conn)\n tm.assert_frame_equal(df, res1)\n\n # test delegation to read_sql_query\n res2 = sql.read_sql(\"CALL get_testdb();\", self.conn)\n tm.assert_frame_equal(df, res2)\n\n\nclass _TestPostgreSQLAlchemy(object):\n \"\"\"\n Test the sqlalchemy backend against an PostgreSQL database.\n\n \"\"\"\n flavor = 'postgresql'\n\n @classmethod\n def connect(cls):\n url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'\n return sqlalchemy.create_engine(url.format(driver=cls.driver))\n\n @classmethod\n def setup_driver(cls):\n try:\n import psycopg2 # noqa\n cls.driver = 'psycopg2'\n except ImportError:\n raise nose.SkipTest('psycopg2 not installed')\n\n def test_schema_support(self):\n # only test this for postgresql (schema's not supported in\n # mysql/sqlite)\n df = DataFrame({'col1': [1, 2], 'col2': [\n 0.1, 0.2], 'col3': ['a', 'n']})\n\n # create a schema\n self.conn.execute(\"DROP SCHEMA IF EXISTS other CASCADE;\")\n self.conn.execute(\"CREATE SCHEMA other;\")\n\n # write dataframe to different schema's\n df.to_sql('test_schema_public', self.conn, index=False)\n df.to_sql('test_schema_public_explicit', self.conn, index=False,\n schema='public')\n df.to_sql('test_schema_other', self.conn, index=False, schema='other')\n\n # read dataframes back in\n res1 = sql.read_sql_table('test_schema_public', self.conn)\n tm.assert_frame_equal(df, res1)\n res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)\n tm.assert_frame_equal(df, res2)\n res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,\n schema='public')\n tm.assert_frame_equal(df, res3)\n res4 = sql.read_sql_table('test_schema_other', self.conn,\n schema='other')\n tm.assert_frame_equal(df, res4)\n self.assertRaises(ValueError, sql.read_sql_table, 'test_schema_other',\n self.conn, schema='public')\n\n # different if_exists options\n\n # create a schema\n self.conn.execute(\"DROP SCHEMA IF EXISTS other CASCADE;\")\n self.conn.execute(\"CREATE SCHEMA other;\")\n\n # write dataframe with different if_exists options\n df.to_sql('test_schema_other', self.conn, schema='other', index=False)\n df.to_sql('test_schema_other', self.conn, schema='other', index=False,\n if_exists='replace')\n df.to_sql('test_schema_other', self.conn, schema='other', index=False,\n if_exists='append')\n res = sql.read_sql_table(\n 'test_schema_other', self.conn, schema='other')\n tm.assert_frame_equal(concat([df, df], ignore_index=True), res)\n\n # specifying schema in user-provided meta\n\n # The schema won't be applied on another 
Connection\n # because of transactional schemas\n if isinstance(self.conn, sqlalchemy.engine.Engine):\n engine2 = self.connect()\n meta = sqlalchemy.MetaData(engine2, schema='other')\n pdsql = sql.SQLDatabase(engine2, meta=meta)\n pdsql.to_sql(df, 'test_schema_other2', index=False)\n pdsql.to_sql(df, 'test_schema_other2',\n index=False, if_exists='replace')\n pdsql.to_sql(df, 'test_schema_other2',\n index=False, if_exists='append')\n res1 = sql.read_sql_table(\n 'test_schema_other2', self.conn, schema='other')\n res2 = pdsql.read_table('test_schema_other2')\n tm.assert_frame_equal(res1, res2)\n\n\nclass TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):\n pass\n\n\nclass TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):\n pass\n\n\nclass TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):\n pass\n\n\nclass TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):\n pass\n\n\nclass TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):\n pass\n\n\nclass TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):\n pass\n\n\n# -----------------------------------------------------------------------------\n# -- Test Sqlite / MySQL fallback\n\nclass TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):\n \"\"\"\n Test the fallback mode against an in-memory sqlite database.\n\n \"\"\"\n flavor = 'sqlite'\n\n @classmethod\n def connect(cls):\n return sqlite3.connect(':memory:')\n\n def setUp(self):\n self.conn = self.connect()\n self.pandasSQL = sql.SQLiteDatabase(self.conn)\n\n self._load_iris_data()\n\n self._load_test1_data()\n\n def test_read_sql(self):\n self._read_sql_iris()\n\n def test_read_sql_parameter(self):\n self._read_sql_iris_parameter()\n\n def test_read_sql_named_parameter(self):\n self._read_sql_iris_named_parameter()\n\n def test_to_sql(self):\n self._to_sql()\n\n def test_to_sql_empty(self):\n self._to_sql_empty()\n\n def test_to_sql_fail(self):\n self._to_sql_fail()\n\n def test_to_sql_replace(self):\n self._to_sql_replace()\n\n def test_to_sql_append(self):\n self._to_sql_append()\n\n def test_create_and_drop_table(self):\n temp_frame = DataFrame(\n {'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})\n\n self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')\n\n self.assertTrue(self.pandasSQL.has_table('drop_test_frame'),\n 'Table not written to DB')\n\n self.pandasSQL.drop_table('drop_test_frame')\n\n self.assertFalse(self.pandasSQL.has_table('drop_test_frame'),\n 'Table not deleted from DB')\n\n def test_roundtrip(self):\n self._roundtrip()\n\n def test_execute_sql(self):\n self._execute_sql()\n\n def test_datetime_date(self):\n # test support for datetime.date\n df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=[\"a\"])\n df.to_sql('test_date', self.conn, index=False)\n res = read_sql_query('SELECT * FROM test_date', self.conn)\n if self.flavor == 'sqlite':\n # comes back as strings\n tm.assert_frame_equal(res, df.astype(str))\n elif self.flavor == 'mysql':\n tm.assert_frame_equal(res, df)\n\n def test_datetime_time(self):\n # test support for datetime.time, GH #8341\n df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=[\"a\"])\n df.to_sql('test_time', self.conn, index=False)\n res = read_sql_query('SELECT * FROM test_time', self.conn)\n if self.flavor == 'sqlite':\n # comes back as strings\n expected = df.applymap(lambda _: _.strftime(\"%H:%M:%S.%f\"))\n tm.assert_frame_equal(res, expected)\n\n def _get_index_columns(self, tbl_name):\n ixs = sql.read_sql_query(\n \"SELECT * FROM sqlite_master WHERE type = 'index' \" 
+\n \"AND tbl_name = '%s'\" % tbl_name, self.conn)\n ix_cols = []\n for ix_name in ixs.name:\n ix_info = sql.read_sql_query(\n \"PRAGMA index_info(%s)\" % ix_name, self.conn)\n ix_cols.append(ix_info.name.tolist())\n return ix_cols\n\n def test_to_sql_save_index(self):\n self._to_sql_save_index()\n\n def test_transactions(self):\n self._transaction_test()\n\n def _get_sqlite_column_type(self, table, column):\n recs = self.conn.execute('PRAGMA table_info(%s)' % table)\n for cid, name, ctype, not_null, default, pk in recs:\n if name == column:\n return ctype\n raise ValueError('Table %s, column %s not found' % (table, column))\n\n def test_dtype(self):\n if self.flavor == 'mysql':\n raise nose.SkipTest('Not applicable to MySQL legacy')\n cols = ['A', 'B']\n data = [(0.8, True),\n (0.9, None)]\n df = DataFrame(data, columns=cols)\n df.to_sql('dtype_test', self.conn)\n df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})\n\n # sqlite stores Boolean values as INTEGER\n self.assertEqual(self._get_sqlite_column_type(\n 'dtype_test', 'B'), 'INTEGER')\n\n self.assertEqual(self._get_sqlite_column_type(\n 'dtype_test2', 'B'), 'STRING')\n self.assertRaises(ValueError, df.to_sql,\n 'error', self.conn, dtype={'B': bool})\n\n # single dtype\n df.to_sql('single_dtype_test', self.conn, dtype='STRING')\n self.assertEqual(\n self._get_sqlite_column_type('single_dtype_test', 'A'), 'STRING')\n self.assertEqual(\n self._get_sqlite_column_type('single_dtype_test', 'B'), 'STRING')\n\n def test_notnull_dtype(self):\n if self.flavor == 'mysql':\n raise nose.SkipTest('Not applicable to MySQL legacy')\n\n cols = {'Bool': Series([True, None]),\n 'Date': Series([datetime(2012, 5, 1), None]),\n 'Int': Series([1, None], dtype='object'),\n 'Float': Series([1.1, None])\n }\n df = DataFrame(cols)\n\n tbl = 'notnull_dtype_test'\n df.to_sql(tbl, self.conn)\n\n self.assertEqual(self._get_sqlite_column_type(tbl, 'Bool'), 'INTEGER')\n self.assertEqual(self._get_sqlite_column_type(\n tbl, 'Date'), 'TIMESTAMP')\n self.assertEqual(self._get_sqlite_column_type(tbl, 'Int'), 'INTEGER')\n self.assertEqual(self._get_sqlite_column_type(tbl, 'Float'), 'REAL')\n\n def test_illegal_names(self):\n # For sqlite, these should work fine\n df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n\n # Raise error on blank\n self.assertRaises(ValueError, df.to_sql, \"\", self.conn)\n\n for ndx, weird_name in enumerate(\n ['test_weird_name]', 'test_weird_name[',\n 'test_weird_name`', 'test_weird_name\"', 'test_weird_name\\'',\n '_b.test_weird_name_01-30', '\"_b.test_weird_name_01-30\"',\n '99beginswithnumber', '12345', u'\\xe9']):\n df.to_sql(weird_name, self.conn)\n sql.table_exists(weird_name, self.conn)\n\n df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])\n c_tbl = 'test_weird_col_name%d' % ndx\n df2.to_sql(c_tbl, self.conn)\n sql.table_exists(c_tbl, self.conn)\n\n\n# -----------------------------------------------------------------------------\n# -- Old tests from 0.13.1 (before refactor using sqlalchemy)\n\n\n_formatters = {\n datetime: lambda dt: \"'%s'\" % date_format(dt),\n str: lambda x: \"'%s'\" % x,\n np.str_: lambda x: \"'%s'\" % x,\n compat.text_type: lambda x: \"'%s'\" % x,\n compat.binary_type: lambda x: \"'%s'\" % x,\n float: lambda x: \"%.8f\" % x,\n int: lambda x: \"%s\" % x,\n type(None): lambda x: \"NULL\",\n np.float64: lambda x: \"%.10f\" % x,\n bool: lambda x: \"'%s'\" % x,\n}\n\n\ndef format_query(sql, *args):\n \"\"\"\n\n \"\"\"\n processed_args = []\n for arg in args:\n if isinstance(arg, float) and 
isnull(arg):\n arg = None\n\n formatter = _formatters[type(arg)]\n processed_args.append(formatter(arg))\n\n return sql % tuple(processed_args)\n\n\ndef tquery(query, con=None, cur=None):\n \"\"\"Replace removed sql.tquery function\"\"\"\n res = sql.execute(query, con=con, cur=cur).fetchall()\n if res is None:\n return None\n else:\n return list(res)\n\n\ndef _skip_if_no_pymysql():\n try:\n import pymysql # noqa\n except ImportError:\n raise nose.SkipTest('pymysql not installed, skipping')\n\n\nclass TestXSQLite(SQLiteMixIn, tm.TestCase):\n\n def setUp(self):\n self.conn = sqlite3.connect(':memory:')\n\n def test_basic(self):\n frame = tm.makeTimeDataFrame()\n self._check_roundtrip(frame)\n\n def test_write_row_by_row(self):\n\n frame = tm.makeTimeDataFrame()\n frame.ix[0, 0] = np.nan\n create_sql = sql.get_schema(frame, 'test')\n cur = self.conn.cursor()\n cur.execute(create_sql)\n\n cur = self.conn.cursor()\n\n ins = \"INSERT INTO test VALUES (%s, %s, %s, %s)\"\n for idx, row in frame.iterrows():\n fmt_sql = format_query(ins, *row)\n tquery(fmt_sql, cur=cur)\n\n self.conn.commit()\n\n result = sql.read_sql(\"select * from test\", con=self.conn)\n result.index = frame.index\n tm.assert_frame_equal(result, frame)\n\n def test_execute(self):\n frame = tm.makeTimeDataFrame()\n create_sql = sql.get_schema(frame, 'test')\n cur = self.conn.cursor()\n cur.execute(create_sql)\n ins = \"INSERT INTO test VALUES (?, ?, ?, ?)\"\n\n row = frame.ix[0]\n sql.execute(ins, self.conn, params=tuple(row))\n self.conn.commit()\n\n result = sql.read_sql(\"select * from test\", self.conn)\n result.index = frame.index[:1]\n tm.assert_frame_equal(result, frame[:1])\n\n def test_schema(self):\n frame = tm.makeTimeDataFrame()\n create_sql = sql.get_schema(frame, 'test')\n lines = create_sql.splitlines()\n for l in lines:\n tokens = l.split(' ')\n if len(tokens) == 2 and tokens[0] == 'A':\n self.assertTrue(tokens[1] == 'DATETIME')\n\n frame = tm.makeTimeDataFrame()\n create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])\n lines = create_sql.splitlines()\n self.assertTrue('PRIMARY KEY (\"A\", \"B\")' in create_sql)\n cur = self.conn.cursor()\n cur.execute(create_sql)\n\n def test_execute_fail(self):\n create_sql = \"\"\"\n CREATE TABLE test\n (\n a TEXT,\n b TEXT,\n c REAL,\n PRIMARY KEY (a, b)\n );\n \"\"\"\n cur = self.conn.cursor()\n cur.execute(create_sql)\n\n sql.execute('INSERT INTO test VALUES(\"foo\", \"bar\", 1.234)', self.conn)\n sql.execute('INSERT INTO test VALUES(\"foo\", \"baz\", 2.567)', self.conn)\n\n try:\n sys.stdout = StringIO()\n self.assertRaises(Exception, sql.execute,\n 'INSERT INTO test VALUES(\"foo\", \"bar\", 7)',\n self.conn)\n finally:\n sys.stdout = sys.__stdout__\n\n def test_execute_closed_connection(self):\n create_sql = \"\"\"\n CREATE TABLE test\n (\n a TEXT,\n b TEXT,\n c REAL,\n PRIMARY KEY (a, b)\n );\n \"\"\"\n cur = self.conn.cursor()\n cur.execute(create_sql)\n\n sql.execute('INSERT INTO test VALUES(\"foo\", \"bar\", 1.234)', self.conn)\n self.conn.close()\n try:\n sys.stdout = StringIO()\n self.assertRaises(Exception, tquery, \"select * from test\",\n con=self.conn)\n finally:\n sys.stdout = sys.__stdout__\n\n # Initialize connection again (needed for tearDown)\n self.setUp()\n\n def test_na_roundtrip(self):\n pass\n\n def _check_roundtrip(self, frame):\n sql.to_sql(frame, name='test_table', con=self.conn, index=False)\n result = sql.read_sql(\"select * from test_table\", self.conn)\n\n # HACK! 
Change this once indexes are handled properly.\n result.index = frame.index\n\n expected = frame\n tm.assert_frame_equal(result, expected)\n\n frame['txt'] = ['a'] * len(frame)\n frame2 = frame.copy()\n frame2['Idx'] = Index(lrange(len(frame2))) + 10\n sql.to_sql(frame2, name='test_table2', con=self.conn, index=False)\n result = sql.read_sql(\"select * from test_table2\", self.conn,\n index_col='Idx')\n expected = frame.copy()\n expected.index = Index(lrange(len(frame2))) + 10\n expected.index.name = 'Idx'\n tm.assert_frame_equal(expected, result)\n\n def test_keyword_as_column_names(self):\n df = DataFrame({'From': np.ones(5)})\n sql.to_sql(df, con=self.conn, name='testkeywords', index=False)\n\n def test_onecolumn_of_integer(self):\n # GH 3628\n # a column_of_integers dataframe should transfer well to sql\n\n mono_df = DataFrame([1, 2], columns=['c0'])\n sql.to_sql(mono_df, con=self.conn, name='mono_df', index=False)\n # computing the sum via sql\n con_x = self.conn\n the_sum = sum([my_c0[0]\n for my_c0 in con_x.execute(\"select * from mono_df\")])\n # it should not fail, and gives 3 ( Issue #3628 )\n self.assertEqual(the_sum, 3)\n\n result = sql.read_sql(\"select * from mono_df\", con_x)\n tm.assert_frame_equal(result, mono_df)\n\n def test_if_exists(self):\n df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})\n df_if_exists_2 = DataFrame(\n {'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})\n table_name = 'table_if_exists'\n sql_select = \"SELECT * FROM %s\" % table_name\n\n def clean_up(test_table_to_drop):\n \"\"\"\n Drops tables created from individual tests\n so no dependencies arise from sequential tests\n \"\"\"\n self.drop_table(test_table_to_drop)\n\n # test if invalid value for if_exists raises appropriate error\n self.assertRaises(ValueError,\n sql.to_sql,\n frame=df_if_exists_1,\n con=self.conn,\n name=table_name,\n if_exists='notvalidvalue')\n clean_up(table_name)\n\n # test if_exists='fail'\n sql.to_sql(frame=df_if_exists_1, con=self.conn,\n name=table_name, if_exists='fail')\n self.assertRaises(ValueError,\n sql.to_sql,\n frame=df_if_exists_1,\n con=self.conn,\n name=table_name,\n if_exists='fail')\n\n # test if_exists='replace'\n sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,\n if_exists='replace', index=False)\n self.assertEqual(tquery(sql_select, con=self.conn),\n [(1, 'A'), (2, 'B')])\n sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,\n if_exists='replace', index=False)\n self.assertEqual(tquery(sql_select, con=self.conn),\n [(3, 'C'), (4, 'D'), (5, 'E')])\n clean_up(table_name)\n\n # test if_exists='append'\n sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,\n if_exists='fail', index=False)\n self.assertEqual(tquery(sql_select, con=self.conn),\n [(1, 'A'), (2, 'B')])\n sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,\n if_exists='append', index=False)\n self.assertEqual(tquery(sql_select, con=self.conn),\n [(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])\n clean_up(table_name)\n\n\nclass TestSQLFlavorDeprecation(tm.TestCase):\n \"\"\"\n gh-13611: test that the 'flavor' parameter\n is appropriately deprecated by checking the\n functions that directly raise the warning\n \"\"\"\n\n con = 1234 # don't need real connection for this\n funcs = ['SQLiteDatabase', 'pandasSQL_builder']\n\n def test_unsupported_flavor(self):\n msg = 'is not supported'\n\n for func in self.funcs:\n tm.assertRaisesRegexp(ValueError, msg, getattr(sql, func),\n self.con, flavor='mysql')\n\n def 
test_deprecated_flavor(self):\n for func in self.funcs:\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n getattr(sql, func)(self.con, flavor='sqlite')\n\n\[email protected](\"gh-13611: there is no support for MySQL \"\n \"if SQLAlchemy is not installed\")\nclass TestXMySQL(MySQLMixIn, tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n _skip_if_no_pymysql()\n\n # test connection\n import pymysql\n try:\n # Try Travis defaults.\n # No real user should allow root access with a blank password.\n pymysql.connect(host='localhost', user='root', passwd='',\n db='pandas_nosetest')\n except:\n pass\n else:\n return\n try:\n pymysql.connect(read_default_group='pandas')\n except pymysql.ProgrammingError:\n raise nose.SkipTest(\n \"Create a group of connection parameters under the heading \"\n \"[pandas] in your system's mysql default file, \"\n \"typically located at ~/.my.cnf or /etc/.my.cnf. \")\n except pymysql.Error:\n raise nose.SkipTest(\n \"Cannot connect to database. \"\n \"Create a group of connection parameters under the heading \"\n \"[pandas] in your system's mysql default file, \"\n \"typically located at ~/.my.cnf or /etc/.my.cnf. \")\n\n def setUp(self):\n _skip_if_no_pymysql()\n import pymysql\n try:\n # Try Travis defaults.\n # No real user should allow root access with a blank password.\n self.conn = pymysql.connect(host='localhost', user='root',\n passwd='', db='pandas_nosetest')\n except:\n pass\n else:\n return\n try:\n self.conn = pymysql.connect(read_default_group='pandas')\n except pymysql.ProgrammingError:\n raise nose.SkipTest(\n \"Create a group of connection parameters under the heading \"\n \"[pandas] in your system's mysql default file, \"\n \"typically located at ~/.my.cnf or /etc/.my.cnf. \")\n except pymysql.Error:\n raise nose.SkipTest(\n \"Cannot connect to database. \"\n \"Create a group of connection parameters under the heading \"\n \"[pandas] in your system's mysql default file, \"\n \"typically located at ~/.my.cnf or /etc/.my.cnf. 
\")\n\n def test_basic(self):\n _skip_if_no_pymysql()\n frame = tm.makeTimeDataFrame()\n self._check_roundtrip(frame)\n\n def test_write_row_by_row(self):\n\n _skip_if_no_pymysql()\n frame = tm.makeTimeDataFrame()\n frame.ix[0, 0] = np.nan\n drop_sql = \"DROP TABLE IF EXISTS test\"\n create_sql = sql.get_schema(frame, 'test')\n cur = self.conn.cursor()\n cur.execute(drop_sql)\n cur.execute(create_sql)\n ins = \"INSERT INTO test VALUES (%s, %s, %s, %s)\"\n for idx, row in frame.iterrows():\n fmt_sql = format_query(ins, *row)\n tquery(fmt_sql, cur=cur)\n\n self.conn.commit()\n\n result = sql.read_sql(\"select * from test\", con=self.conn)\n result.index = frame.index\n tm.assert_frame_equal(result, frame)\n\n def test_chunksize_read_type(self):\n _skip_if_no_pymysql()\n frame = tm.makeTimeDataFrame()\n frame.index.name = \"index\"\n drop_sql = \"DROP TABLE IF EXISTS test\"\n cur = self.conn.cursor()\n cur.execute(drop_sql)\n sql.to_sql(frame, name='test', con=self.conn)\n query = \"select * from test\"\n chunksize = 5\n chunk_gen = pd.read_sql_query(sql=query, con=self.conn,\n chunksize=chunksize, index_col=\"index\")\n chunk_df = next(chunk_gen)\n tm.assert_frame_equal(frame[:chunksize], chunk_df)\n\n def test_execute(self):\n _skip_if_no_pymysql()\n frame = tm.makeTimeDataFrame()\n drop_sql = \"DROP TABLE IF EXISTS test\"\n create_sql = sql.get_schema(frame, 'test')\n cur = self.conn.cursor()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"Unknown table.*\")\n cur.execute(drop_sql)\n cur.execute(create_sql)\n ins = \"INSERT INTO test VALUES (%s, %s, %s, %s)\"\n\n row = frame.ix[0].values.tolist()\n sql.execute(ins, self.conn, params=tuple(row))\n self.conn.commit()\n\n result = sql.read_sql(\"select * from test\", self.conn)\n result.index = frame.index[:1]\n tm.assert_frame_equal(result, frame[:1])\n\n def test_schema(self):\n _skip_if_no_pymysql()\n frame = tm.makeTimeDataFrame()\n create_sql = sql.get_schema(frame, 'test')\n lines = create_sql.splitlines()\n for l in lines:\n tokens = l.split(' ')\n if len(tokens) == 2 and tokens[0] == 'A':\n self.assertTrue(tokens[1] == 'DATETIME')\n\n frame = tm.makeTimeDataFrame()\n drop_sql = \"DROP TABLE IF EXISTS test\"\n create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])\n lines = create_sql.splitlines()\n self.assertTrue('PRIMARY KEY (`A`, `B`)' in create_sql)\n cur = self.conn.cursor()\n cur.execute(drop_sql)\n cur.execute(create_sql)\n\n def test_execute_fail(self):\n _skip_if_no_pymysql()\n drop_sql = \"DROP TABLE IF EXISTS test\"\n create_sql = \"\"\"\n CREATE TABLE test\n (\n a TEXT,\n b TEXT,\n c REAL,\n PRIMARY KEY (a(5), b(5))\n );\n \"\"\"\n cur = self.conn.cursor()\n cur.execute(drop_sql)\n cur.execute(create_sql)\n\n sql.execute('INSERT INTO test VALUES(\"foo\", \"bar\", 1.234)', self.conn)\n sql.execute('INSERT INTO test VALUES(\"foo\", \"baz\", 2.567)', self.conn)\n\n try:\n sys.stdout = StringIO()\n self.assertRaises(Exception, sql.execute,\n 'INSERT INTO test VALUES(\"foo\", \"bar\", 7)',\n self.conn)\n finally:\n sys.stdout = sys.__stdout__\n\n def test_execute_closed_connection(self):\n _skip_if_no_pymysql()\n drop_sql = \"DROP TABLE IF EXISTS test\"\n create_sql = \"\"\"\n CREATE TABLE test\n (\n a TEXT,\n b TEXT,\n c REAL,\n PRIMARY KEY (a(5), b(5))\n );\n \"\"\"\n cur = self.conn.cursor()\n cur.execute(drop_sql)\n cur.execute(create_sql)\n\n sql.execute('INSERT INTO test VALUES(\"foo\", \"bar\", 1.234)', self.conn)\n self.conn.close()\n try:\n sys.stdout = StringIO()\n 
self.assertRaises(Exception, tquery, \"select * from test\",\n con=self.conn)\n finally:\n sys.stdout = sys.__stdout__\n\n # Initialize connection again (needed for tearDown)\n self.setUp()\n\n def test_na_roundtrip(self):\n _skip_if_no_pymysql()\n pass\n\n def _check_roundtrip(self, frame):\n _skip_if_no_pymysql()\n drop_sql = \"DROP TABLE IF EXISTS test_table\"\n cur = self.conn.cursor()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"Unknown table.*\")\n cur.execute(drop_sql)\n sql.to_sql(frame, name='test_table', con=self.conn, index=False)\n result = sql.read_sql(\"select * from test_table\", self.conn)\n\n # HACK! Change this once indexes are handled properly.\n result.index = frame.index\n result.index.name = frame.index.name\n\n expected = frame\n tm.assert_frame_equal(result, expected)\n\n frame['txt'] = ['a'] * len(frame)\n frame2 = frame.copy()\n index = Index(lrange(len(frame2))) + 10\n frame2['Idx'] = index\n drop_sql = \"DROP TABLE IF EXISTS test_table2\"\n cur = self.conn.cursor()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"Unknown table.*\")\n cur.execute(drop_sql)\n sql.to_sql(frame2, name='test_table2',\n con=self.conn, index=False)\n result = sql.read_sql(\"select * from test_table2\", self.conn,\n index_col='Idx')\n expected = frame.copy()\n\n # HACK! Change this once indexes are handled properly.\n expected.index = index\n expected.index.names = result.index.names\n tm.assert_frame_equal(expected, result)\n\n def test_keyword_as_column_names(self):\n _skip_if_no_pymysql()\n df = DataFrame({'From': np.ones(5)})\n sql.to_sql(df, con=self.conn, name='testkeywords',\n if_exists='replace', index=False)\n\n def test_if_exists(self):\n _skip_if_no_pymysql()\n df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})\n df_if_exists_2 = DataFrame(\n {'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})\n table_name = 'table_if_exists'\n sql_select = \"SELECT * FROM %s\" % table_name\n\n def clean_up(test_table_to_drop):\n \"\"\"\n Drops tables created from individual tests\n so no dependencies arise from sequential tests\n \"\"\"\n self.drop_table(test_table_to_drop)\n\n # test if invalid value for if_exists raises appropriate error\n self.assertRaises(ValueError,\n sql.to_sql,\n frame=df_if_exists_1,\n con=self.conn,\n name=table_name,\n if_exists='notvalidvalue')\n clean_up(table_name)\n\n # test if_exists='fail'\n sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,\n if_exists='fail', index=False)\n self.assertRaises(ValueError,\n sql.to_sql,\n frame=df_if_exists_1,\n con=self.conn,\n name=table_name,\n if_exists='fail')\n\n # test if_exists='replace'\n sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,\n if_exists='replace', index=False)\n self.assertEqual(tquery(sql_select, con=self.conn),\n [(1, 'A'), (2, 'B')])\n sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,\n if_exists='replace', index=False)\n self.assertEqual(tquery(sql_select, con=self.conn),\n [(3, 'C'), (4, 'D'), (5, 'E')])\n clean_up(table_name)\n\n # test if_exists='append'\n sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,\n if_exists='fail', index=False)\n self.assertEqual(tquery(sql_select, con=self.conn),\n [(1, 'A'), (2, 'B')])\n sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,\n if_exists='append', index=False)\n self.assertEqual(tquery(sql_select, con=self.conn),\n [(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])\n clean_up(table_name)\n\n\nif __name__ == '__main__':\n 
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] | [
[
"pandas.core.config.register_option",
"pandas.Series",
"pandas.core.categorical.Categorical.from_array",
"numpy.asarray",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.compat.iteritems",
"pandas.types.common.is_timedelta64_dtype",
"pandas.lib.fast_unique",
"pandas.core.config.get_option",
"numpy.arange",
"pandas.core.algorithms.unique",
"pandas.core.config.config_prefix",
"pandas.Index",
"pandas.core.index._ensure_index",
"pandas.core.internals.BlockManager",
"pandas.tseries.api.DatetimeIndex",
"pandas.types.common._ensure_int64",
"pandas.types.common.is_list_like",
"pandas.core.algorithms.match",
"numpy.repeat",
"pandas.core.internals._factor_indexer",
"pandas.tslib.get_timezone",
"pandas.core.internals._block_shape",
"pandas.types.missing.array_equivalent",
"pandas.compat.u_safe",
"pandas.MultiIndex",
"pandas.types.common.is_categorical_dtype",
"pandas.lib.string_array_replace_from_nan_rep",
"pandas.sparse.array.IntIndex",
"pandas.tools.merge.concat",
"pandas.io.common._stringify_path",
"pandas.lib.time64_to_datetime",
"pandas.tseries.api.PeriodIndex._simple_new",
"pandas.computation.pytables.maybe_expression",
"pandas.types.common._ensure_platform_int",
"numpy.array",
"pandas.formats.printing.adjoin",
"pandas.computation.pytables.Expr",
"pandas.sparse.array.BlockIndex",
"pandas.sparse.api.SparseDataFrame",
"pandas.core.common._asarray_tuplesafe",
"pandas.isnull",
"pandas.types.common._ensure_object",
"pandas.formats.printing.pprint_thing",
"pandas.MultiIndex.from_arrays",
"pandas.core.config.is_one_of_factory",
"numpy.tile",
"pandas.lib.infer_dtype",
"pandas.tseries.tdi.TimedeltaIndex",
"numpy.prod",
"pandas.types.common.is_datetime64tz_dtype",
"pandas.compat.lrange",
"pandas.types.common.is_datetime64_dtype",
"pandas.compat.filter",
"pandas.tseries.api.DatetimeIndex._simple_new",
"numpy.empty",
"pandas.compat.range"
],
[
"pandas.util.testing.assert_numpy_array_equal",
"pandas.notnull",
"pandas.Series",
"pandas.util.testing.assertRaisesRegexp",
"numpy.arange",
"pandas.util.testing.assertNotIsInstance",
"pandas.util.testing.assert_produces_warning",
"pandas.Index",
"pandas.option_context",
"pandas.util.testing.equalContents",
"pandas.util.testing.assertRaises",
"pandas.util.testing.assert_index_equal",
"pandas.compat.iteritems",
"numpy.errstate",
"numpy.argsort",
"numpy.repeat",
"numpy.array"
],
[
"pandas.io.sql.table_exists",
"pandas.to_datetime",
"pandas.util.testing.ensure_clean",
"pandas.Series",
"pandas.io.sql.read_sql_table",
"pandas.util.testing.assert_produces_warning",
"pandas.io.sql.read_sql",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.round",
"pandas.util.testing.makePanel",
"numpy.random.randn",
"pandas.io.sql.SQLiteDatabase",
"pandas.DataFrame.from_records",
"pandas.util.testing.makeTimeDataFrame",
"pandas.io.sql.get_schema",
"pandas.io.sql.SQLTable",
"numpy.arange",
"pandas.compat.StringIO",
"pandas.io.sql.read_sql_query",
"pandas.io.sql.execute",
"pandas.io.sql.SQLiteTable",
"pandas.io.sql._get_valid_mysql_name",
"pandas.util.testing.equalContents",
"pandas.concat",
"pandas.io.sql._get_valid_sqlite_name",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"pandas.types.common.is_object_dtype",
"pandas.read_sql_query",
"pandas.io.sql.has_table",
"pandas.isnull",
"pandas.io.sql.SQLDatabase",
"pandas.util.testing.get_data_path",
"numpy.ones",
"pandas.types.common.is_datetime64tz_dtype",
"pandas.types.common.is_datetime64_dtype",
"pandas.Timestamp",
"pandas.io.sql.to_sql",
"pandas.core.datetools.format",
"pandas.compat.range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.19"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
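The pandas test code above exercises to_sql's if_exists modes ('fail', 'replace', 'append') against a live connection. Below is a minimal sketch of the same semantics, assuming only pandas and an in-memory SQLite connection rather than the pymysql setup the tests use:

# Illustrative sketch of the if_exists behaviour tested above, using the
# public DataFrame.to_sql API and an in-memory SQLite connection.
import sqlite3
import pandas as pd

conn = sqlite3.connect(":memory:")
df1 = pd.DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df2 = pd.DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})

df1.to_sql("table_if_exists", conn, index=False)                       # table does not exist yet, so the default 'fail' simply creates it
df2.to_sql("table_if_exists", conn, if_exists="append", index=False)   # rows from both frames are kept
print(pd.read_sql("SELECT * FROM table_if_exists", conn))
df2.to_sql("table_if_exists", conn, if_exists="replace", index=False)  # table is dropped and recreated with df2 only
print(pd.read_sql("SELECT * FROM table_if_exists", conn))
conn.close()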
BioinfoTongLI/deepBlink | [
"aa819b71f380507f9fcfa0664ab0f5a8eca4b209",
"aa819b71f380507f9fcfa0664ab0f5a8eca4b209",
"aa819b71f380507f9fcfa0664ab0f5a8eca4b209"
] | [
"tests/test_augment.py",
"deepblink/optimizers.py",
"deepblink/inference.py"
] | [
"\"\"\"Unittests for the deepblink.augment module.\"\"\"\n# pylint: disable=missing-function-docstring\n\nfrom hypothesis import given\nfrom hypothesis.extra.numpy import arrays\nimport numpy as np\nimport pytest\n\nfrom deepblink.augment import augment_batch_baseline\nfrom deepblink.augment import flip\nfrom deepblink.augment import gaussian_noise\nfrom deepblink.augment import illuminate\nfrom deepblink.augment import rotate\nfrom deepblink.augment import translate\n\n\n@given(arrays(np.float32, (3, 5, 5)))\ndef test_augment_batch_baseline(arr):\n imgs, masks = augment_batch_baseline(arr, arr)\n assert imgs.shape == masks.shape == arr.shape\n\n with pytest.warns(UserWarning):\n misshaped_arr = np.zeros((10, 5, 5))\n augment_batch_baseline(misshaped_arr, misshaped_arr)\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_flip(matrix):\n img, mask = flip(matrix, matrix)\n assert np.sum(np.sum(img)) == np.sum(np.sum(matrix))\n assert mask.shape == matrix.shape\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_illuminate(matrix):\n img, mask = illuminate(matrix, matrix)\n assert img.shape == matrix.shape\n assert mask.shape == matrix.shape\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_gaussian_noise(matrix):\n img, mask = gaussian_noise(matrix, matrix)\n assert img.shape == matrix.shape\n assert mask.shape == matrix.shape\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_rotate(matrix):\n img, mask = rotate(matrix, matrix)\n assert np.sum(np.sum(img)) == np.sum(np.sum(matrix))\n assert mask.shape == matrix.shape\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_translate(matrix):\n img, mask = translate(matrix, matrix)\n assert np.sum(np.sum(img)) == np.sum(np.sum(matrix))\n assert mask.shape == matrix.shape\n",
"\"\"\"Optimizers are used to update weight parameters in a neural network.\n\nThe learning rate defines what stepsizes are taken during one iteration of training.\nThis file contains functions to return standard or custom optimizers.\n\"\"\"\n\nimport tensorflow as tf\n\n\ndef adam(learning_rate: float):\n \"\"\"Keras' adam optimizer with a specified learning rate.\"\"\"\n return tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n\ndef rmsprop(learning_rate: float):\n \"\"\"Keras' rmsprop optimizer with a specified learning rate.\"\"\"\n return tf.keras.optimizers.RMSprop(learning_rate=learning_rate)\n\n\ndef amsgrad(learning_rate: float):\n \"\"\"Keras' amsgrad optimizer with a specified learning rate.\"\"\"\n return tf.keras.optimizers.Adam(learning_rate=learning_rate, amsgrad=True)\n",
"\"\"\"Model prediction / inference functions.\"\"\"\n\nfrom typing import Union\nimport math\n\nimport numpy as np\nimport skimage.morphology\nimport tensorflow as tf\n\nfrom .data import get_coordinate_list\nfrom .data import next_power\nfrom .data import normalize_image\n\n\ndef predict(\n image: np.ndarray,\n model: tf.keras.models.Model,\n probability: Union[None, float] = None,\n) -> np.ndarray:\n \"\"\"Returns a binary or categorical model based prediction of an image.\n\n Args:\n image: Image to be predicted.\n model: Model used to predict the image.\n probability: Cutoff value to round model prediction probability.\n\n Returns:\n List of coordinates [r, c].\n \"\"\"\n # Normalisation and padding\n image = normalize_image(image)\n pad_bottom = next_power(image.shape[0], 2) - image.shape[0]\n pad_right = next_power(image.shape[1], 2) - image.shape[1]\n image_pad = np.pad(image, ((0, pad_bottom), (0, pad_right)), \"reflect\")\n\n # Predict on image\n pred = model.predict(image_pad[None, ..., None]).squeeze()\n prob = 0.5 if probability is None else probability\n coords = get_coordinate_list(\n pred, image_size=max(image_pad.shape), probability=prob\n )\n\n # Remove spots in padded part of image\n coords = np.array([coords[..., 0], coords[..., 1]])\n coords = np.delete(\n coords,\n np.where((coords[0] > image.shape[0]) | (coords[1] > image.shape[1])),\n axis=1,\n )\n coords = coords.T # Transposition to save as rows\n\n # Add third, probability containing column\n if probability is not None:\n probs = get_probabilities(pred, coords, image_size=max(image_pad.shape))\n probs = np.expand_dims(probs, axis=-1)\n coords = np.append(coords, probs, axis=-1)\n\n return coords\n\n\ndef get_probabilities(\n matrix: np.ndarray, coordinates: np.ndarray, image_size: int = 512\n) -> np.ndarray:\n \"\"\"Find prediction probability given the matrix and coordinates.\n\n Args:\n matrix: Matrix representation of spot coordinates.\n coordinates: Coordinates at which the probability should be determined.\n image_size: Default image size the grid was layed on.\n\n Returns:\n Array with all probabilities matching the coordinates.\n \"\"\"\n matrix_size = max(matrix.shape)\n cell_size = image_size // matrix_size\n nrow = ncol = math.ceil(image_size / cell_size)\n\n probabilities = []\n for r, c in coordinates:\n # Position of cell coordinate in prediction matrix\n cell_r = min(nrow - 1, int(np.floor(r)) // cell_size)\n cell_c = min(ncol - 1, int(np.floor(c)) // cell_size)\n\n probabilities.append(matrix[cell_r, cell_c, 0])\n return np.array(probabilities)\n\n\ndef get_intensities(\n image: np.ndarray, coordinate_list: np.ndarray, radius: int, method: str = \"sum\",\n) -> np.ndarray:\n \"\"\"Finds integrated intensities in a radius around each coordinate.\n\n Args:\n image: Input image with pixel values.\n coordinate_list: List of r, c coordinates in shape (n, 2).\n radius: Radius of kernel to determine intensities.\n method: How the integrated intensity should be calculated\n [options: sum, mean, std].\n\n Returns:\n Array with all integrated intensities.\n \"\"\"\n kernel = skimage.morphology.disk(radius)\n\n for r, c in coordinate_list:\n if not all([isinstance(i, float) for i in [r, c]]):\n print(r, c)\n\n intensities = np.zeros((len(coordinate_list), 1))\n for idx, (r, c) in enumerate(np.round(coordinate_list).astype(int)):\n # Selection with indexes will be truncated to the max index possible automatically\n area = (\n image[\n max(r - radius, 0) : r + radius + 1,\n max(c - radius, 0) : c + radius + 1,\n 
]\n * kernel[\n max(radius - r, 0) : radius + image.shape[0] - r,\n max(radius - c, 0) : radius + image.shape[1] - c,\n ]\n )\n if method == \"sum\":\n intensities[idx] = np.sum(area)\n elif method == \"mean\":\n intensities[idx] = np.mean(area)\n elif method == \"std\":\n intensities[idx] = np.std(area)\n else:\n options = [\"sum\", \"mean\", \"std\"]\n raise ValueError(f'Method must be one of \"{options}\". {method} is not.')\n\n return intensities\n"
] | [
[
"numpy.zeros",
"numpy.sum"
],
[
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.optimizers.Adam"
],
[
"numpy.expand_dims",
"numpy.pad",
"numpy.round",
"numpy.append",
"numpy.std",
"numpy.mean",
"numpy.floor",
"numpy.array",
"numpy.where",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
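In deepblink's predict(), the input image is reflect-padded so that each side reaches the next power of two before the network runs, and spots falling in the padded region are removed afterwards. A numpy-only sketch of that padding step follows; next_power is re-implemented locally as a stand-in for deepblink.data.next_power, whose exact contract is assumed here:

import numpy as np

def next_power(x: int, base: int = 2) -> int:
    # Local stand-in: smallest power of `base` that is >= x (assumed contract).
    power = 1
    while power < x:
        power *= base
    return power

image = np.random.rand(100, 130).astype(np.float32)
pad_bottom = next_power(image.shape[0], 2) - image.shape[0]
pad_right = next_power(image.shape[1], 2) - image.shape[1]
image_pad = np.pad(image, ((0, pad_bottom), (0, pad_right)), "reflect")
print(image.shape, "->", image_pad.shape)   # (100, 130) -> (128, 256)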
ethz-asl/data-driven-dynamics | [
"decf4bec19c9fc4a1789f5eb4d6e6003774c75d6"
] | [
"Tools/parametric_model/src/models/multirotor_model.py"
] | [
"\"\"\"\n *\n * Copyright (c) 2021 Manuel Yves Galliker\n * 2021 Autonomous Systems Lab ETH Zurich\n * All rights reserved.\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in\n * the documentation and/or other materials provided with the\n * distribution.\n * 3. Neither the name Data Driven Dynamics nor the names of its contributors may be\n * used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n\nThe model in this file estimates a simple force motor model for a multirotor.\n\nModel Parameters:\nu : normalized actuator output scaled between 0 and 1\nangular_vel_const : angular velocity constant\nangular_vel_offset : angular velocity offset\nmot_const : motor constant\nm : mass of UAV\naccel_const : combined acceleration constant k_2/m\n\nModel:\nangular_vel [rad/s] = angular_vel_const*u + angular_vel_offset\nF_thrust = - mot_const * angular_vel^2\nF_thrust_tot = - mot_const * \\\n (angular_vel_1^2 + angular_vel_2^2 + angular_vel_3^2 + angular_vel_4^2)\n\nNote that the forces are calculated in the NED body frame and are therefore negative.\n\"\"\"\n\n__author__ = \"Manuel Yves Galliker\"\n__maintainer__ = \"Manuel Yves Galliker\"\n__license__ = \"BSD 3\"\n\nfrom sklearn.linear_model import LinearRegression\nfrom .dynamics_model import DynamicsModel\nfrom .rotor_models import RotorModel\nfrom .aerodynamic_models import FuselageDragModel\nfrom .model_config import ModelConfig\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n\n\nclass MultiRotorModel(DynamicsModel):\n def __init__(self, config_file, model_name=\"multirotor_model\"):\n self.config = ModelConfig(config_file)\n super(MultiRotorModel, self).__init__(\n config_dict=self.config.dynamics_model_config)\n self.mass = self.config.model_config[\"mass\"]\n self.moment_of_inertia = np.diag([self.config.model_config[\"moment_of_inertia\"][\"Ixx\"],\n self.config.model_config[\"moment_of_inertia\"][\"Iyy\"], self.config.model_config[\"moment_of_inertia\"][\"Izz\"]])\n\n self.rotor_config_dict = self.config.model_config[\"actuators\"][\"rotors\"]\n\n self.model_name = model_name\n\n def prepare_force_regression_matrices(self):\n\n accel_mat = self.data_df[[\n \"acc_b_x\", \"acc_b_y\", \"acc_b_z\"]].to_numpy()\n force_mat = accel_mat * self.mass\n #self.y_forces = 
(force_mat).flatten()\n self.data_df[[\"measured_force_x\", \"measured_force_y\",\n \"measured_force_z\"]] = force_mat\n\n airspeed_mat = self.data_df[[\"V_air_body_x\",\n \"V_air_body_y\", \"V_air_body_z\"]].to_numpy()\n aero_model = FuselageDragModel()\n X_aero, coef_dict_aero, col_names_aero = aero_model.compute_fuselage_features(\n airspeed_mat)\n self.data_df[col_names_aero] = X_aero\n self.coef_dict.update(coef_dict_aero)\n self.y_dict.update({\"lin\":{\"x\":\"measured_force_x\",\"y\":\"measured_force_y\",\"z\":\"measured_force_z\"}})\n\n def prepare_moment_regression_matrices(self):\n moment_mat = np.matmul(self.data_df[[\n \"ang_acc_b_x\", \"ang_acc_b_y\", \"ang_acc_b_z\"]].to_numpy(), self.moment_of_inertia)\n #self.y_moments = (moment_mat).flatten()\n self.data_df[[\"measured_moment_x\", \"measured_moment_y\",\n \"measured_moment_z\"]] = moment_mat\n \n self.y_dict.update({\"rot\":{\"x\":\"measured_moment_x\",\"y\":\"measured_moment_y\",\"z\":\"measured_moment_z\"}})\n"
] | [
[
"numpy.diag"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
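The docstring of the multirotor model above defines the force model as angular_vel = angular_vel_const * u + angular_vel_offset and F_thrust_tot = -mot_const * sum(angular_vel_i^2) in the NED body frame. A standalone numpy evaluation of those two equations, with every constant invented purely for illustration:

import numpy as np

# All constants below are placeholders, not values from the repository.
angular_vel_const = 850.0    # rad/s per unit of normalized actuator output (assumed)
angular_vel_offset = 80.0    # rad/s at zero input (assumed)
mot_const = 5.0e-6           # motor thrust constant (assumed)

u = np.array([0.6, 0.6, 0.6, 0.6])                     # normalized actuator outputs in [0, 1]
angular_vel = angular_vel_const * u + angular_vel_offset
f_thrust_tot = -mot_const * np.sum(angular_vel ** 2)   # negative because forces are expressed in the NED body frame
print(f_thrust_tot)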
rhoposit/tacotron2 | [
"2dad8df5ea50459789e16d9effb83fc2a25e42ed"
] | [
"tacotron/models.py"
] | [
"# ==============================================================================\n# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics\n# Author: Yusuke Yasuda ([email protected])\n# All rights reserved.\n# ==============================================================================\n\"\"\" Models. \"\"\"\n\nimport tensorflow as tf\nfrom tacotron.modules import Embedding\nfrom tacotron.tacotron_v1 import EncoderV1, DecoderV1\nfrom tacotron.hooks import MetricsSaver, PostNetMetricsSaver\nfrom util.audio import Audio\n\n\nclass SingleSpeakerTacotronV1Model(tf.estimator.Estimator):\n\n def __init__(self, params, model_dir=None, config=None, warm_start_from=None):\n def model_fn(features, labels, mode, params):\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n is_validation = mode == tf.estimator.ModeKeys.EVAL\n is_prediction = mode == tf.estimator.ModeKeys.PREDICT\n\n embedding = Embedding(params.num_symbols, embedding_dim=params.embedding_dim)\n\n encoder = EncoderV1(is_training,\n cbhg_out_units=params.cbhg_out_units,\n conv_channels=params.conv_channels,\n max_filter_width=params.max_filter_width,\n projection1_out_channels=params.projection1_out_channels,\n projection2_out_channels=params.projection2_out_channels,\n num_highway=params.num_highway,\n prenet_out_units=params.encoder_prenet_out_units,\n drop_rate=params.encoder_prenet_drop_rate)\n\n decoder = DecoderV1(prenet_out_units=params.decoder_prenet_out_units,\n drop_rate=params.decoder_prenet_drop_rate,\n attention_out_units=params.attention_out_units,\n decoder_out_units=params.decoder_out_units,\n num_codes=params.num_codes,\n outputs_per_step=params.outputs_per_step,\n max_iters=params.max_iters,\n n_feed_frame=params.n_feed_frame)\n\n target = labels.codes if (is_training or is_validation) else None\n\n embedding_output = embedding(features.source)\n encoder_output = encoder(embedding_output)\n codes_output, stop_token, decoder_state = decoder(encoder_output,\n is_training=is_training,\n is_validation=is_validation,\n memory_sequence_length=features.source_length,target=target)\n alignment = tf.transpose(decoder_state[0].alignment_history.stack(), [1, 2, 0])\n\n global_step = tf.train.get_global_step()\n\n if mode is not tf.estimator.ModeKeys.PREDICT:\n codes_loss = self.codes_loss(code_output, labels.codes,\n labels.codes_loss_mask)\n done_loss = self.binary_loss(stop_token, labels.done, labels.binary_loss_mask)\n loss = code_loss + done_loss\n\n if is_training:\n lr = self.learning_rate_decay(\n params.initial_learning_rate, global_step) if params.decay_learning_rate else tf.convert_to_tensor(\n params.initial_learning_rate)\n optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=params.adam_beta1,\n beta2=params.adam_beta2, epsilon=params.adam_eps)\n\n gradients, variables = zip(*optimizer.compute_gradients(loss))\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.0)\n self.add_training_stats(loss, codes_loss, done_loss, lr)\n # Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. 
See:\n # https://github.com/tensorflow/tensorflow/issues/1122\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_op = optimizer.apply_gradients(zip(clipped_gradients, variables), global_step=global_step)\n summary_writer = tf.summary.FileWriter(model_dir)\n alignment_saver = MetricsSaver([alignment],\n global_step,\n codes_output,\n labels.codes,\n labels.target_length,\n features.id,\n features.text,\n params.alignment_save_steps,\n mode, summary_writer,\n params.save_training_time_metrics,\n params.keep_eval_results_max_epoch)\n hooks = [alignment_saver]\n if params.record_profile:\n profileHook = tf.train.ProfilerHook(save_steps=params.profile_steps, output_dir=model_dir,\n show_dataflow=True, show_memory=True)\n hooks.append(profileHook)\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op,\n training_hooks=hooks)\n\n if is_validation:\n # validation with teacher forcing\n codes_output_with_teacher, stop_token_with_teacher, _ = decoder(encoder_output,\n is_training=is_training,\n is_validation=is_validation,\n memory_sequence_length=features.source_length,\n target=target,\n teacher_forcing=True)\n codes_loss_with_teacher = self.spec_loss(codes_output_with_teacher, labels.codes, labels.codes_loss_mask)\n done_loss_with_teacher = self.binary_loss(stop_token_with_teacher, labels.done, labels.binary_loss_mask)\n loss_with_teacher = codes_loss_with_teacher + done_loss_with_teacher\n eval_metric_ops = self.get_validation_metrics(codes_loss, done_loss, loss_with_teacher, codes_loss_with_teacher, done_loss_with_teacher)\n\n summary_writer = tf.summary.FileWriter(model_dir)\n alignment_saver = MetricsSaver([alignment],\n global_step,\n codes_output,\n labels.codes,\n labels.target_length,\n features.id,\n features.text,\n 1,\n mode, summary_writer,\n params.save_training_time_metrics,\n params.keep_eval_results_max_epoch)\n return tf.estimator.EstimatorSpec(mode, loss=loss,\n evaluation_hooks=[alignment_saver],\n eval_metric_ops=eval_metric_ops)\n\n if is_prediction:\n return tf.estimator.EstimatorSpec(mode, predictions={\n \"id\": features.id,\n \"codes\": codes_output,\n \"alignment\": alignment,\n \"source\": features.source,\n \"text\": features.text,\n })\n\n super(SingleSpeakerTacotronV1Model, self).__init__(\n model_fn=model_fn, model_dir=model_dir, config=config,\n params=params, warm_start_from=warm_start_from)\n\n @staticmethod\n def codes_loss(y_hat, y, mask, n_priority_freq=None, priority_w=0):\n l1_loss = tf.abs(y_hat - y)\n\n # Priority L1 loss\n if n_priority_freq is not None and priority_w > 0:\n priority_loss = tf.abs(y_hat[:, :, :n_priority_freq] - y[:, :, :n_priority_freq])\n l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss\n\n return tf.losses.compute_weighted_loss(l1_loss, weights=tf.expand_dims(mask, axis=2))\n\n @staticmethod\n def binary_loss(done_hat, done, mask):\n return tf.losses.sigmoid_cross_entropy(done, tf.squeeze(done_hat, axis=-1), weights=mask)\n\n @staticmethod\n def learning_rate_decay(init_rate, global_step):\n warmup_steps = 4000.0\n step = tf.to_float(global_step + 1)\n return init_rate * warmup_steps ** 0.5 * tf.minimum(step * warmup_steps ** -1.5, step ** -0.5)\n\n @staticmethod\n def add_training_stats(loss, codes_loss, done_loss, learning_rate):\n if loss is not None:\n tf.summary.scalar(\"loss_with_teacher\", loss)\n if codes_loss is not None:\n tf.summary.scalar(\"codes_loss\", codes_loss)\n tf.summary.scalar(\"codes_loss_with_teacher\", codes_loss)\n if done_loss is not None:\n 
tf.summary.scalar(\"done_loss\", done_loss)\n tf.summary.scalar(\"done_loss_with_teacher\", done_loss)\n tf.summary.scalar(\"learning_rate\", learning_rate)\n return tf.summary.merge_all()\n\n @staticmethod\n def get_validation_metrics(codes_loss, done_loss, loss_with_teacher, codes_loss_with_teacher, done_loss_with_teacher):\n metrics = {}\n if codes_loss is not None:\n metrics[\"codes_loss\"] = tf.metrics.mean(codes_loss)\n if done_loss is not None:\n metrics[\"done_loss\"] = tf.metrics.mean(done_loss)\n if loss_with_teacher is not None:\n metrics[\"loss_with_teacher\"] = tf.metrics.mean(loss_with_teacher)\n if codes_loss_with_teacher is not None:\n metrics[\"codes_loss_with_teacher\"] = tf.metrics.mean(codes_loss_with_teacher)\n if done_loss_with_teacher is not None:\n metrics[\"done_loss_with_teacher\"] = tf.metrics.mean(done_loss_with_teacher)\n return metrics\n\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.metrics.mean",
"tensorflow.summary.FileWriter",
"tensorflow.get_collection",
"tensorflow.minimum",
"tensorflow.squeeze",
"tensorflow.train.get_global_step",
"tensorflow.expand_dims",
"tensorflow.clip_by_global_norm",
"tensorflow.summary.merge_all",
"tensorflow.to_float",
"tensorflow.train.AdamOptimizer",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.train.ProfilerHook",
"tensorflow.summary.scalar",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
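learning_rate_decay() in the Tacotron model above implements the familiar warmup schedule init_rate * warmup_steps^0.5 * min(step * warmup_steps^-1.5, step^-0.5). A plain-Python sketch of the same formula, without TensorFlow, shows how the rate ramps up and then decays:

def learning_rate_decay(init_rate, global_step, warmup_steps=4000.0):
    # Same arithmetic as the static method above, minus the tf.to_float cast.
    step = float(global_step + 1)
    return init_rate * warmup_steps ** 0.5 * min(step * warmup_steps ** -1.5, step ** -0.5)

for s in (0, 1000, 4000, 16000, 64000):
    print(s, format(learning_rate_decay(1e-3, s), ".2e"))
# The rate grows roughly linearly until ~warmup_steps, then decays as 1/sqrt(step).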
ChristopherChudzicki/mitx-grading-library | [
"1d9a7107f26b5e0ebe24deb552cf943779693e18"
] | [
"mitxgraders/helpers/calc/mathfuncs.py"
] | [
"\"\"\"\nmathfuncs.py\n\nContains mathematical functions for use in interpreting formulas.\n\nContains some helper functions used in grading formulae:\n* within_tolerance\n\nDefines:\n* DEFAULT_FUNCTIONS\n* DEFAULT_VARIABLES\n* DEFAULT_SUFFIXES\n* METRIC_SUFFIXES\n\"\"\"\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\nimport six\nimport numpy as np\nimport scipy.special as special\nfrom mitxgraders.helpers.calc.specify_domain import SpecifyDomain\nfrom mitxgraders.helpers.calc.exceptions import FunctionEvalError\nfrom mitxgraders.helpers.calc.math_array import MathArray\n\n# Normal Trig\ndef sec(arg):\n \"\"\"Secant\"\"\"\n return 1 / np.cos(arg)\n\ndef csc(arg):\n \"\"\"Cosecant\"\"\"\n return 1 / np.sin(arg)\n\ndef cot(arg):\n \"\"\"Cotangent\"\"\"\n return 1 / np.tan(arg)\n\n# Inverse Trig\n# http://en.wikipedia.org/wiki/Inverse_trigonometric_functions#Relationships_among_the_inverse_trigonometric_functions\ndef arcsec(val):\n \"\"\"Inverse secant\"\"\"\n return np.arccos(1. / val)\n\ndef arccsc(val):\n \"\"\"Inverse cosecant\"\"\"\n return np.arcsin(1. / val)\n\ndef arccot(val):\n \"\"\"Inverse cotangent\"\"\"\n if np.real(val) < 0:\n return -np.pi / 2 - np.arctan(val)\n else:\n return np.pi / 2 - np.arctan(val)\n\n# Hyperbolic Trig\ndef sech(arg):\n \"\"\"Hyperbolic secant\"\"\"\n return 1 / np.cosh(arg)\n\ndef csch(arg):\n \"\"\"Hyperbolic cosecant\"\"\"\n return 1 / np.sinh(arg)\n\ndef coth(arg):\n \"\"\"Hyperbolic cotangent\"\"\"\n return 1 / np.tanh(arg)\n\n# And their inverses\ndef arcsech(val):\n \"\"\"Inverse hyperbolic secant\"\"\"\n return np.arccosh(1. / val)\n\ndef arccsch(val):\n \"\"\"Inverse hyperbolic cosecant\"\"\"\n return np.arcsinh(1. / val)\n\ndef arccoth(val):\n \"\"\"Inverse hyperbolic cotangent\"\"\"\n return np.arctanh(1. / val)\n\n# NOTE: tests are in a separate file, NOT doctests.\n# see https://bugs.python.org/issue6835\[email protected]_decorator((1,), (1,))\ndef arctan2(x, y):\n \"\"\"\n Returns the an angle in range (-pi, pi] whose tangent is y/x, taking into\n account the quadrant that (x, y) is in.\n \"\"\"\n if x == 0 and y == 0:\n raise FunctionEvalError(\"arctan2(0, 0) is undefined\")\n\n return np.arctan2(y, x)\n\n# NOTE: tests are in a separate file, NOT doctests.\n# see https://bugs.python.org/issue6835\[email protected]_decorator((1,), (1,))\ndef kronecker(x, y):\n \"\"\"\n Returns 1 if x==y, and 0 otherwise.\n Note that this should really only be used for integer expressions.\n \"\"\"\n if x == y:\n return 1\n return 0\n\ndef content_if_0d_array(obj):\n \"\"\"\n If obj is a 0d numpy array, return its contents. Otherwise, return item.\n\n Usage:\n ======\n\n >>> content_if_0d_array(5) == 5\n True\n >>> content_if_0d_array(np.array(5)) == 5\n True\n >>> content_if_0d_array(np.array([1, 2, 3]))\n array([1, 2, 3])\n \"\"\"\n return obj.item() if isinstance(obj, np.ndarray) and obj.ndim == 0 else obj\n\ndef real(z):\n \"\"\"\n Returns the real part of z.\n >>> real(2+3j)\n 2.0\n\n If the input is a number, a number is returned:\n >>> isinstance(real(2+3j), float)\n True\n\n Can be used with arrays, too: # doctest: +NORMALIZE_WHITESPACE\n >>> real(np.array([1+10j, 2+20j, 3+30j]))\n array([ 1., 2., 3.])\n \"\"\"\n # np.real seems to return 0d arrays for numerical inputs. 
For example,\n # np.real(2+3j) is a 0d array.\n return content_if_0d_array(np.real(z))\n\ndef imag(z):\n \"\"\"\n Returns the imaginary part of z.\n >>> imag(2+3j)\n 3.0\n\n If the input is a number, a number is returned:\n >>> isinstance(imag(2+3j), float)\n True\n\n Can be used with arrays, too:\n >>> imag(np.array([1+10j, 2+20j, 3+30j]))\n array([ 10., 20., 30.])\n \"\"\"\n return content_if_0d_array(np.imag(z))\n\ndef factorial(z):\n \"\"\"\n Factorial function over complex numbers, using the gamma function.\n Note that math.factorial will return long ints, which are problematic when running\n into overflow issues. The gamma function just returns inf.\n\n Usage\n =====\n\n Non-negative integer input returns floats:\n >>> factorial(4)\n 24.0\n\n Floats and complex numbers use scipy's gamma function:\n >>> import math\n >>> factorial(0.5) # doctest: +ELLIPSIS\n 0.8862269...\n >>> math.sqrt(math.pi)/2 # doctest: +ELLIPSIS\n 0.8862269...\n >>> factorial(3.2+4.1j) # doctest: +ELLIPSIS\n (1.0703272...-0.3028032...j)\n >>> factorial(2.2+4.1j)*(3.2+4.1j) # doctest: +ELLIPSIS\n (1.0703272...-0.3028032...j)\n\n Works with numpy arrays:\n >>> np.array_equal(\n ... factorial(np.array([1, 2, 3, 4])),\n ... np.array([1, 2, 6, 24])\n ... )\n True\n\n Really big numbers return inf:\n >>> factorial(500) == float('inf')\n True\n >>> factorial(500.5) == float('inf')\n True\n\n Throws errors at poles:\n >>> try: # doctest: +ELLIPSIS\n ... factorial(-2)\n ... except FunctionEvalError as error:\n ... print(error)\n Error evaluating factorial() or fact() in input...\n \"\"\"\n\n try:\n is_integer = isinstance(z, int) or z.is_integer()\n except AttributeError:\n is_integer = False\n\n if is_integer and z < 0:\n msg = (\"Error evaluating factorial() or fact() in input. These \"\n \"functions cannot be used at negative integer values.\")\n raise FunctionEvalError(msg)\n\n value = special.gamma(z+1)\n # value is a numpy array; If it's 0d, we can just get its item:\n try:\n return value.item()\n except ValueError:\n return value\n\[email protected]_decorator((3,), (3,))\ndef cross(a, b):\n return MathArray([\n a[1]*b[2] - b[1]*a[2],\n a[2]*b[0] - b[2]*a[0],\n a[0]*b[1] - b[0]*a[1]\n ])\n\n# Variables available by default\nDEFAULT_VARIABLES = {\n 'i': np.complex(0, 1),\n 'j': np.complex(0, 1),\n 'e': np.e,\n 'pi': np.pi\n}\n\n# These act element-wise on numpy arrays\nELEMENTWISE_FUNCTIONS = {\n 'sin': np.sin,\n 'cos': np.cos,\n 'tan': np.tan,\n 'sec': sec,\n 'csc': csc,\n 'cot': cot,\n # We use scimath variants which give complex results when needed. 
For example:\n # np.sqrt(-4+0j) = 2j\n # np.sqrt(-4) = nan, but\n # np.lib.scimath.sqrt(-4) = 2j\n 'sqrt': np.lib.scimath.sqrt,\n 'log10': np.lib.scimath.log10,\n 'log2': np.lib.scimath.log2,\n 'ln': np.lib.scimath.log,\n 'exp': np.exp,\n 'arccos': np.lib.scimath.arccos,\n 'arcsin': np.lib.scimath.arcsin,\n 'arctan': np.arctan,\n 'arcsec': arcsec,\n 'arccsc': arccsc,\n 'arccot': arccot,\n 'abs': np.abs,\n 'fact': factorial,\n 'factorial': factorial,\n 'sinh': np.sinh,\n 'cosh': np.cosh,\n 'tanh': np.tanh,\n 'sech': sech,\n 'csch': csch,\n 'coth': coth,\n 'arcsinh': np.arcsinh,\n 'arccosh': np.arccosh,\n 'arctanh': np.lib.scimath.arctanh,\n 'arcsech': arcsech,\n 'arccsch': arccsch,\n 'arccoth': arccoth,\n 'floor': np.floor,\n 'ceil': np.ceil\n}\n\ndef has_one_scalar_input(display_name):\n return SpecifyDomain.make_decorator((1,), display_name=display_name)\n\ndef has_at_least_2_scalar_inputs(display_name):\n return SpecifyDomain.make_decorator((1,), display_name=display_name, min_length=2)\n\nSCALAR_FUNCTIONS = {key: has_one_scalar_input(key)(ELEMENTWISE_FUNCTIONS[key])\n for key in ELEMENTWISE_FUNCTIONS}\n\nSCALAR_FUNCTIONS['arctan2'] = arctan2\nSCALAR_FUNCTIONS['kronecker'] = kronecker\n\nMULTI_SCALAR_FUNCTIONS = {\n 'min': has_at_least_2_scalar_inputs('min')(min),\n 'max': has_at_least_2_scalar_inputs('max')(max)\n}\n\nARRAY_FUNCTIONS = {\n 're': real,\n 'im': imag,\n 'conj': np.conj\n}\n\ndef has_one_square_input(display_name):\n return SpecifyDomain.make_decorator('square', display_name=display_name)\n\ndef array_abs(obj):\n \"\"\"\n Takes absolute value of numbers or vectors and suggests norm(...) instead\n for matrix/tensors.\n\n NOTE: The decision to limit abs(...) to scalars and vectors was motivated\n by pedagogy not software.\n \"\"\"\n if isinstance(obj, MathArray) and obj.ndim > 1:\n msg = (\"The abs(...) function expects a scalar or vector. To take the \"\n \"norm of a {}, try norm(...) 
instead.\".format(\n MathArray.get_shape_name(obj.ndim)))\n raise FunctionEvalError(msg)\n return np.linalg.norm(obj)\n\nARRAY_ONLY_FUNCTIONS = {\n 'norm': np.linalg.norm,\n 'abs': array_abs,\n 'trans': np.transpose,\n 'det': has_one_square_input('det')(np.linalg.det),\n 'trace': has_one_square_input('trace')(np.trace),\n 'ctrans': lambda x: np.conj(np.transpose(x)),\n 'adj': lambda x: np.conj(np.transpose(x)),\n 'cross': cross\n}\n\ndef merge_dicts(*source_dicts):\n \"\"\"Create a new dictionary and merge sources into it.\"\"\"\n target = {}\n for source in source_dicts:\n target.update(source)\n return target\n\nDEFAULT_FUNCTIONS = merge_dicts(SCALAR_FUNCTIONS, MULTI_SCALAR_FUNCTIONS, ARRAY_FUNCTIONS)\n\nDEFAULT_SUFFIXES = {\n '%': 0.01\n}\n\nMETRIC_SUFFIXES = {\n 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,\n 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12\n}\n\npauli = {\n 'sigma_x': MathArray([\n [0, 1],\n [1, 0]\n ]),\n 'sigma_y': MathArray([\n [0, -1j],\n [1j, 0]\n ]),\n 'sigma_z': MathArray([\n [1, 0],\n [0, -1]\n ])\n}\n\ncartesian_xyz = {\n 'hatx': MathArray([1, 0, 0]),\n 'haty': MathArray([0, 1, 0]),\n 'hatz': MathArray([0, 0, 1])\n}\n\ncartesian_ijk = {\n 'hati': MathArray([1, 0, 0]),\n 'hatj': MathArray([0, 1, 0]),\n 'hatk': MathArray([0, 0, 1])\n}\n\ndef percentage_as_number(percent_str):\n \"\"\"\n Convert a percentage string to a number.\n\n Args:\n percent_str: A percent string, for example '5%' or '1.2%'\n\n Usage\n =====\n >>> percentage_as_number('8%')\n 0.08\n >>> percentage_as_number('250%')\n 2.5\n >>> percentage_as_number('-10%')\n -0.1\n \"\"\"\n return float(percent_str.strip()[:-1]) * 0.01\n\ndef within_tolerance(x, y, tolerance):\n \"\"\"\n Check that |x-y| <= tolerance with appropriate norm.\n\n Args:\n x: number or array (np array_like)\n y: number or array (np array_like)\n tolerance: Number or PercentageString\n\n NOTE: Calculates x - y; may raise an error for incompatible shapes.\n\n Usage\n =====\n\n The tolerance can be a number:\n >>> within_tolerance(10, 9.01, 1)\n True\n >>> within_tolerance(10, 9.01, 0.5)\n False\n\n If tolerance is a percentage, it is a percent of (the norm of) x:\n >>> within_tolerance(10, 9.01, '10%')\n True\n >>> within_tolerance(9.01, 10, '10%')\n False\n\n Works for vectors and matrices:\n >>> A = np.array([[1,2],[-3,1]])\n >>> B = np.array([[1.1, 2], [-2.8, 1]])\n >>> diff = round(np.linalg.norm(A-B), 6)\n >>> diff\n 0.223607\n >>> within_tolerance(A, B, 0.25)\n True\n \"\"\"\n # When used within graders, tolerance has already been\n # validated as a Number or PercentageString\n if isinstance(tolerance, six.text_type):\n tolerance = np.linalg.norm(x) * percentage_as_number(tolerance)\n\n difference = x - y\n\n return np.linalg.norm(difference) <= tolerance\n\ndef is_nearly_zero(x, tolerance, reference=None):\n \"\"\"\n Check that x is within tolerance of zero. 
If tolerance is provided as a\n percentage, a reference value is requied.\n\n Args:\n x: number or array (np array_like)\n reference: None number or array (np array_like), only used when\n tolerance is provided as a percentage\n tolerance: Number or PercentageString\n\n Usage\n =====\n >>> is_nearly_zero(0.4, 0.5)\n True\n >>> is_nearly_zero(0.4, 0.3)\n False\n >>> is_nearly_zero(0.4, '5%', reference=10)\n True\n >>> is_nearly_zero(0.4, '3%', reference=10)\n False\n\n Works for arrays, too:\n >>> x = np.array([[1, 1], [0, -1]])\n >>> np.linalg.norm(x) # doctest: +ELLIPSIS\n 1.732050...\n >>> is_nearly_zero(x, '18%', reference=10)\n True\n >>> is_nearly_zero(x, '17%', reference=10)\n False\n\n A ValueError is raised when percentage tolerance is used without reference:\n >>> try:\n ... is_nearly_zero(0.4, '3%')\n ... except ValueError as error:\n ... print(error)\n When tolerance is a percentage, reference must not be None.\n \"\"\"\n # When used within graders, tolerance has already been\n # validated as a Number or PercentageString\n if isinstance(tolerance, six.text_type):\n if reference is None:\n raise ValueError('When tolerance is a percentage, reference must '\n 'not be None.')\n tolerance = np.linalg.norm(reference) * percentage_as_number(tolerance)\n\n return np.linalg.norm(x) <= tolerance\n"
] | [
[
"numpy.imag",
"numpy.arctanh",
"numpy.arctan",
"numpy.arctan2",
"numpy.arcsin",
"numpy.sin",
"numpy.real",
"scipy.special.gamma",
"numpy.cosh",
"numpy.arccosh",
"numpy.arccos",
"numpy.tan",
"numpy.transpose",
"numpy.tanh",
"numpy.arcsinh",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sinh",
"numpy.complex"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
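within_tolerance() above accepts either a numeric tolerance or a percentage string, and a percentage is interpreted relative to the norm of the first argument. A numpy-only re-creation of that logic (a sketch, not the library's implementation, and without the validation the graders perform beforehand):

import numpy as np

def within_tolerance(x, y, tolerance):
    if isinstance(tolerance, str):                        # e.g. '10%'
        tolerance = np.linalg.norm(x) * float(tolerance.strip()[:-1]) * 0.01
    return np.linalg.norm(np.asarray(x) - np.asarray(y)) <= tolerance

print(within_tolerance(10, 9.01, '10%'))    # True: tolerance is 1.0, |10 - 9.01| = 0.99
print(within_tolerance(9.01, 10, '10%'))    # False: tolerance is now 0.901
print(within_tolerance(np.array([1, 2]), np.array([1.1, 2]), 0.25))   # True for arrays as well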
Moetaz-M-Mokhtar/ITIintake40_FaceRecognition | [
"570ceb5d1353efa8b8754243ee8d5db36a951998"
] | [
"detection/docker/model_handler_cpu.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n# http://www.apache.org/licenses/LICENSE-2.0\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\"\"\"\nModelHandler defines a base model handler.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport cv2\nimport logging\nimport time\nimport base64\n\n\nfrom mms.utils.mxnet import image, ndarray\n\nsys.path.append('/root')\nfrom insightface.RetinaFace.retinaface import RetinaFace\n\ndef decode_img(img_str):\n # img_bytes = bytes(img_str, 'utf-8')\n img_buff = base64.b64decode(img_str)\n img_jpg = np.frombuffer(img_buff, dtype=np.uint8)\n img = cv2.imdecode(img_jpg, cv2.IMREAD_COLOR)\n return img\n \nclass ModelHandler(object):\n \"\"\"\n A base Model handler implementation.\n \"\"\"\n\n def __init__(self):\t\t\t\t\t\t \t \n detection_model = 'retinaface-R50/R50' # Name of the detetion model for example 'R50' for LResNet50E\n det_epoch = 0 # Detection model epoch number\n self._batch_size = 1\n self.det_threshold = 0.8\n self.image_size = 160 \t# check recognition model input layer before changing this value\n self.margin = 20 \t# Number of margin pixels to crop faces function\n self.gpuid = -1\t\t\t\t\t\t \t # use CPU\n det_model = '/root/models/detection/' + detection_model \t\t # path to the detection model\n self._detector = RetinaFace(det_model, det_epoch, self.gpuid, 'net3')\n\n def initialize(self, context):\n \"\"\"\n Initialize model. 
This will be called during model loading time\n :param context: Initial context contains model server system properties.\n :return:\n \"\"\"\n self._context = context\n self.initialized = True\n\n def preprocess(self, data):\n \"\"\"\n Transform raw input into model input data.\n :param batch: list of raw requests, should match batch size\n :return: list of preprocessed model input data\n \"\"\"\n assert self._batch_size == len(data), \"Invalid input batch size: {}\".format(len(batch))\n img_list = []\n for idx, img in enumerate(data):\n # We are assuming input shape is NCHW\n # [h, w] = [1024, 1024]\n img_arr = decode_img(img['body'])\n # img_arr = mx.nd.array(img_arr)\n # img_arr = image.resize(img_arr, w, h)\n # img_arr = image.transform_shape(img_arr)\n img_list.append(img_arr)\n return img_list\n\n def inference(self, model_input):\n \"\"\"\n Internal inference methods\n :param model_input: transformed model input data\n :return: list of inference output in NDArray\n \"\"\"\n inference_output = []\n for frame in model_input:\n assert frame.ndim != 2 or frame.ndim != 3, \"expected input image dimension to be 2 or 3 but got data with {}\".format(frame.ndim)\n if frame.ndim == 2:\n frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)\n im_shape = frame.shape\n scales = [1024, 1920]\n target_size = scales[0]\n max_size = scales[1]\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(target_size) / float(im_size_min)\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n scales = [im_scale]\n flip = False\n faces_bb, landmarks = self._detector.detect(frame, threshold=self.det_threshold, scales=scales, do_flip=flip)\n inference_output.append([faces_bb.tolist(), landmarks.tolist()])\n \n print('inference output: ', inference_output)\n return inference_output\n\n def postprocess(self, inference_output):\n \"\"\"\n Return predict result in batch.\n :param inference_output: list of inference output\n :return: list of predict results\n \"\"\"\n # faces_bb = [output[0] for output in inference_output]\n # landmarks = [output[1] for output in inference_output]\n return inference_output\n \n def handle(self, data, context):\n \"\"\"\n Custom service entry point function.\n :param data: list of objects, raw input from request\n :param context: model server context\n :return: list of outputs to be send back to client\n \"\"\"\n try:\n preprocess_start = time.time()\n data = self.preprocess(data)\n inference_start = time.time()\n data = self.inference(data)\n postprocess_start = time.time()\n data = self.postprocess(data)\n end_time = time.time()\n\n metrics = context.metrics\n metrics.add_time(\"PreprocessTime\", round((inference_start - preprocess_start) * 1000, 2))\n metrics.add_time(\"InferenceTime\", round((postprocess_start - inference_start) * 1000, 2))\n metrics.add_time(\"PostprocessTime\", round((end_time - postprocess_start) * 1000, 2))\n\n return data\n\n except Exception as e:\n logging.error(e, exc_info=True)\n request_processor = context.request_processor\n request_processor.report_status(500, \"Unknown inference error\")\n return [str(e)] * self._batch_size\n \n"
] | [
[
"numpy.round",
"numpy.frombuffer",
"numpy.max",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
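decode_img() in the model handler above turns a base64-encoded JPEG payload back into a BGR array via cv2.imdecode. A round-trip sketch using a synthetic image, assuming opencv-python is installed; the encode half exists only to produce a payload to decode:

import base64
import cv2
import numpy as np

img = np.zeros((32, 32, 3), dtype=np.uint8)        # synthetic BGR image standing in for a request body
ok, jpg = cv2.imencode(".jpg", img)
img_str = base64.b64encode(jpg.tobytes())

# This part mirrors decode_img():
img_buff = base64.b64decode(img_str)
img_jpg = np.frombuffer(img_buff, dtype=np.uint8)
decoded = cv2.imdecode(img_jpg, cv2.IMREAD_COLOR)
print(decoded.shape)                               # (32, 32, 3)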
zxc1342802/leijmtrader | [
"f24d5593d8708e48f2a9180d9469a6c2af93a08d"
] | [
"examples/strategies/king_keltner_strategy.py"
] | [
"from jiamtrader.app.cta_strategy import (\n CtaTemplate,\n StopOrder,\n TickData,\n BarData,\n TradeData,\n OrderData,\n BarGenerator,\n ArrayManager,\n)\n\nimport pandas_ta as ta\nimport pandas as pd\n\nclass KingKeltnerStrategy(CtaTemplate):\n \"\"\"\"\"\"\n\n author = \"用Python的交易员\"\n\n kk_length = 11\n kk_dev = 1.6\n trailing_percent = 0.8\n fixed_size = 1\n\n kk_up = 0\n kk_down = 0\n intra_trade_high = 0\n intra_trade_low = 0\n\n long_vt_orderids = []\n short_vt_orderids = []\n vt_orderids = []\n\n parameters = [\"kk_length\", \"kk_dev\", \"trailing_percent\", \"fixed_size\"]\n variables = [\"kk_up\", \"kk_down\"]\n\n def __init__(self, cta_engine, strategy_name, vt_symbol, setting):\n \"\"\"\"\"\"\n super().__init__(cta_engine, strategy_name, vt_symbol, setting)\n\n self.bg = BarGenerator(self.on_bar, 5, self.on_5min_bar)\n self.am = ArrayManager()\n\n def on_init(self):\n \"\"\"\n Callback when strategy is inited.\n \"\"\"\n self.write_log(\"策略初始化\")\n self.load_bar(10)\n\n def on_start(self):\n \"\"\"\n Callback when strategy is started.\n \"\"\"\n self.write_log(\"策略启动\")\n\n def on_stop(self):\n \"\"\"\n Callback when strategy is stopped.\n \"\"\"\n self.write_log(\"策略停止\")\n\n def on_tick(self, tick: TickData):\n \"\"\"\n Callback of new tick data update.\n \"\"\"\n self.bg.update_tick(tick)\n\n def on_bar(self, bar: BarData):\n \"\"\"\n Callback of new bar data update.\n \"\"\"\n self.bg.update_bar(bar)\n\n def on_5min_bar(self, bar: BarData):\n \"\"\"\"\"\"\n for orderid in self.vt_orderids:\n self.cancel_order(orderid)\n self.vt_orderids.clear()\n\n am = self.am\n am.update_bar(bar)\n if not am.inited:\n return\n\n high = pd.Series(am.high_array)\n low = pd.Series(am.low_array)\n close = pd.Series(am.close_array)\n\n range_ = ta.true_range(high, low, close)\n\n basis = ta.sma(close, self.kk_length)\n band = ta.sma(range_, self.kk_length)\n up = basis + self.kk_dev * band\n down = basis - self.kk_dev * band\n\n self.kk_up, self.kk_down = up.iloc[-1], down.iloc[-1]\n\n if self.pos == 0:\n self.intra_trade_high = bar.high_price\n self.intra_trade_low = bar.low_price\n self.send_oco_order(self.kk_up, self.kk_down, self.fixed_size)\n\n elif self.pos > 0:\n self.intra_trade_high = max(self.intra_trade_high, bar.high_price)\n self.intra_trade_low = bar.low_price\n\n vt_orderids = self.sell(self.intra_trade_high * (1 - self.trailing_percent / 100),\n abs(self.pos), True)\n self.vt_orderids.extend(vt_orderids)\n\n elif self.pos < 0:\n self.intra_trade_high = bar.high_price\n self.intra_trade_low = min(self.intra_trade_low, bar.low_price)\n\n vt_orderids = self.cover(self.intra_trade_low * (1 + self.trailing_percent / 100),\n abs(self.pos), True)\n self.vt_orderids.extend(vt_orderids)\n\n self.put_event()\n\n def on_order(self, order: OrderData):\n \"\"\"\n Callback of new order data update.\n \"\"\"\n pass\n\n def on_trade(self, trade: TradeData):\n \"\"\"\n Callback of new trade data update.\n \"\"\"\n if self.pos != 0:\n if self.pos > 0:\n for short_orderid in self.short_vt_orderids:\n self.cancel_order(short_orderid)\n\n elif self.pos < 0:\n for buy_orderid in self.long_vt_orderids:\n self.cancel_order(buy_orderid)\n\n for orderid in (self.long_vt_orderids + self.short_vt_orderids):\n if orderid in self.vt_orderids:\n self.vt_orderids.remove(orderid)\n\n self.put_event()\n\n def send_oco_order(self, buy_price, short_price, volume):\n \"\"\"\"\"\"\n self.long_vt_orderids = self.buy(buy_price, volume, True)\n self.short_vt_orderids = self.short(short_price, volume, True)\n\n 
self.vt_orderids.extend(self.long_vt_orderids)\n self.vt_orderids.extend(self.short_vt_orderids)\n\n def on_stop_order(self, stop_order: StopOrder):\n \"\"\"\n Callback of stop order update.\n \"\"\"\n pass\n"
] | [
[
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
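on_5min_bar() above builds a Keltner-style channel: basis and band are simple moving averages of the close and of the true range, and the channel is basis plus or minus kk_dev * band. A standalone sketch of just that computation, reusing the same pandas_ta calls but fed with synthetic OHLC data instead of an ArrayManager:

import numpy as np
import pandas as pd
import pandas_ta as ta

n = 200
close = pd.Series(np.cumsum(np.random.randn(n)) + 100.0)
high = close + np.random.rand(n)
low = close - np.random.rand(n)

kk_length, kk_dev = 11, 1.6
range_ = ta.true_range(high, low, close)     # same calls as in on_5min_bar()
basis = ta.sma(close, kk_length)
band = ta.sma(range_, kk_length)
kk_up = (basis + kk_dev * band).iloc[-1]
kk_down = (basis - kk_dev * band).iloc[-1]
print(kk_up, kk_down)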
abhishekmaha23/synthetic_data_generation_attempt | [
"99ee858cdf405641fd0e2797bfc14c1a736547eb"
] | [
"util/utils.py"
] | [
"import matplotlib.pyplot as plt\nfrom datetime import datetime\nimport numpy as np\nimport torch\nimport os\nimport time\nfrom scipy.ndimage.filters import gaussian_filter1d\nfrom itertools import repeat\nimport copy\nimport gym\n# import torch.multiprocessing as multiprocessing\nimport multiprocessing\nimport pickle\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom collections import defaultdict, Counter\n\n\ndef plot_fig(x, y, std=None, title=None, draw_grid=True,\n xlabel=None, ylabel=None, add_legend=False,\n label=None, display_fig=True,\n save_fig=False, save_name=None, xlim=[None, None], ylim=[None, None], img_size=(10, 6), update_fig=None, smooth_fill=False, smooth_fill_sigma=None):\n # plt.ion()\n plt.figure(figsize=img_size)\n assert type(x) == list, 'X is not a list'\n # assert type(y) == list, 'Y is not a list'\n if update_fig is None:\n fig, = plt.plot(x, y, label=label)\n axes = plt.gca()\n axes.set_autoscale_on(True) # enable autoscale\n axes.autoscale_view(True, True, True)\n axes.set_xlim(xlim)\n axes.set_ylim(ylim)\n if std is not None:\n if type(std) == list:\n lower_list = [y[i] - std[i] for i in range(len(x))]\n upper_list = [y[i] + std[i] for i in range(len(x))]\n else:\n lower_list = [i - std for i in y]\n upper_list = [i + std for i in y]\n if smooth_fill is True:\n if smooth_fill_sigma is None:\n smooth_fill_sigma = (len(x) // 1000) + 1\n # smooth upper and lower parts of the filling\n lower_list = gaussian_filter1d(lower_list, sigma=smooth_fill_sigma)\n upper_list = gaussian_filter1d(upper_list, sigma=smooth_fill_sigma)\n plt.fill_between(x, lower_list, upper_list, color='b', alpha=.1)\n if add_legend:\n plt.legend(fontsize=22)\n plt.xticks(size=22)\n plt.yticks(size=22)\n if draw_grid:\n plt.grid()\n if xlabel is not None:\n plt.xlabel(xlabel, fontsize=22)\n if ylabel is not None:\n plt.ylabel(None, fontsize=22)\n if title is not None:\n plt.title(title, fontsize=22)\n\n if save_fig:\n if save_name is None:\n save_name = 'plot_-'+xlabel+' vs. 
' + ylabel + str(datetime.now()) + \".pdf\"\n plt.savefig(save_name)\n if display_fig:\n plt.show()\n return fig, axes\n\n\ndef test_agent_performance(agent, env, device, num_tests=10, agent_id=999, mode='supervised'):\n agent.eval()\n rewards_so_far = []\n time_steps_so_far = []\n agent_type = agent.action_space_type\n actions_dict = defaultdict(int)\n # print(agent_id, 'starting testing of agent', agent.dim)\n for test in range(num_tests):\n done = False\n observation = env.reset()\n i = 0\n time_step = 0\n while not done:\n action = agent.get_action(agent(observation), context='test')\n\n if agent_type == 'discrete':\n actions_dict[action] += 1\n elif agent_type == 'continuous':\n action = action.reshape(agent.dim[-1],)\n observation, reward, done, info = env.step(action)\n # if mode == 'ga':\n # if i < -200:\n # done = True\n i += reward\n time_step += 1\n rewards_so_far.append(i)\n time_steps_so_far.append(time_step)\n if mode == 'ga':\n return np.mean(rewards_so_far), np.std(rewards_so_far), np.mean(time_steps_so_far)\n else:\n return np.mean(rewards_so_far), np.std(rewards_so_far), np.mean(time_steps_so_far), actions_dict\n\n\ndef test_generator_performance(random_actor_sampler, generator, actual_test_env, config, generator_input_sampler, multi=True, mode='normal'):\n multi_performances = []\n trained_agents = []\n if mode == 'retest':\n outer_test_loops = config.retest_generator_testing_loops\n inner_test_loops = config.retest_actor_testing_loops\n else:\n outer_test_loops = config.generator_testing_loops\n inner_test_loops = config.actor_testing_loops\n count = Counter(defaultdict(int))\n time_steps = []\n for i in range(outer_test_loops):\n # new_actor = get_random_agent(config.state_dim, config.action_dim, config.env_config.action_space_type, batch_norm=config.batch_norm)\n new_actor = random_actor_sampler.sample()\n new_actor_opt = torch.optim.SGD(new_actor.parameters(), lr=config.actor_init_learning_rate)\n for inner_loop_num in range(config.inner_loop_iterations):\n new_actor_opt.zero_grad()\n actor_criterion = torch.nn.MSELoss(reduction='sum')\n # softmax_actor_predicted_actions = new_actor(generator(get_generator_input()))\n generator_input, actor_target_output = generator_input_sampler.sample()\n softmax_actor_predicted_actions = new_actor(generator(generator_input), source='generator')\n new_actor_loss = actor_criterion(softmax_actor_predicted_actions, actor_target_output)\n new_actor_loss.backward()\n new_actor_opt.step()\n trained_agents.append(new_actor)\n if multi is False:\n performances_mean = []\n performances_std = []\n for agent in trained_agents:\n performance = test_agent_performance(agent, actual_test_env, config.dev, num_tests=inner_test_loops, mode='ga')\n performances_mean.append(performance[0])\n performances_std.append(performance[1])\n time_steps.append(performance[2])\n # count += Counter(performance[3])\n else:\n pool = multiprocessing.Pool(5)\n envs_list = []\n for i in range(len(trained_agents)):\n envs_list.append(copy.deepcopy(actual_test_env))\n ids = [i for i in range(len(trained_agents))]\n multi_performances = pool.starmap(test_agent_performance, zip(trained_agents, envs_list, repeat(config.dev), repeat(inner_test_loops), ids, repeat('ga')))\n performances_mean, performances_std, time_steps = zip(*multi_performances)\n pool.close()\n\n return np.mean(performances_mean), np.mean(performances_std), np.mean(time_steps), sorted(dict(count).items())\n\n\ndef check_convergence_of_generator(config, random_actor_sampler, current_generator_performance, 
generator, test_env, generator_input_sampler):\n def reached_threshold(generator_performance):\n return generator_performance[0] >= config.env_config.reward_threshold and generator_performance[1] <= config.env_config.reward_std_threshold\n\n if reached_threshold(current_generator_performance):\n print('Crossed threshold once, testing again.')\n final_test_performance_mean = [current_generator_performance[0]]\n final_test_performance_std = [current_generator_performance[1]]\n for i in range(1):\n extra_generator_performance = test_generator_performance(random_actor_sampler, generator, test_env, config,\n generator_input_sampler, multi=config.multi,\n mode='retest')\n final_test_performance_mean.append(extra_generator_performance[0])\n final_test_performance_std.append(extra_generator_performance[1])\n final_test_performance = (np.mean(final_test_performance_mean), np.mean(final_test_performance_std))\n if reached_threshold(final_test_performance):\n config.ended_early = True\n config.converged_performance_mean = final_test_performance[0]\n config.converged_performance_std = final_test_performance[1]\n return config.ended_early\n\n\ndef check_convergence_of_actor(config, actor, current_actor_performance, test_env):\n def reached_threshold(actor_performance):\n return actor_performance[0] >= config.env_config.reward_threshold and actor_performance[1] <= config.env_config.reward_std_threshold\n\n if reached_threshold(current_actor_performance):\n print('Crossed threshold once, testing again.')\n final_test_performance_mean = [current_actor_performance[0]]\n final_test_performance_std = [current_actor_performance[1]]\n for i in range(1):\n extra_actor_performance = test_agent_performance(actor, test_env, config.dev)\n final_test_performance_mean.append(extra_actor_performance[0])\n final_test_performance_std.append(extra_actor_performance[1])\n final_test_performance = (np.mean(final_test_performance_mean), np.mean(final_test_performance_std))\n if reached_threshold(final_test_performance):\n config.ended_early = True\n config.converged_performance_mean = final_test_performance[0]\n config.converged_performance_std = final_test_performance[1]\n return config.ended_early\n\n\ndef generate_backprop_plots(config, logs, show_plots=True):\n\n generator_losses_smoothed = gaussian_filter1d(logs.meta_losses, sigma=config.plot_smoothing_sigma)\n plot_fig([i for i in range(len(generator_losses_smoothed))],\n generator_losses_smoothed,\n title=\"Meta Losses\", draw_grid=True, xlabel=\"steps\", ylabel=\"generator loss\", add_legend=True,\n label=\"generator loss\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Generator_losses.pdf'))\n\n critic_losses_smoothed = gaussian_filter1d(logs.critic_losses, sigma=config.plot_smoothing_sigma)\n plot_fig([i for i in range(len(critic_losses_smoothed))],\n critic_losses_smoothed,\n title=\"Critic Losses\", draw_grid=True, xlabel=\"steps\", ylabel=\"critic loss\", add_legend=True,\n label=\"critic loss\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Critic_losses.pdf'))\n\n actor_performances_mean_plot_smoothed = gaussian_filter1d(logs.new_actor_performances_mean, sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(actor_performances_mean_plot_smoothed))],\n actor_performances_mean_plot_smoothed, std=logs.new_actor_performances_std,\n title=\"New actor performance\", draw_grid=True, xlabel=\"steps\", ylabel=\"a2c actor perf\", add_legend=True,\n 
label=\"New actor performance\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_perf_smoothed.pdf'),\n ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high],\n smooth_fill=False, smooth_fill_sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(logs.new_actor_performances_mean))],\n logs.new_actor_performances_mean,\n std=logs.new_actor_performances_std, title=str(config.algorithm)+\" Actor Performance\", draw_grid=True, xlabel=\"steps\",\n ylabel=\"reward\", add_legend=True, label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_Perf.pdf'), ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high])\n\n\ndef generate_rl_plots(config, logs, show_plots=True):\n actor_losses_smoothed = gaussian_filter1d(logs.actor_losses, sigma=config.plot_smoothing_sigma)\n plot_fig([i for i in range(len(actor_losses_smoothed))],\n actor_losses_smoothed,\n title=\"Actor Losses\", draw_grid=True, xlabel=\"steps\", ylabel=\"actor loss\", add_legend=True,\n label=\"actor loss\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_losses.pdf'))\n\n critic_losses_smoothed = gaussian_filter1d(logs.critic_losses, sigma=config.plot_smoothing_sigma)\n plot_fig([i for i in range(len(critic_losses_smoothed))],\n critic_losses_smoothed,\n title=\"Critic Losses\", draw_grid=True, xlabel=\"steps\", ylabel=\"critic loss\", add_legend=True,\n label=\"critic loss\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Critic_losses.pdf'))\n\n actor_performances_mean_plot_smoothed = gaussian_filter1d(logs.actor_performances_mean, sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(actor_performances_mean_plot_smoothed))],\n actor_performances_mean_plot_smoothed, std=logs.actor_performances_std,\n title=\"Actor performance\", draw_grid=True, xlabel=\"steps\", ylabel=\"reward\", add_legend=True,\n label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_perf_smoothed.pdf'),\n ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high],\n smooth_fill=False, smooth_fill_sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(logs.actor_performances_mean))],\n logs.actor_performances_mean,\n std=logs.actor_performances_std, title=\"Actor Performance\", draw_grid=True, xlabel=\"steps\",\n ylabel=\"reward\", add_legend=True, label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_Perf.pdf'), ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high])\n\n\ndef generate_ga_plots(config, logs, show_plots=True):\n generator_performances_mean_smoothed = gaussian_filter1d(logs.generator_performance_mean,\n sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(generator_performances_mean_smoothed))],\n generator_performances_mean_smoothed, std=logs.generator_performance_std,\n title=\"Generator-actor performance\", draw_grid=True, xlabel=\"steps\", ylabel=\"actor perf\", add_legend=True,\n label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'GA-Generator-actor_perf_smoothed.pdf'), ylim=[config.env_config.plot_performance_low, 
config.env_config.plot_performance_high], smooth_fill=True,\n smooth_fill_sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(logs.generator_performance_mean))],\n logs.generator_performance_mean,\n std=logs.generator_performance_std, title=\"Generator-actor performance\", draw_grid=True, xlabel=\"steps\",\n ylabel=\"Actor perf\", add_legend=True, label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'GA-Generator-actor_perf.pdf'), ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high])\n\n\ndef generate_all_logs(config, log):\n time_taken = time.time() - config.run_id\n config_file_name = os.path.join(config.log_path, 'config.log')\n with open(config_file_name, 'a+') as f:\n f.write('time_taken--' + str(time_taken) + '\\n')\n variables = vars(config)\n for item in variables:\n f.write(str(item) + '--' + str(variables[item]))\n f.write('\\n')\n log_file_name = os.path.join(config.log_path, 'data.log')\n with open(log_file_name, 'a+') as f:\n variables = vars(log)\n for item in variables:\n f.write(str(item) + '--' + str(variables[item]))\n f.write('\\n')\n\n\ndef save_meta_models(generator, critic, save_path):\n torch.save(generator.state_dict(), os.path.join(save_path, 'generator.pt'))\n torch.save(critic.state_dict(), os.path.join(save_path, 'critic.pt'))\n\n\ndef save_rl_models(actor, critic, save_path):\n torch.save(actor.state_dict(), os.path.join(save_path, 'actor.pt'))\n torch.save(critic.state_dict(), os.path.join(save_path, 'critic.pt'))\n\n\ndef save_object(obj, filename):\n with open(filename, 'wb') as output: # Overwrites any existing file.\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n\n\ndef generate_discrete_one_hot_output(action_space_size, num_generator_samples):\n # Creating expected output for the generator\n # num_generator_samples must be divisible by action_space_size\n with torch.no_grad():\n indices = list(np.linspace(0, num_generator_samples, num=action_space_size, endpoint=False, dtype=np.int8))\n inclusive_indices = list(np.linspace(0, num_generator_samples, num=action_space_size+1, dtype=np.int8))\n generator_one_hot_expected_actions = torch.zeros((num_generator_samples, action_space_size))\n for idx, num in enumerate(indices):\n generator_one_hot_expected_actions[num:inclusive_indices[idx+1], idx] += 1\n return generator_one_hot_expected_actions"
] | [
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"torch.zeros",
"matplotlib.pyplot.plot",
"numpy.mean",
"torch.no_grad",
"matplotlib.pyplot.gca",
"numpy.std",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.fill_between",
"scipy.ndimage.filters.gaussian_filter1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
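The `generate_discrete_one_hot_output` helper in the record above builds a one-hot target matrix that splits the generator's expected outputs evenly across the action space. A minimal NumPy sketch of the same construction, with illustrative names (`one_hot_targets` is not part of the repository), assuming as the original comment states that the sample count is divisible by the action count:

```python
import numpy as np

def one_hot_targets(action_space_size: int, num_samples: int) -> np.ndarray:
    # num_samples is assumed to be divisible by action_space_size,
    # mirroring the comment in the original helper.
    assert num_samples % action_space_size == 0
    block = num_samples // action_space_size
    targets = np.zeros((num_samples, action_space_size))
    for action in range(action_space_size):
        # rows [action*block, (action+1)*block) expect this action
        targets[action * block:(action + 1) * block, action] = 1.0
    return targets

print(one_hot_targets(action_space_size=3, num_samples=6))
# [[1. 0. 0.]
#  [1. 0. 0.]
#  [0. 1. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [0. 0. 1.]]
```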
ngbsLab/Korean-Speech-Recognition | [
"3867bf7d23222da6812c9b98a93d3c6f7b3c80fc"
] | [
"package/loss.py"
] | [
"import torch\nimport torch.nn as nn\n\nclass LabelSmoothingLoss(nn.Module):\n \"\"\"\n Provides Label-Smoothing loss.\n\n Args:\n class_num (int): the number of classfication\n ignore_index (int): Indexes that are ignored when calculating loss\n smoothing (float): ratio of smoothing (confidence = 1.0 - smoothing)\n dim (int): dimention of calculation loss\n logit (torch.Tensor): probability distribution value from model and it has a logarithm shape\n target (torch.Tensor): ground-thruth encoded to integers which directly point a word in label\n\n Returns: label_smoothed\n - **label_smoothed** (float): sum of loss\n\n Reference:\n https://github.com/pytorch/pytorch/issues/7455\n \"\"\"\n def __init__(self, class_num, ignore_index, smoothing=0.1, dim=-1):\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.class_num = class_num\n self.dim = dim\n self.ignore_index = ignore_index\n\n def forward(self, logit, target):\n with torch.no_grad():\n label_smoothed = torch.zeros_like(logit)\n label_smoothed.fill_(self.smoothing / (self.class_num - 1))\n label_smoothed.scatter_(1, target.data.unsqueeze(1), self.confidence)\n label_smoothed[target == self.ignore_index, :] = 0\n\n return torch.sum(-label_smoothed * logit)"
] | [
[
"torch.no_grad",
"torch.zeros_like",
"torch.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
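Assuming the `LabelSmoothingLoss` class in the record above is importable from its listed file path (`package/loss.py`), a usage sketch might look like the following; the batch size, class count, and padding index are made-up values, and `forward` is fed log-probabilities as the docstring requires:

```python
import torch
import torch.nn.functional as F
from package.loss import LabelSmoothingLoss  # path taken from the record's file_path field

batch_size, num_classes, pad_id = 4, 10, 0
criterion = LabelSmoothingLoss(class_num=num_classes, ignore_index=pad_id, smoothing=0.1)

logits = torch.randn(batch_size, num_classes)   # raw model outputs
log_probs = F.log_softmax(logits, dim=-1)       # the loss expects log-probabilities
targets = torch.tensor([3, 7, pad_id, 1])       # rows equal to pad_id contribute no loss

loss = criterion(log_probs, targets)            # scalar: sum of the smoothed NLL terms
print(loss.item())
```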
bhevencious/EvalNE | [
"a62bd11901ea891535f6cb2a05e7abb65b1f3e6f"
] | [
"evalne/evaluation/pipeline.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Mara Alexandru Cristian\n# Contact: [email protected]\n# Date: 18/12/2018\n\n# The manager module contains functions and classes for reading, parsing and using a configuration file to\n# run a complete evaluation of network embedding methods.\n\nfrom __future__ import division\n\nimport os\n\nfrom evalne.utils import util\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import GridSearchCV\n\n\nclass EvalSetup(object):\n r\"\"\"\n This class is a wrapper that parses the config file and provides the options as properties of the class.\n Also performs basic input checks.\n\n Parameters\n ----------\n configpath : basestring\n The path of the configuration file.\n \"\"\"\n\n def __init__(self, configpath):\n # Import config parser\n try:\n from ConfigParser import ConfigParser\n except ImportError:\n from configparser import ConfigParser\n\n # Read the configuration file\n config = ConfigParser()\n config.read(configpath)\n self._config = config\n\n self._check_inpaths()\n self._check_methods('opne')\n self._check_methods('other')\n self._checkparams()\n self._check_edges()\n self._check_task()\n\n def _check_task(self):\n task = self.__getattribute__('task')\n if task not in ['lp', 'nc', 'nr']:\n raise ValueError('Incorrect value for `TASK`. Options are: `lp`, `nc` or `nr`.')\n if self.__getattribute__('task') == 'lp' and self.__getattribute__('lp_num_edge_splits') is None:\n raise ValueError('Parameter `LP_NUM_EDGE_SPLITS` needs to be defined.')\n if self.__getattribute__('task') == 'nr' and self.__getattribute__('nr_edge_samp_frac') is None:\n raise ValueError('Parameter `NR_EDGE_SAMP_FRAC` needs to be defined.')\n if self.__getattribute__('task') == 'nc':\n if self.__getattribute__('nc_num_node_splits') is None or self.__getattribute__('nc_node_fracs') is None:\n raise ValueError('Parameters `NC_NUM_NODE_SPLITS` and `NC_NODE_FRACS` need to be defined.')\n if all(x == 'ne' for x in self.__getattribute__('embtype_other')):\n pass\n else:\n raise ValueError('TASK = `nc` is currently only supported for node embedding methods.')\n\n def _check_edges(self):\n if self.__getattribute__('traintest_frac') is None or self.__getattribute__('trainvalid_frac') is None:\n raise ValueError('Train/test and train/validation fractions are required!')\n if self.__getattribute__('traintest_frac') == 0.0:\n raise ValueError('The train/test fraction, `TRAINTEST_FRAC`, can not be 0!')\n if self.__getattribute__('trainvalid_frac') == 0.0:\n raise ValueError('The train/valid fraction, `TRAINVALID_FRAC`, can not be 0!')\n if self.__getattribute__('fe_ratio') == 0.0:\n raise ValueError('The ratio of false edges, `FE_RATIO`, can not be 0!')\n\n def _check_inpaths(self):\n numnws = len(self.__getattribute__('names'))\n if self.__getattribute__('task') == 'nc' and self.__getattribute__('labelpaths') is None:\n raise ValueError('LABELPATHS for each network are required for node classification!')\n for k in self._config.options('NETWORKS'):\n if self.__getattribute__('task') == 'nc':\n if k != 'directed' and len(self.__getattribute__(k)) != numnws:\n raise ValueError('Parameter `{}` in `NETWORKS` section does not have the required num. 
entries ({})'\n .format(k, self.__getattribute__(k)))\n else:\n if k != 'directed' and k != 'labelpaths' and len(self.__getattribute__(k)) != numnws:\n raise ValueError('Parameter `{}` in `NETWORKS` section does not have the required num. entries ({})'\n .format(k, self.__getattribute__(k)))\n # Check if the input file exist\n for path in self.__getattribute__('inpaths'):\n if not os.path.exists(path):\n raise ValueError('Input network path {} does not exist'.format(path))\n\n def _check_methods(self, library):\n names = self.__getattribute__('names_' + library)\n methods = self.__getattribute__('methods_' + library)\n if names is not None and methods is not None and len(names) != len(methods):\n raise ValueError('Mismatch in the number of `NAMES` and `METHODS` to run in section `{} METHODS`'\n .format(library))\n\n def _checkparams(self):\n # Check if the maximize attribute is a correct one\n if self.__getattribute__('task') == 'nc':\n if self.__getattribute__('maximize') not in ['f1_micro', 'f1_macro', 'f1_weighted']:\n raise ValueError('The selected metric in `REPORT.MAXIMIZE` does not exist!')\n # Check if the scores attribute is a correct one\n if self.__getattribute__('scores') not in ['', 'f1_micro', 'f1_macro', 'f1_weighted', 'all']:\n raise ValueError('The selected metric in `REPORT.SCORES` does not exist!')\n else:\n if self.__getattribute__('maximize') not in ['auroc', 'f_score', 'precision', 'recall',\n 'accuracy', 'fallout', 'miss']:\n raise ValueError('The selected metric in `REPORT.MAXIMIZE` does not exist!')\n # Check if the scores attribute is a correct one\n if self.__getattribute__('scores') not in ['', 'auroc', 'f_score', 'precision', 'recall', 'accuracy',\n 'fallout', 'miss', 'all']:\n raise ValueError('The selected metric in `REPORT.SCORES` does not exist!')\n # Check if the curves attribute is a correct one\n if self.__getattribute__('curves') not in ['', 'roc', 'pr', 'all']:\n raise ValueError('The value of `REPORT.CURVES` is incorrect!')\n\n def getlist(self, section, option, dtype):\n r\"\"\"\n Returns option as a list of specified type, split by any kind of white space.\n\n Parameters\n ----------\n section : basestring\n The config file section name.\n option : basestring\n The config file option name.\n dtype : primitive type\n The type to which the output should be cast.\n\n Returns\n -------\n list : list\n A list of elements cast to the specified primitive type.\n \"\"\"\n res = self._config.get(section, option).split()\n if len(res) == 0 or res[0] == '' or res[0] == 'None':\n return None\n else:\n return list(map(dtype, res))\n\n def getboollist(self, section, option):\n r\"\"\"\n Returns option as a list of booleans split by any kind of white space.\n Elements such as 'True', 'true', '1', 'yes', 'on' are considered True.\n Elements such as 'False', 'false', '0', 'no', 'off' are considered False.\n\n Parameters\n ----------\n section : basestring\n The config file section name.\n option : basestring\n The config file option name.\n\n Returns\n -------\n list : list\n A list of booleans.\n \"\"\"\n res = self._config.get(section, option).split()\n if len(res) == 0 or res[0] == '' or res[0] == 'None':\n return None\n else:\n r = list()\n for elem in res:\n if elem in ['True', 'true', '1', 'yes', 'on']:\n r.append(True)\n elif elem in ['False', 'false', '0', 'no', 'off']:\n r.append(False)\n return r\n\n def getlinelist(self, section, option):\n r\"\"\"\n Returns option as a list of string, split specifically by a newline.\n\n Parameters\n ----------\n section 
: basestring\n The config file section name.\n option : basestring\n The config file option name.\n\n Returns\n -------\n list : list\n A list of strings.\n \"\"\"\n res = self._config.get(section, option).split('\\n')\n if len(res) == 0 or res[0] == '' or res[0] == 'None':\n return None\n else:\n return list(res)\n\n def getseplist(self, section, option):\n r\"\"\"\n Processes an options which contains a list of separators.\n Transforms \\s, \\t and \\n to white space, tab and new line respectively\n\n Parameters\n ----------\n section : basestring\n The config file section name.\n option : basestring\n The config file option name.\n\n Returns\n -------\n list : list\n A list of strings.\n \"\"\"\n separators = self.getlist(section, option, str)\n res = list()\n for sep in separators:\n s = sep.strip('\\'')\n if s == '\\\\t':\n s = '\\t'\n elif s == '\\\\s':\n s = ' '\n elif s == '\\\\n':\n s = '\\n'\n res.append(s)\n return list(res)\n\n def gettuneparams(self, library):\n r\"\"\"\n Processes the tune parameters option. Generates a list of Nones the size of the number of methods.\n The list is filled in order with each line found in the TUNE_PARAMS option.\n\n Parameters\n ----------\n library : basestring\n This parameter indicates if the TUNE_PARAMETERS option processed if from OPNE METHODS of OTHER METHODS.\n\n Returns\n -------\n tune_params : list\n A list of string containing the parameters that need to be tuned.\n \"\"\"\n methods = self.__getattribute__('methods_' + library)\n if library == 'opne':\n tune_params = self.getlinelist('OPENNE METHODS', 'tune_params_opne')\n elif library == 'other':\n tune_params = self.getlinelist('OTHER METHODS', 'tune_params_other')\n else:\n raise ValueError('Attribute name {}, does not exist'.format(library))\n if tune_params is None:\n tune_params = list()\n for i in range(len(methods) - len(tune_params)):\n tune_params.append(None)\n return tune_params\n\n @property\n def task(self):\n return self._config.get('GENERAL', 'task')\n\n @property\n def lp_num_edge_splits(self):\n return self._config.getint('GENERAL', 'lp_num_edge_splits')\n\n @property\n def nc_num_node_splits(self):\n return self._config.getint('GENERAL', 'nc_num_node_splits')\n\n @property\n def nc_node_fracs(self):\n return self.getlist('GENERAL', 'nc_node_fracs', float)\n\n @property\n def nr_edge_samp_frac(self):\n aux = self._config.getfloat('GENERAL', 'nr_edge_samp_frac')\n if aux > 1.0:\n return aux/100\n else:\n return aux\n\n @property\n def edge_embedding_methods(self):\n return self.getlist('GENERAL', 'edge_embedding_methods', str)\n\n @property\n def lp_model(self):\n model = self._config.get('GENERAL', 'lp_model')\n if model == 'LogisticRegression':\n return LogisticRegression(solver='liblinear')\n elif model == 'LogisticRegressionCV':\n return LogisticRegressionCV(Cs=10, cv=5, penalty='l2', scoring='roc_auc', solver='lbfgs', max_iter=100)\n elif model == 'DecisionTreeClassifier':\n return DecisionTreeClassifier()\n elif model == 'SVM':\n parameters = {'C': [0.1, 1, 10, 100, 1000]}\n return GridSearchCV(LinearSVC(), parameters, cv=5)\n else:\n return util.auto_import(model)\n\n @property\n def embed_dim(self):\n return self._config.getint('GENERAL', 'embed_dim')\n\n @property\n def timeout(self):\n res = self._config.get('GENERAL', 'timeout')\n if res == '' or res == 'None' or res == 'NONE':\n return None\n else:\n return int(res)\n\n @property\n def verbose(self):\n return self._config.getboolean('GENERAL', 'verbose')\n\n @property\n def seed(self):\n val = 
self._config.get('GENERAL', 'seed')\n if val == '' or val == 'None':\n return None\n else:\n return int(val)\n\n @property\n def names(self):\n return self.getlist('NETWORKS', 'names', str)\n\n @property\n def inpaths(self):\n return self.getlinelist('NETWORKS', 'inpaths')\n\n @property\n def directed(self):\n return self._config.getboolean('NETWORKS', 'directed')\n\n @property\n def separators(self):\n return self.getseplist('NETWORKS', 'separators')\n\n @property\n def comments(self):\n return self.getseplist('NETWORKS', 'comments')\n\n @property\n def labelpaths(self):\n return self.getlinelist('NETWORKS', 'labelpaths')\n\n @property\n def relabel(self):\n return self._config.getboolean('PREPROCESSING', 'relabel')\n\n @property\n def del_selfloops(self):\n return self._config.getboolean('PREPROCESSING', 'del_selfloops')\n\n @property\n def save_prep_nw(self):\n return self._config.getboolean('PREPROCESSING', 'save_prep_nw')\n\n @property\n def write_stats(self):\n return self._config.getboolean('PREPROCESSING', 'write_stats')\n\n @property\n def delimiter(self):\n return self._config.get('PREPROCESSING', 'delimiter').strip('\\'')\n\n @property\n def traintest_frac(self):\n return self._config.getfloat('EDGESPLIT', 'traintest_frac')\n\n @property\n def trainvalid_frac(self):\n return self._config.getfloat('EDGESPLIT', 'trainvalid_frac')\n\n @property\n def split_alg(self):\n return self._config.get('EDGESPLIT', 'split_alg')\n\n @property\n def owa(self):\n return self._config.getboolean('EDGESPLIT', 'owa')\n\n @property\n def fe_ratio(self):\n return self._config.getfloat('EDGESPLIT', 'fe_ratio')\n\n @property\n def lp_baselines(self):\n return self.getlinelist('BASELINES', 'lp_baselines')\n\n @property\n def neighbourhood(self):\n return self.getlist('BASELINES', 'neighbourhood', str)\n\n @property\n def names_opne(self):\n return self.getlist('OPENNE METHODS', 'names_opne', str)\n\n @property\n def methods_opne(self):\n return self.getlinelist('OPENNE METHODS', 'methods_opne')\n\n @property\n def tune_params_opne(self):\n return self.gettuneparams('opne')\n\n @property\n def names_other(self):\n return self.getlist('OTHER METHODS', 'names_other', str)\n\n @property\n def embtype_other(self):\n return self.getlist('OTHER METHODS', 'embtype_other', str)\n\n @property\n def write_weights_other(self):\n return self.getboollist('OTHER METHODS', 'write_weights_other')\n\n @property\n def write_dir_other(self):\n return self.getboollist('OTHER METHODS', 'write_dir_other')\n\n @property\n def methods_other(self):\n return self.getlinelist('OTHER METHODS', 'methods_other')\n\n @property\n def tune_params_other(self):\n return self.gettuneparams('other')\n\n @property\n def output_format_other(self):\n return self.getlinelist('OTHER METHODS', 'output_format_other')\n\n @property\n def input_delim_other(self):\n return self.getseplist('OTHER METHODS', 'input_delim_other')\n\n @property\n def output_delim_other(self):\n return self.getseplist('OTHER METHODS', 'output_delim_other')\n\n @property\n def maximize(self):\n return self._config.get('REPORT', 'maximize')\n\n @property\n def scores(self):\n return self._config.get('REPORT', 'scores')\n\n @property\n def curves(self):\n return self._config.get('REPORT', 'curves')\n\n @property\n def precatk_vals(self):\n return self.getlist('REPORT', 'precatk_vals', int)\n"
] | [
[
"sklearn.linear_model.LogisticRegressionCV",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.svm.LinearSVC"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
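The `lp_model` property of `EvalSetup` in the record above dispatches a config string to a scikit-learn estimator. Reduced to a standalone function for illustration (a sketch: `build_lp_model` is not part of EvalNE, and the fallback to `util.auto_import` is omitted):

```python
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier

def build_lp_model(name: str):
    # Maps the LP_MODEL config value to a binary classifier for link prediction.
    if name == 'LogisticRegression':
        return LogisticRegression(solver='liblinear')
    if name == 'LogisticRegressionCV':
        return LogisticRegressionCV(Cs=10, cv=5, penalty='l2', scoring='roc_auc',
                                    solver='lbfgs', max_iter=100)
    if name == 'DecisionTreeClassifier':
        return DecisionTreeClassifier()
    if name == 'SVM':
        # Grid search over the regularization strength of a linear SVM.
        return GridSearchCV(LinearSVC(), {'C': [0.1, 1, 10, 100, 1000]}, cv=5)
    raise ValueError('Unknown link-prediction model: {}'.format(name))

clf = build_lp_model('SVM')
```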
GiovanniCalore/BigGAN-Tensorflow-master | [
"1fcf72fc8b9cbfdd047b9641f656afcfd0972604"
] | [
"metrics/perceptual_path_length.py"
] | [
"# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\n\"\"\"Perceptual Path Length (PPL).\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport dnnlib.tflib as tflib\n\nfrom metrics import metric_base\nimport misc\nimport sys\n\n#----------------------------------------------------------------------------\n\n# Normalize batch of vectors.\ndef normalize(v):\n return v / tf.sqrt(tf.reduce_sum(tf.square(v), axis=-1, keepdims=True))\n\n# Spherical interpolation of a batch of vectors.\ndef slerp(a, b, t):\n a = normalize(a)\n b = normalize(b)\n d = tf.reduce_sum(a * b, axis=-1, keepdims=True)\n p = t * tf.math.acos(d)\n c = normalize(b - d * a)\n d = a * tf.math.cos(p) + c * tf.math.sin(p)\n return normalize(d)\n\n#----------------------------------------------------------------------------\n\nclass PPL(metric_base.MetricBase):\n def __init__(self, num_samples, epsilon, space, sampling, crop, minibatch_per_gpu, Gs_overrides, **kwargs):\n assert space in ['z', 'w']\n assert sampling in ['full', 'end']\n super().__init__(**kwargs)\n self.num_samples = num_samples\n self.epsilon = epsilon\n self.space = space\n self.sampling = sampling\n self.crop = crop\n self.minibatch_per_gpu = minibatch_per_gpu\n self.Gs_overrides = Gs_overrides\n\n def _evaluate(self, sess, fake_images_random_normal, interp_images, Gs_kwargs, num_gpus):\n #Gs_kwargs = dict(Gs_kwargs)\n #Gs_kwargs.update(self.Gs_overrides)\n minibatch_size = num_gpus * self.minibatch_per_gpu\n\n # Construct TensorFlow graph.\n distance_expr = []\n for gpu_idx in range(num_gpus):\n with tf.device('/gpu:%d' % gpu_idx):\n #Gs_clone = Gs.clone()\n #noise_vars = [var for name, var in Gs_clone.components.synthesis.vars.items() if name.startswith('noise')]\n\n # Generate random latents and interpolation t-values.\n #lat_t01 = tf.random_normal(shape=[self.minibatch_per_gpu * 2, 128])\n #lerp_t = tf.random_uniform([self.minibatch_per_gpu], 0.0, 1.0 if self.sampling == 'full' else 0.0)\n #print(lat_t01)\n #print(lerp_t)\n #labels = tf.reshape(tf.tile(self._get_random_labels_tf(self.minibatch_per_gpu), [1, 2]), [self.minibatch_per_gpu * 2, -1])\n\n '''\n # Interpolate in W or Z.\n if self.space == 'w':\n print('ERROR')\n sys.exit()\n dlat_t01 = Gs_clone.components.mapping.get_output_for(lat_t01, labels, **Gs_kwargs)\n dlat_t01 = tf.cast(dlat_t01, tf.float32)\n dlat_t0, dlat_t1 = dlat_t01[0::2], dlat_t01[1::2]\n dlat_e0 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis])\n dlat_e1 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis] + self.epsilon)\n dlat_e01 = tf.reshape(tf.stack([dlat_e0, dlat_e1], axis=1), dlat_t01.shape)\n else: # space == 'z'\n lat_t0, lat_t1 = lat_t01[0::2], lat_t01[1::2]\n print(lat_t0)\n print(lat_t1)\n lat_e0 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis])\n lat_e1 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis] + self.epsilon)\n print(lat_e0)\n print(lat_e1)\n\n lat_e01 = tf.reshape(tf.stack([lat_e0, lat_e1], axis=1), lat_t01.shape)\n print(lat_e01)\n\n #dlat_e01 = Gs_clone.components.mapping.get_output_for(lat_e01, labels, **Gs_kwargs)\n dlat_e01 = Gs(lat_e01)\n sys.exit()\n '''\n\n # Synthesize images.\n #with tf.control_dependencies([var.initializer for var in noise_vars]): # use same noise inputs for the entire minibatch\n #images = Gs_clone.components.synthesis.get_output_for(dlat_e01, randomize_noise=False, 
**Gs_kwargs)\n images = tf.cast(tf.transpose(interp_images, perm=[0, 3, 1, 2]), tf.float32)\n\n '''\n # Crop only the face region.\n if self.crop:\n c = int(images.shape[2] // 8)\n images = images[:, :, c*3 : c*7, c*2 : c*6]\n\n # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.\n factor = images.shape[2] // 256\n if factor > 1:\n images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])\n images = tf.reduce_mean(images, axis=[3,5])\n\n '''\n # Scale dynamic range from [-1,1] to [0,255] for VGG.\n images = (images + 1) * (255 / 2)\n\n # Evaluate perceptual distance.\n img_e0, img_e1 = images[0::2], images[1::2]\n distance_measure = misc.load_pkl('http://d36zk2xti64re0.cloudfront.net/stylegan1/networks/metrics/vgg16_zhang_perceptual.pkl')\n distance_expr.append(distance_measure.get_output_for(img_e0, img_e1) * (1 / self.epsilon**2))\n\n # Sampling loop.\n all_distances = []\n for begin in range(0, self.num_samples, 8):\n print(\"PPL: \" + str(begin), end=\"\\r\")\n self._report_progress(begin, self.num_samples)\n all_distances += sess.run(distance_expr)\n all_distances = np.concatenate(all_distances, axis=0)\n\n # Reject outliers.\n lo = np.percentile(all_distances, 1, interpolation='lower')\n hi = np.percentile(all_distances, 99, interpolation='higher')\n filtered_distances = np.extract(np.logical_and(lo <= all_distances, all_distances <= hi), all_distances)\n self._report_result(np.mean(filtered_distances))\n\n#----------------------------------------------------------------------------\n"
] | [
[
"tensorflow.device",
"tensorflow.transpose",
"tensorflow.math.cos",
"tensorflow.reduce_sum",
"numpy.percentile",
"numpy.concatenate",
"tensorflow.math.sin",
"numpy.mean",
"tensorflow.square",
"tensorflow.math.acos",
"numpy.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
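The `normalize`/`slerp` helpers in the PPL record above implement spherical interpolation between batches of latent vectors. The same math restated in plain NumPy, as a sketch for checking values outside a TensorFlow graph (not code from the repository):

```python
import numpy as np

def normalize(v):
    # Scale each vector in the batch to unit L2 norm.
    return v / np.sqrt(np.sum(np.square(v), axis=-1, keepdims=True))

def slerp(a, b, t):
    # Spherical interpolation between batches of latent vectors.
    a, b = normalize(a), normalize(b)
    d = np.sum(a * b, axis=-1, keepdims=True)   # cosine of the angle between a and b
    p = t * np.arccos(d)                        # fraction t of that angle
    c = normalize(b - d * a)                    # component of b orthogonal to a
    return normalize(a * np.cos(p) + c * np.sin(p))

a, b = np.random.randn(2, 128), np.random.randn(2, 128)
mid = slerp(a, b, 0.5)                          # unit-norm points halfway along the arc
```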
rupei/probability | [
"4aa1ee652853a19c4e80d39216c3fa535ed3e589",
"4aa1ee652853a19c4e80d39216c3fa535ed3e589",
"4aa1ee652853a19c4e80d39216c3fa535ed3e589"
] | [
"tensorflow_probability/python/internal/backend/numpy/numpy_array.py",
"tensorflow_probability/python/experimental/mcmc/sample_fold.py",
"tensorflow_probability/python/distributions/half_cauchy.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Numpy implementations of TensorFlow general top-level functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n# Dependency imports\nimport numpy as np\nimport numpy as onp # pylint: disable=reimported\n\nfrom tensorflow_probability.python.internal.backend.numpy import _utils as utils\nfrom tensorflow_probability.python.internal.backend.numpy import ops\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import einsum\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import norm\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import tensordot\n\n\n__all__ = [\n 'concat',\n 'einsum',\n 'expand_dims',\n 'fill',\n 'gather',\n 'gather_nd',\n 'linspace',\n 'meshgrid',\n 'norm',\n 'one_hot',\n 'ones',\n 'ones_like',\n 'pad',\n 'range',\n 'rank',\n 'reshape',\n 'reverse',\n 'repeat',\n 'roll',\n 'searchsorted',\n 'shape',\n 'size',\n 'slice',\n 'split',\n 'squeeze',\n 'stack',\n 'tensordot',\n 'tile',\n 'transpose',\n 'unstack',\n 'where',\n 'zeros',\n 'zeros_like',\n # 'boolean_mask',\n # 'foldl',\n # 'foldr',\n]\n\n\nJAX_MODE = False\n\n\nif JAX_MODE:\n import jax # pylint: disable=g-import-not-at-top\n\n\ndef _astuple(x):\n try:\n return tuple(x)\n except TypeError:\n return x\n\n\ndef _gather( # pylint: disable=unused-argument\n params,\n indices,\n validate_indices=None,\n axis=None,\n batch_dims=0,\n name=None):\n \"\"\"gather.\"\"\"\n indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)\n if validate_indices is not None:\n raise NotImplementedError(\n 'Argument `validate_indices != None` is currently unimplemented.')\n if batch_dims < 0:\n raise NotImplementedError('Negative `batch_dims` is currently unsupported.')\n if axis is None:\n axis = batch_dims\n if axis < 0:\n axis = axis + len(params.shape)\n # NOTE: For only the numpy backend, this function could create a single result\n # ndarray and use in-place updates. 
For the Jax backend, this function\n # vmaps `np.take`.\n if JAX_MODE:\n take = lambda params, indices: np.take(params, indices, # pylint: disable=g-long-lambda\n axis=axis - batch_dims)\n take = functools.reduce(\n lambda g, f: f(g), [jax.vmap] * int(batch_dims),\n take\n )\n return take(params, indices)\n params = ops.convert_to_tensor(params)\n res = np.array([\n np.take(params[i], indices[i], axis=axis - batch_dims)\n for i in np.ndindex(*params.shape[:batch_dims])\n ])\n return np.reshape(\n res,\n params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis+1:])\n\n\ndef _args_to_matching_arrays(args_list, dtype_hint=None):\n \"\"\"Converts a list to array using the first element for dtype.\n\n This method is used to match the behavior of `tf.concat`.\n\n Args:\n args_list: A list or tuple of arguments.\n dtype_hint: An optional hint used when converting the args to tensors.\n Returns:\n A list of tensors.\n \"\"\"\n dtype = None\n for arg in args_list:\n if ops.is_tensor(arg):\n dtype = arg.dtype\n break\n if dtype is None:\n ret = []\n for arg in args_list:\n ret.append(ops.convert_to_tensor(arg, dtype, dtype_hint=dtype_hint))\n if dtype is None:\n dtype = ret[-1].dtype\n else:\n ret = [ops.convert_to_tensor(arg, dtype) for arg in args_list]\n return ret\n\n\ndef _concat(values, axis, name='concat'):\n del name\n if axis is None:\n raise ValueError('None values for `axis` argument not supported.')\n if not isinstance(values, (list, tuple)):\n values = [values]\n if len(values) == 1:\n return values[0]\n values = _args_to_matching_arrays(values)\n return np.concatenate(values, axis=axis)\n\n\ndef _gather_nd_single(params, indices):\n idx = tuple(np.moveaxis(indices, -1, 0))\n return params[idx]\n\n\ndef _gather_nd( # pylint: disable=unused-argument\n params,\n indices,\n batch_dims=0,\n name=None):\n \"\"\"gather_nd.\"\"\"\n indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)\n if batch_dims < 0:\n raise NotImplementedError('Negative `batch_dims` is currently unsupported.')\n if not JAX_MODE and batch_dims > 0:\n raise NotImplementedError(\n '`batch_dims > 0` currently unsupported in NumPy backend.')\n gather_nd_ = _gather_nd_single\n if JAX_MODE:\n gather_nd_ = functools.reduce(\n lambda g, f: f(g), [jax.vmap] * int(batch_dims),\n gather_nd_\n )\n return gather_nd_(params, indices)\n\n\ndef _linspace(start, stop, num, name=None, axis=0): # pylint: disable=unused-argument\n \"\"\"Match TF behavior with np.linspace.\"\"\"\n start = ops.convert_to_tensor(start)\n # Match TF weirdness arising from truediv(int32, int32) = float64\n if np.issubdtype(start.dtype, np.integer):\n start = start.astype(np.float64)\n stop = ops.convert_to_tensor(stop, dtype=start.dtype)\n num = ops.convert_to_tensor(num, dtype_hint=np.int32)\n if not np.issubdtype(num.dtype, np.integer):\n raise TypeError('`num` must be an integer but got {}'.format(num.dtype))\n num = num.astype(np.int32)\n return np.linspace(start, stop, num, axis=axis).astype(start.dtype)\n\n\ndef _one_hot( # pylint: disable=unused-argument\n indices,\n depth,\n on_value=None,\n off_value=None,\n axis=None,\n dtype=None,\n name=None):\n \"\"\"One hot.\"\"\"\n if on_value is None:\n on_value = 1\n if off_value is None:\n off_value = 0\n if dtype is None:\n dtype = utils.common_dtype([on_value, off_value], np.float32)\n indices = np.array(indices)\n depth = np.array(depth)\n pred = abs(np.arange(depth, dtype=indices.dtype) -\n indices[..., np.newaxis]) > 0\n y_out = np.where(pred, np.array(off_value, dtype), 
np.array(on_value, dtype))\n if axis is not None:\n y_out = np.moveaxis(y_out, -1, axis)\n return y_out\n\n\ndef _ones_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin,unused-argument\n return np.ones_like(input, dtype=utils.numpy_dtype(dtype))\n\n\n# TODO(b/136555907): Add unit-test.\ndef _pad( # pylint: disable=unused-argument\n tensor,\n paddings,\n mode='CONSTANT',\n constant_values=0,\n name=None):\n tensor = ops.convert_to_tensor(tensor)\n constant_values = ops.convert_to_tensor(constant_values)\n return np.pad(\n tensor, paddings,\n mode=mode.lower(),\n constant_values=constant_values)\n\n\ndef _range(start, limit=None, delta=1, dtype=None, name='range'): # pylint: disable=unused-argument\n \"\"\"Emulates tf.range.\"\"\"\n # Emulating dtype inference logic from tf.range\n dtype = utils.numpy_dtype(dtype)\n infer_dtype = lambda t: ops.convert_to_tensor(t, dtype=dtype).dtype\n # We must keep start, limit, and delta static np.array since they determine\n # the size of the result array, which JAX requires to be static.\n start = onp.array(start, dtype=infer_dtype(start))\n limit = None if limit is None else onp.array(limit, dtype=infer_dtype(limit))\n delta = onp.array(delta, dtype=infer_dtype(delta))\n if dtype is None:\n dtype_hierarchy = [np.int32, np.int64, np.float32, np.float64]\n inferred_dtype = max([arg.dtype for arg in [start, limit, delta]\n if arg is not None],\n key=dtype_hierarchy.index)\n else:\n inferred_dtype = dtype\n return np.arange(start, limit, delta).astype(inferred_dtype)\n\n\ndef _reverse(tensor, axis, name=None): # pylint: disable=unused-argument\n if np.array(axis).ndim == 0:\n return np.flip(tensor, axis)\n for ax in axis:\n tensor = np.flip(tensor, ax)\n return tensor\n\n\nif JAX_MODE:\n _searchsorted_vmap_sides = {\n side: jax.vmap(functools.partial(jax.numpy.searchsorted, side=side))\n for side in ('left', 'right')\n }\n\n\ndef _searchsorted( # pylint: disable=unused-argument\n sorted_sequence,\n values,\n side='left',\n out_type=np.int32,\n name=None):\n \"\"\"Find indices for insertion for list to remain sorted.\"\"\"\n if JAX_MODE:\n try:\n func = _searchsorted_vmap_sides[side]\n except KeyError:\n raise ValueError(\"'%s' is an invalid value for keyword 'side'\" % side)\n sorted_sequence_2d = np.reshape(sorted_sequence,\n (-1, sorted_sequence.shape[-1]))\n values_2d = np.reshape(values, (-1, values.shape[-1]))\n if sorted_sequence_2d.shape[0] != values_2d.shape[0]:\n raise ValueError('Leading dim_size of both tensors must match.')\n return np.reshape(func(sorted_sequence_2d, values_2d).astype(out_type),\n values.shape)\n # We don't use np.searchsorted in the numpy backend because it doesn't support\n # batching.\n sorted_sequence = sorted_sequence[..., np.newaxis, :]\n values = values[..., :, np.newaxis]\n if side == 'left':\n is_in_right_location = sorted_sequence < values\n elif side == 'right':\n is_in_right_location = sorted_sequence <= values\n return np.sum(is_in_right_location, axis=-1).astype(out_type)\n\n\ndef _shape(input, out_type=np.int32, name=None): # pylint: disable=redefined-builtin,unused-argument\n return ops.convert_to_tensor(ops.convert_to_tensor(input).shape).astype(\n out_type)\n\n\ndef _size(input, out_type=np.int32, name=None): # pylint: disable=redefined-builtin, unused-argument\n return np.asarray(np.prod(ops.convert_to_tensor(input).shape), dtype=out_type)\n\n\nbuiltin_slice = slice # pylint: disable=invalid-name\n\n\ndef _slice(input_, begin, size, name=None): # pylint: 
disable=unused-argument,redefined-outer-name\n slices = tuple(\n builtin_slice(b, b + s if s != -1 else None) for b, s in zip(begin, size))\n return input_[slices]\n\n\ndef _split(value, num_or_size_splits, axis=0, num=None, name='split'): # pylint: disable=unused-argument\n \"\"\"Map tf.split -> np.split.\"\"\"\n indices_or_sections = onp.array(num_or_size_splits)\n if indices_or_sections.ndim == 1:\n if any(idx == -1 for idx in indices_or_sections):\n # Numpy parameterizes by split indices and returns nsplits+1 arrays.\n total_splits = sum(idx for idx in indices_or_sections if idx != -1)\n remainder = int(max(0, np.array(value).shape[axis] - total_splits))\n indices_or_sections = [\n idx if idx != -1 else remainder for idx in indices_or_sections\n ]\n indices_or_sections = onp.cumsum(onp.array(indices_or_sections))[:-1]\n return np.split(value, indices_or_sections, axis)\n\n\ndef _transpose(a, perm=None, conjugate=False, name='transpose'): # pylint: disable=unused-argument\n x = np.transpose(a, perm)\n return np.conjugate(x) if conjugate else x\n\n\ndef _zeros_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin,unused-argument\n return np.zeros_like(input, dtype=utils.numpy_dtype(dtype))\n\n\n# --- Begin Public Functions --------------------------------------------------\n\n\nconcat = utils.copy_docstring(\n 'tf.concat',\n _concat)\n\n\nexpand_dims = utils.copy_docstring(\n 'tf.expand_dims',\n lambda input, axis, name=None: np.expand_dims(input, axis))\n\nfill = utils.copy_docstring(\n 'tf.fill',\n lambda dims, value, name=None: np.full(dims, value))\n\ngather = utils.copy_docstring(\n 'tf.gather',\n _gather)\n\ngather_nd = utils.copy_docstring(\n 'tf.gather_nd',\n _gather_nd)\n\nreverse = utils.copy_docstring('tf.reverse', _reverse)\n\nlinspace = utils.copy_docstring(\n 'tf.linspace',\n _linspace)\n\nmeshgrid = utils.copy_docstring(\n 'tf.meshgrid',\n np.meshgrid)\n\nnorm = utils.copy_docstring(\n 'tf.norm',\n norm)\n\none_hot = utils.copy_docstring(\n 'tf.one_hot',\n _one_hot)\n\nones = utils.copy_docstring(\n 'tf.ones',\n lambda shape, dtype=np.float32, name=None: np.ones( # pylint: disable=g-long-lambda\n shape, utils.numpy_dtype(dtype)))\n\nones_like = utils.copy_docstring(\n 'tf.ones_like',\n _ones_like)\n\npad = utils.copy_docstring(\n 'tf.pad',\n _pad)\n\nrange = utils.copy_docstring( # pylint: disable=redefined-builtin\n 'tf.range',\n _range)\n\nrank = utils.copy_docstring(\n 'tf.rank',\n lambda input, name=None: np.int32(np.array(input).ndim)) # pylint: disable=redefined-builtin,g-long-lambda\n\nrepeat = utils.copy_docstring(\n 'tf.repeat',\n lambda input, repeats, axis=None, name=None: np.repeat( # pylint: disable=g-long-lambda\n input, repeats, axis=axis))\n\nreshape = utils.copy_docstring(\n 'tf.reshape',\n lambda tensor, shape, name=None: np.reshape( # pylint: disable=g-long-lambda\n ops.convert_to_tensor(tensor), shape))\n\nroll = utils.copy_docstring(\n 'tf.roll',\n lambda input, shift, axis: np.roll(input, shift, axis)) # pylint: disable=unnecessary-lambda\n\nsearchsorted = utils.copy_docstring(\n 'tf.searchsorted',\n _searchsorted)\n\nshape = utils.copy_docstring(\n 'tf.shape',\n _shape)\n\nsize = utils.copy_docstring(\n 'tf.size',\n _size)\n\nslice = utils.copy_docstring( # pylint: disable=redefined-builtin\n 'tf.slice', _slice)\n\nsplit = utils.copy_docstring('tf.split', _split)\n\nsqueeze = utils.copy_docstring(\n 'tf.squeeze',\n lambda input, axis=None, name=None: np.squeeze(input, _astuple(axis)))\n\nstack = utils.copy_docstring(\n 'tf.stack', 
lambda values, axis=0, name='stack': np.moveaxis( # pylint: disable=g-long-lambda\n ops.convert_to_tensor(values), 0, axis))\n\ntile = utils.copy_docstring(\n 'tf.tile',\n lambda input, multiples, name=None: np.tile(np.array(input), multiples))\n\ntranspose = utils.copy_docstring(\n 'tf.transpose',\n _transpose)\n\nunstack = utils.copy_docstring(\n 'tf.unstack',\n lambda value, num=None, axis=0, name='unstack': list( # pylint: disable=g-long-lambda\n np.squeeze(x, axis=axis) for x in\n np.split(value, value.shape[axis] if num is None else num, axis)))\n\nwhere = utils.copy_docstring(\n 'tf.where',\n lambda condition, x=None, y=None, name=None: np.where(condition, x, y))\n\nzeros = utils.copy_docstring(\n 'tf.zeros',\n lambda shape, dtype=np.float32, name=None: np.zeros( # pylint: disable=g-long-lambda\n shape, utils.numpy_dtype(dtype)))\n\nzeros_like = utils.copy_docstring(\n 'tf.zeros_like',\n _zeros_like)\n",
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Drivers for streaming reductions framework.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport warnings\n\n# Dependency imports\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.experimental.mcmc import sample as exp_sample_lib\nfrom tensorflow_probability.python.experimental.mcmc import sample_discarding_kernel\nfrom tensorflow_probability.python.experimental.mcmc import tracing_reducer\nfrom tensorflow_probability.python.experimental.mcmc import with_reductions\nfrom tensorflow_probability.python.mcmc import sample\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\n\n__all__ = [\n 'sample_chain',\n 'sample_fold',\n]\n\n\ndef sample_fold(\n num_steps,\n current_state,\n previous_kernel_results=None,\n kernel=None,\n reducer=None,\n num_burnin_steps=0,\n num_steps_between_results=0,\n parallel_iterations=10,\n seed=None,\n name=None,\n):\n \"\"\"Computes the requested reductions over the `kernel`'s samples.\n\n To wit, runs the given `kernel` for `num_steps` steps, and consumes\n the stream of samples with the given `Reducer`s' `one_step` method(s).\n This runs in constant memory (unless a given `Reducer` builds a\n large structure).\n\n The driver internally composes the correct onion of `WithReductions`\n and `SampleDiscardingKernel` to implement the requested optionally\n thinned reduction; however, the kernel results of those applied\n Transition Kernels will not be returned. Hence, if warm-restarting\n reductions is desired, one should manually build the Transition Kernel\n onion and use `tfp.experimental.mcmc.step_kernel`.\n\n An arbitrary collection of `reducer` can be provided, and the resulting\n finalized statistic(s) will be returned in an identical structure.\n\n Args:\n num_steps: Integer or scalar `Tensor` representing the number of `Reducer`\n steps.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s).\n previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s.\n Warm-start for the auxiliary state needed by the given `kernel`.\n If not supplied, `sample_fold` will cold-start with\n `kernel.bootstrap_results`.\n kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step\n of the Markov chain.\n reducer: A (possibly nested) structure of `Reducer`s to be evaluated\n on the `kernel`'s samples. 
If no reducers are given (`reducer=None`),\n then `None` will be returned in place of streaming calculations.\n num_burnin_steps: Integer or scalar `Tensor` representing the number\n of chain steps to take before starting to collect results.\n Defaults to 0 (i.e., no burn-in).\n num_steps_between_results: Integer or scalar `Tensor` representing\n the number of chain steps between collecting a result. Only one out\n of every `num_steps_between_samples + 1` steps is included in the\n returned results. Defaults to 0 (i.e., no thinning).\n parallel_iterations: The number of iterations allowed to run in parallel. It\n must be a positive integer. See `tf.while_loop` for more details.\n seed: Optional seed for reproducible sampling.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'mcmc_sample_fold').\n\n Returns:\n reduction_results: A (possibly nested) structure of finalized reducer\n statistics. The structure identically mimics that of `reducer`.\n end_state: The final state of the Markov chain(s).\n final_kernel_results: `collections.namedtuple` of internal calculations\n used to advance the supplied `kernel`. These results do not include\n the kernel results of `WithReductions` or `SampleDiscardingKernel`.\n \"\"\"\n with tf.name_scope(name or 'mcmc_sample_fold'):\n num_steps = tf.convert_to_tensor(\n num_steps, dtype=tf.int32, name='num_steps')\n current_state = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(x, name='current_state'),\n current_state)\n reducer_was_none = False\n if reducer is None:\n reducer = []\n reducer_was_none = True\n reduction_kernel = with_reductions.WithReductions(\n inner_kernel=sample_discarding_kernel.SampleDiscardingKernel(\n inner_kernel=kernel,\n num_burnin_steps=num_burnin_steps,\n num_steps_between_results=num_steps_between_results),\n reducer=reducer,\n )\n end_state, final_kernel_results = exp_sample_lib.step_kernel(\n num_steps=num_steps,\n current_state=current_state,\n previous_kernel_results=previous_kernel_results,\n kernel=reduction_kernel,\n return_final_kernel_results=True,\n parallel_iterations=parallel_iterations,\n seed=seed,\n name=name,\n )\n reduction_results = nest.map_structure_up_to(\n reducer,\n lambda r, s: r.finalize(s),\n reducer,\n final_kernel_results.streaming_calculations,\n check_types=False)\n if reducer_was_none:\n reduction_results = None\n return (reduction_results,\n end_state,\n final_kernel_results.inner_results.inner_results)\n\n\ndef _trace_kernel_results(current_state, kernel_results):\n del current_state\n return kernel_results\n\n\ndef sample_chain(\n num_results,\n current_state,\n previous_kernel_results=None,\n kernel=None,\n num_burnin_steps=0,\n num_steps_between_results=0,\n trace_fn=_trace_kernel_results,\n return_final_kernel_results=False,\n parallel_iterations=10,\n seed=None,\n name=None,\n):\n \"\"\"Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.\n\n This function samples from a Markov chain at `current_state` whose\n stationary distribution is governed by the supplied `TransitionKernel`\n instance (`kernel`).\n\n This function can sample from multiple chains, in parallel. 
(Whether or not\n there are multiple chains is dictated by the `kernel`.)\n\n The `current_state` can be represented as a single `Tensor` or a `list` of\n `Tensors` which collectively represent the current state.\n\n Since MCMC states are correlated, it is sometimes desirable to produce\n additional intermediate states, and then discard them, ending up with a set of\n states with decreased autocorrelation. See [Owen (2017)][1]. Such 'thinning'\n is made possible by setting `num_steps_between_results > 0`. The chain then\n takes `num_steps_between_results` extra steps between the steps that make it\n into the results. The extra steps are never materialized, and thus do not\n increase memory requirements.\n\n In addition to returning the chain state, this function supports tracing of\n auxiliary variables used by the kernel. The traced values are selected by\n specifying `trace_fn`. By default, all kernel results are traced but in the\n future the default will be changed to no results being traced, so plan\n accordingly. See below for some examples of this feature.\n\n Args:\n num_results: Integer number of Markov chain draws.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s).\n previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s\n representing internal calculations made within the previous call to this\n function (or as returned by `bootstrap_results`).\n kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step\n of the Markov chain.\n num_burnin_steps: Integer number of chain steps to take before starting to\n collect results.\n Default value: 0 (i.e., no burn-in).\n num_steps_between_results: Integer number of chain steps between collecting\n a result. Only one out of every `num_steps_between_samples + 1` steps is\n included in the returned results. The number of returned chain states is\n still equal to `num_results`. Default value: 0 (i.e., no thinning).\n trace_fn: A callable that takes in the current chain state and the previous\n kernel results and return a `Tensor` or a nested collection of `Tensor`s\n that is then traced along with the chain state.\n return_final_kernel_results: If `True`, then the final kernel results are\n returned alongside the chain state and the trace specified by the\n `trace_fn`.\n parallel_iterations: The number of iterations allowed to run in parallel. It\n must be a positive integer. See `tf.while_loop` for more details.\n seed: Optional, a seed for reproducible sampling.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'experimental_mcmc_sample_chain').\n\n Returns:\n checkpointable_states_and_trace: if `return_final_kernel_results` is\n `True`. The return value is an instance of\n `CheckpointableStatesAndTrace`.\n all_states: if `return_final_kernel_results` is `False` and `trace_fn` is\n `None`. The return value is a `Tensor` or Python list of `Tensor`s\n representing the state(s) of the Markov chain(s) at each result step. Has\n same shape as input `current_state` but with a prepended\n `num_results`-size dimension.\n states_and_trace: if `return_final_kernel_results` is `False` and\n `trace_fn` is not `None`. The return value is an instance of\n `StatesAndTrace`.\n\n #### References\n\n [1]: Art B. Owen. 
Statistically efficient thinning of a Markov chain sampler.\n _Technical Report_, 2017.\n http://statweb.stanford.edu/~owen/reports/bestthinning.pdf\n \"\"\"\n with tf.name_scope(name or 'experimental_mcmc_sample_chain'):\n if not kernel.is_calibrated:\n warnings.warn('supplied `TransitionKernel` is not calibrated. Markov '\n 'chain may not converge to intended target distribution.')\n\n if trace_fn is None:\n trace_fn = lambda *args: ()\n no_trace = True\n else:\n no_trace = False\n\n if trace_fn is sample_chain.__defaults__[4]:\n warnings.warn('Tracing all kernel results by default is deprecated. Set '\n 'the `trace_fn` argument to None (the future default '\n 'value) or an explicit callback that traces the values '\n 'you are interested in.')\n\n # `WithReductions` assumes all its reducers want to reduce over the\n # immediate inner results of its kernel results. However,\n # We don't care about the kernel results of `SampleDiscardingKernel`; hence,\n # we evaluate the `trace_fn` on a deeper level of inner results.\n def real_trace_fn(curr_state, kr):\n return curr_state, trace_fn(curr_state, kr.inner_results)\n trace_reducer = tracing_reducer.TracingReducer(\n trace_fn=real_trace_fn,\n size=num_results\n )\n trace_results, _, final_kernel_results = sample_fold(\n num_steps=num_results,\n current_state=current_state,\n previous_kernel_results=previous_kernel_results,\n kernel=kernel,\n reducer=trace_reducer,\n num_burnin_steps=num_burnin_steps,\n num_steps_between_results=num_steps_between_results,\n parallel_iterations=parallel_iterations,\n seed=seed,\n name=name,\n )\n\n all_states, trace = trace_results\n if return_final_kernel_results:\n return sample.CheckpointableStatesAndTrace(\n all_states=all_states,\n trace=trace,\n final_kernel_results=final_kernel_results)\n else:\n if no_trace:\n return all_states\n else:\n return sample.StatesAndTrace(all_states=all_states, trace=trace)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Half-Cauchy Distribution Class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import chain as chain_bijector\nfrom tensorflow_probability.python.bijectors import exp as exp_bijector\nfrom tensorflow_probability.python.bijectors import shift as shift_bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import tensor_util\n\n__all__ = [\n 'HalfCauchy',\n]\n\n\nclass HalfCauchy(distribution.Distribution):\n \"\"\"Half-Cauchy distribution.\n\n The half-Cauchy distribution is parameterized by a `loc` and a\n `scale` parameter. It represents the right half of the two symmetric halves in\n a [Cauchy distribution](https://en.wikipedia.org/wiki/Cauchy_distribution).\n\n #### Mathematical Details\n The probability density function (pdf) for the half-Cauchy distribution\n is given by\n\n ```none\n pdf(x; loc, scale) = 2 / (pi scale (1 + z**2))\n z = (x - loc) / scale\n ```\n\n where `loc` is a scalar in `R` and `scale` is a positive scalar in `R`.\n\n The support of the distribution is given by the interval `[loc, infinity)`.\n\n \"\"\"\n\n def __init__(self,\n loc,\n scale,\n validate_args=False,\n allow_nan_stats=True,\n name='HalfCauchy'):\n \"\"\"Construct a half-Cauchy distribution with `loc` and `scale`.\n\n Args:\n loc: Floating-point `Tensor`; the location(s) of the distribution(s).\n scale: Floating-point `Tensor`; the scale(s) of the distribution(s).\n Must contain only positive values.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs. Default value: `False` (i.e. do not validate args).\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n Default value: `True`.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: 'HalfCauchy'.\n\n Raises:\n TypeError: if `loc` and `scale` have different `dtype`.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([loc, scale], dtype_hint=tf.float32)\n self._loc = tensor_util.convert_nonref_to_tensor(\n loc, name='loc', dtype=dtype)\n self._scale = tensor_util.convert_nonref_to_tensor(\n scale, name='scale', dtype=dtype)\n super(HalfCauchy, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @staticmethod\n def _param_shapes(sample_shape):\n return dict(\n zip(('loc', 'scale'),\n ([tf.convert_to_tensor(sample_shape, dtype=tf.int32)] * 2)))\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(loc=0, scale=0)\n\n @property\n def loc(self):\n \"\"\"Distribution parameter for the location.\"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"Distribution parameter for the scale.\"\"\"\n return self._scale\n\n def _batch_shape_tensor(self, loc=None, scale=None):\n return ps.broadcast_shape(\n ps.shape(self.loc if loc is None else loc),\n ps.shape(self.scale if scale is None else scale))\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(self.loc.shape, self.scale.shape)\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n loc = tf.convert_to_tensor(self.loc)\n scale = tf.convert_to_tensor(self.scale)\n shape = ps.concat([[n], self._batch_shape_tensor(\n loc=loc, scale=scale)], 0)\n probs = samplers.uniform(\n shape, minval=0., maxval=1., dtype=self.dtype, seed=seed)\n # Quantile function.\n return loc + scale * tf.tan((np.pi / 2) * probs)\n\n def _log_prob(self, x):\n loc = tf.convert_to_tensor(self.loc)\n scale = tf.convert_to_tensor(self.scale)\n safe_x = self._get_safe_input(x, loc=loc, scale=scale)\n log_prob = (np.log(2 / np.pi) - tf.math.log(scale) - tf.math.log1p(\n ((safe_x - loc) / scale)**2))\n return tf.where(x < loc, dtype_util.as_numpy_dtype(\n self.dtype)(-np.inf), log_prob)\n\n def _log_cdf(self, x):\n loc = tf.convert_to_tensor(self.loc)\n scale = tf.convert_to_tensor(self.scale)\n safe_x = self._get_safe_input(x, loc=loc, scale=scale)\n log_cdf = np.log(2 / np.pi) + tf.math.log(tf.atan((safe_x - loc) / scale))\n return tf.where(x < loc, dtype_util.as_numpy_dtype(\n self.dtype)(-np.inf), log_cdf)\n\n def _entropy(self):\n h = np.log(2 * np.pi) + tf.math.log(self.scale)\n return h * tf.ones_like(self.loc)\n\n def _quantile(self, p):\n return self.loc + self.scale * tf.tan((np.pi / 2) * p)\n\n def _mode(self):\n return self.loc * tf.ones_like(self.scale)\n\n def _mean(self):\n if self.allow_nan_stats:\n return tf.fill(self.batch_shape_tensor(),\n dtype_util.as_numpy_dtype(self.dtype)(np.nan))\n raise ValueError('`mean` is undefined for the half-Cauchy distribution.')\n\n def _stddev(self):\n if self.allow_nan_stats:\n return tf.fill(self.batch_shape_tensor(),\n dtype_util.as_numpy_dtype(self.dtype)(np.nan))\n raise ValueError('`stddev` is undefined for the half-Cauchy distribution.')\n\n def _variance(self):\n if self.allow_nan_stats:\n return tf.fill(self.batch_shape_tensor(),\n 
dtype_util.as_numpy_dtype(self.dtype)(np.nan))\n raise ValueError(\n '`variance` is undefined for the half-Cauchy distribution.')\n\n def _get_safe_input(self, x, loc, scale):\n safe_value = 0.5 * scale + loc\n return tf.where(x < loc, safe_value, x)\n\n def _default_event_space_bijector(self):\n return chain_bijector.Chain([\n shift_bijector.Shift(\n shift=self.loc, validate_args=self.validate_args),\n exp_bijector.Exp(validate_args=self.validate_args)\n ], validate_args=self.validate_args)\n\n def _sample_control_dependencies(self, x):\n \"\"\"Checks the validity of a sample.\"\"\"\n assertions = []\n if not self.validate_args:\n return assertions\n loc = tf.convert_to_tensor(self.loc)\n assertions.append(assert_util.assert_greater_equal(\n x, loc, message='Sample must be greater than or equal to `loc`.'))\n return assertions\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n if is_init != tensor_util.is_ref(self.scale):\n assertions.append(assert_util.assert_positive(\n self.scale,\n message='Argument `scale` must be positive.'))\n return assertions\n\n"
] | [
[
"numpy.split",
"numpy.expand_dims",
"numpy.take",
"numpy.linspace",
"numpy.issubdtype",
"numpy.squeeze",
"numpy.concatenate",
"numpy.moveaxis",
"numpy.where",
"numpy.roll",
"numpy.conjugate",
"numpy.reshape",
"numpy.arange",
"numpy.full",
"numpy.repeat",
"numpy.transpose",
"numpy.array",
"numpy.flip",
"numpy.sum",
"numpy.ndindex"
],
[
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.name_scope"
],
[
"numpy.log",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.atan",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.math.log1p",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.broadcast_static_shape",
"tensorflow.compat.v2.tan",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
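The HalfCauchy row above carries its own mathematical documentation: sampling goes through the quantile function `loc + scale * tan(pi/2 * p)` and the log-density is `log(2/pi) - log(scale) - log1p(((x - loc)/scale)**2)` for `x >= loc`. As a quick reference, here is a minimal NumPy sketch of those same two formulas; the function names and the `loc`/`scale`/`n` values are illustrative choices, not taken from the TensorFlow Probability source.

```python
# Minimal sketch mirroring the sampling and log-prob formulas in the HalfCauchy
# row above; names and parameter values are illustrative, not from the source.
import numpy as np


def half_cauchy_sample(loc, scale, n, rng=None):
    """Inverse-CDF sampling: x = loc + scale * tan(pi/2 * u), u ~ Uniform(0, 1)."""
    rng = np.random.default_rng() if rng is None else rng
    u = rng.uniform(0.0, 1.0, size=n)
    return loc + scale * np.tan(0.5 * np.pi * u)


def half_cauchy_log_prob(x, loc, scale):
    """log pdf = log(2/pi) - log(scale) - log1p(z**2) for x >= loc, else -inf."""
    z = (x - loc) / scale
    lp = np.log(2.0 / np.pi) - np.log(scale) - np.log1p(z ** 2)
    return np.where(x < loc, -np.inf, lp)


samples = half_cauchy_sample(loc=0.0, scale=1.0, n=5)
print(samples)
print(half_cauchy_log_prob(samples, loc=0.0, scale=1.0))
```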
foobug/suzieq | [
"c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5",
"c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5"
] | [
"suzieq/poller/services/evpnVni.py",
"suzieq/sqobjects/bgp.py"
] | [
"import re\nimport numpy as np\n\nfrom suzieq.poller.services.service import Service\nfrom suzieq.utils import (convert_rangestring_to_list,\n convert_macaddr_format_to_colon)\n\n\nclass EvpnVniService(Service):\n \"\"\"evpnVni service. Different class because output needs to be munged\"\"\"\n\n def clean_json_input(self, data):\n \"\"\"FRR JSON data needs some work\"\"\"\n\n devtype = data.get(\"devtype\", None)\n if any(x == devtype for x in [\"cumulus\", \"sonic\", \"linux\"]):\n data['data'] = '[' + re.sub(r'}\\n\\n{\\n', r'},\\n\\n{\\n',\n data['data']) + ']'\n return data['data']\n\n def _clean_eos_data(self, processed_data, raw_data):\n new_entries = []\n\n if not processed_data:\n return processed_data\n\n for entry in processed_data:\n vni2vrfmap = {}\n for vrf in entry['_vrf2VniMap']:\n vni2vrfmap[entry['_vrf2VniMap'][vrf]] = vrf\n\n vtepMap = entry.get('_vlan2VtepMap', {})\n replType = entry.get('replicationType')\n if replType == 'headendVcs':\n replType = 'ingressBGP'\n elif entry.get('mcastGroup', '') != \"0.0.0.0\":\n replType = \"multicast\"\n else:\n replType = ''\n for vlan in entry['_vlan2VniMap']:\n new_entry = {}\n vni = entry['_vlan2VniMap'][vlan].get('vni', 0)\n new_entry['vni'] = vni\n new_entry['vrf'] = vni2vrfmap.get(vni, '')\n new_entry['state'] = entry['state']\n new_entry['ifname'] = entry['ifname']\n new_entry['vlan'] = vlan\n new_entry['priVtepIp'] = entry['priVtepIp']\n vteplist = vtepMap.get(vlan, {})\n vteplist = (vteplist.get('remoteVtepAddr', []) +\n vteplist.get('remoteVtepAddr6', []))\n new_entry['remoteVtepList'] = vteplist\n new_entry['replicationType'] = replType\n new_entry['mcastGroup'] = entry['mcastGroup']\n if new_entry['vrf']:\n new_entry['type'] = 'L3'\n else:\n new_entry['type'] = 'L2'\n new_entry['ifname'] = entry.get('ifname', '')\n\n new_entries.append(new_entry)\n\n processed_data = new_entries\n return processed_data\n\n def _clean_cumulus_data(self, processed_data, raw_data):\n \"\"\"Clean out null entries among other cleanup\"\"\"\n\n del_indices = []\n for i, entry in enumerate(processed_data):\n if entry['vni'] is None:\n del_indices.append(i)\n if entry['mcastGroup'] and entry['mcastGroup'] != \"0.0.0.0\":\n entry['replicationType'] = 'multicast'\n elif entry['type'] != 'L3':\n entry['replicationType'] = 'ingressBGP'\n entry['mcastGroup'] = \"0.0.0.0\"\n else:\n entry['replicationType'] = ''\n entry['mcastGroup'] = \"0.0.0.0\"\n entry['remoteVtepList'] = None\n\n entry['state'] = entry.get('state', 'up').lower()\n entry['l2VniList'] = set(entry['l2VniList'])\n processed_data = np.delete(processed_data, del_indices).tolist()\n\n return processed_data\n\n def _clean_nxos_data(self, processed_data, raw_data):\n \"\"\"Merge peer records with VNI records to yield VNI-based records\"\"\"\n\n vni_dict = {}\n drop_indices = []\n\n for i, entry in enumerate(processed_data):\n if not entry['vni']:\n drop_indices.append(i)\n continue\n\n if entry['_entryType'] == 'VNI':\n type, vrf = entry['type'].split()\n if type == 'L3':\n entry['vrf'] = vrf[1:-1] # strip off '[' and ']'\n entry['type'] = type\n if 'sviState' in entry:\n entry['state'] = entry['sviState'].split()[0].lower()\n if re.search(r'[0-9.]+', entry.get('replicationType', '')):\n entry['mcastGroup'] = entry['replicationType']\n entry['replicationType'] = 'multicast'\n elif entry['type'] != 'L3':\n entry['replicationType'] = 'ingressBGP'\n entry['mcastGroup'] = \"0.0.0.0\"\n else:\n entry['replicationType'] = ''\n entry['mcastGroup'] = \"0.0.0.0\"\n\n # we'll fill this with the peers 
entries\n entry['remoteVtepList'] = []\n entry['state'] = entry['state'].lower()\n entry['vlan'] = int(entry['vlan'])\n vni_dict[entry['vni']] = entry\n\n elif entry['_entryType'] == 'peers':\n vni_list = convert_rangestring_to_list(\n entry.get('_vniList', ''))\n for vni in vni_list:\n vni_entry = vni_dict.get(vni, None)\n if vni_entry:\n vni_entry['remoteVtepList'].append(entry['vni'])\n drop_indices.append(i)\n\n elif entry['_entryType'] == 'iface':\n if entry.get('encapType', '') != \"VXLAN\":\n continue\n\n for vni in vni_dict:\n if vni_dict[vni]['ifname'] != entry['ifname']:\n continue\n vni_dict[vni]['priVtepIp'] = entry.get('priVtepIp', '')\n secIP = entry.get('secVtepIp', '')\n if secIP == '0.0.0.0':\n secIP = ''\n vni_dict[vni]['secVtepIp'] = secIP\n vni_dict[vni]['routerMac'] = convert_macaddr_format_to_colon(\n entry.get('routerMac', '00:00:00:00:00:00'))\n\n drop_indices.append(i)\n\n processed_data = np.delete(processed_data, drop_indices).tolist()\n\n return processed_data\n\n def _clean_junos_data(self, processed_data, raw_data):\n\n newntries = {}\n\n for entry in processed_data:\n if entry['_entryType'] == 'instance':\n if entry['_vniList'] is None:\n continue\n for i, vni in enumerate(entry['_vniList']):\n irb_iflist = entry.get('_irbIfList', [])\n vrflist = entry.get('_vrfList', [])\n vlan = entry['_vlanList'][i]\n irbif = f'irb.{vlan}'\n try:\n index = irb_iflist.index(irbif)\n vrf = vrflist[index]\n except ValueError:\n vrf = ''\n except IndexError:\n vrf = ''\n\n if vni not in newntries:\n vni_entry = {\n 'vni': int(vni),\n 'remoteVtepList': [],\n 'type': 'L2',\n 'state': 'up',\n 'vlan': int(vlan),\n 'numRemoteVteps': 0,\n 'numMacs': 0,\n 'numArpNd': 0,\n 'vrf': vrf,\n 'os': 'junos'\n }\n newntries[vni] = vni_entry\n continue\n elif entry['_entryType'] == 'l3':\n vni = int(entry.get('vni', '0'))\n priVtepIp = entry.get('priVtepIp', '')\n\n if not priVtepIp and not vni:\n continue\n\n vni_entry = {\n 'vni': vni,\n 'remoteVtepList': [],\n 'priVtepIp': priVtepIp,\n 'type': 'L3',\n 'state': 'up',\n 'numRemoteVteps': 0,\n 'routerMac': entry['routerMac'],\n 'numMacs': 0,\n 'numArpNd': 0,\n 'mcastGroup': '0.0.0.0',\n 'vrf': entry['vrf'],\n 'os': 'junos'\n }\n # Add the primary VTEP IP into the L2 entries as well\n for l2vni in newntries:\n newntries[l2vni]['priVtepIp'] = priVtepIp\n\n newntries[vni] = vni_entry\n continue\n elif entry['_entryType'] == 'remote':\n priVtepIp = entry.get('priVtepIp', '[{\"data\": \"\"}]')[0]['data']\n for i, vni in enumerate(entry.get('_vniList', [])):\n vni_entry = newntries.get(vni, {})\n if not vni_entry:\n vni_entry = {\n 'vni': int(vni),\n 'remoteVtepList': [],\n 'priVtepIp': priVtepIp,\n 'type': 'L2',\n 'state': 'up',\n 'numRemoteVteps': len(entry['_floodVtepList']),\n 'numMacs': 0,\n 'numArpNd': 0,\n 'os': 'junos'\n }\n newntries[vni] = vni_entry\n\n vni_entry['priVtepIp'] = priVtepIp\n if entry['replicationType'][i] == '0.0.0.0':\n vni_entry['replicationType'] = 'ingressBGP'\n vni_entry['mcastGroup'] = \"0.0.0.0\"\n else:\n vni_entry['replicationType'] = 'multicast'\n vni_entry['mcastGroup'] = entry['replicationType'][i]\n\n vni_entry['remoteVtepList'].append(\n entry.get('_floodVtepList', ''))\n\n processed_data = list(newntries.values())\n return processed_data\n",
"from suzieq.sqobjects.basicobj import SqObject\nimport pandas as pd\nfrom suzieq.utils import humanize_timestamp\n\n\nclass BgpObj(SqObject):\n def __init__(self, **kwargs):\n super().__init__(table='bgp', **kwargs)\n self._valid_get_args = ['namespace', 'hostname', 'columns', 'state',\n 'vrf', 'peer', 'query_str']\n self._valid_arg_vals = {\n 'state': ['Established', 'NotEstd', ''],\n 'status': ['all', 'pass', 'fail'],\n }\n self._valid_assert_args = ['namespace', 'hostname', 'vrf', 'status']\n\n def aver(self, **kwargs):\n \"\"\"Assert that the BGP state is OK\"\"\"\n\n if not self.ctxt.engine:\n raise AttributeError('No analysis engine specified')\n try:\n self.validate_assert_input(**kwargs)\n except Exception as error:\n df = pd.DataFrame({'error': [f'{error}']})\n return df\n\n return self.engine.aver(**kwargs)\n\n def humanize_fields(self, df: pd.DataFrame, subset=None) -> pd.DataFrame:\n '''Humanize the timestamp and boot time fields'''\n if df.empty:\n return df\n\n if 'estdTime' in df.columns:\n df['estdTime'] = humanize_timestamp(df.estdTime,\n self.cfg.get('analyzer', {})\n .get('timezone', None))\n\n return df\n"
] | [
[
"numpy.delete"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
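The suzieq row above repeatedly uses `np.delete(processed_data, del_indices).tolist()` to drop null entries from a list of parsed records by index. A small self-contained sketch of that pattern, with assumed example data rather than real poller output, is shown below.

```python
# Sketch of the np.delete(...).tolist() pattern used in the evpnVni service above
# to drop entries with a null VNI; the example records are made up for illustration.
import numpy as np

processed_data = [{"vni": 100}, {"vni": None}, {"vni": 200}]
del_indices = [i for i, entry in enumerate(processed_data) if entry["vni"] is None]

# np.delete treats the list as a 1-D object array, removes the flagged indices,
# and .tolist() converts it back to a plain Python list of dicts.
processed_data = np.delete(processed_data, del_indices).tolist()
print(processed_data)  # [{'vni': 100}, {'vni': 200}]
```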
PriyankaH21/astropy | [
"159fb9637ce4acdc60329d20517ed3dc7ba79581",
"159fb9637ce4acdc60329d20517ed3dc7ba79581",
"159fb9637ce4acdc60329d20517ed3dc7ba79581",
"159fb9637ce4acdc60329d20517ed3dc7ba79581"
] | [
"astropy/nddata/tests/test_utils.py",
"astropy/io/fits/tests/test_connect.py",
"astropy/convolution/tests/test_convolve.py",
"astropy/io/fits/util.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom ...tests.helper import assert_quantity_allclose\nfrom ..utils import (extract_array, add_array, subpixel_indices,\n block_reduce, block_replicate,\n overlap_slices, NoOverlapError, PartialOverlapError,\n Cutout2D)\nfrom ...wcs import WCS, Sip\nfrom ...wcs.utils import proj_plane_pixel_area\nfrom ...coordinates import SkyCoord\nfrom ... import units as u\n\ntry:\n import skimage # pylint: disable=W0611\n HAS_SKIMAGE = True\nexcept ImportError:\n HAS_SKIMAGE = False\n\n\ntest_positions = [(10.52, 3.12), (5.62, 12.97), (31.33, 31.77),\n (0.46, 0.94), (20.45, 12.12), (42.24, 24.42)]\n\ntest_position_indices = [(0, 3), (0, 2), (4, 1),\n (4, 2), (4, 3), (3, 4)]\n\ntest_slices = [slice(10.52, 3.12), slice(5.62, 12.97),\n slice(31.33, 31.77), slice(0.46, 0.94),\n slice(20.45, 12.12), slice(42.24, 24.42)]\n\nsubsampling = 5\n\ntest_pos_bad = [(-1, -4), (-1, 0), (6, 2), (6, 6)]\n\n\ndef test_slices_different_dim():\n '''Overlap from arrays with different number of dim is undefined.'''\n with pytest.raises(ValueError) as e:\n overlap_slices((4, 5, 6), (1, 2), (0, 0))\n assert \"the same number of dimensions\" in str(e.value)\n\n\ndef test_slices_pos_different_dim():\n '''Position must have same dim as arrays.'''\n with pytest.raises(ValueError) as e:\n overlap_slices((4, 5), (1, 2), (0, 0, 3))\n assert \"the same number of dimensions\" in str(e.value)\n\n\[email protected]('pos', test_pos_bad)\ndef test_slices_no_overlap(pos):\n '''If there is no overlap between arrays, an error should be raised.'''\n with pytest.raises(NoOverlapError):\n overlap_slices((5, 5), (2, 2), pos)\n\n\ndef test_slices_partial_overlap():\n '''Compute a slice for partially overlapping arrays.'''\n temp = overlap_slices((5,), (3,), (0,))\n assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))\n\n temp = overlap_slices((5,), (3,), (0,), mode='partial')\n assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))\n\n for pos in [0, 4]:\n with pytest.raises(PartialOverlapError) as e:\n temp = overlap_slices((5,), (3,), (pos,), mode='strict')\n assert 'Arrays overlap only partially.' in str(e.value)\n\n\ndef test_slices_overlap_wrong_mode():\n '''Call overlap_slices with non-existing mode.'''\n with pytest.raises(ValueError) as e:\n overlap_slices((5,), (3,), (0,), mode='full')\n assert \"Mode can be only\" in str(e.value)\n\n\ndef test_extract_array_wrong_mode():\n '''Call extract_array with non-existing mode.'''\n with pytest.raises(ValueError) as e:\n extract_array(np.arange(4), (2, ), (0, ), mode='full')\n assert \"Valid modes are 'partial', 'trim', and 'strict'.\" == str(e.value)\n\n\ndef test_extract_array_1d_even():\n '''Extract 1 d arrays.\n\n All dimensions are treated the same, so we can test in 1 dim.\n '''\n assert np.all(extract_array(np.arange(4), (2, ), (0, ), fill_value=-99) == np.array([-99, 0]))\n for i in [1, 2, 3]:\n assert np.all(extract_array(np.arange(4), (2, ), (i, )) == np.array([i - 1, i]))\n assert np.all(extract_array(np.arange(4.), (2, ), (4, ), fill_value=np.inf) == np.array([3, np.inf]))\n\n\ndef test_extract_array_1d_odd():\n '''Extract 1 d arrays.\n\n All dimensions are treated the same, so we can test in 1 dim.\n The first few lines test the most error-prone part: Extraction of an\n array on the boundaries.\n Additional tests (e.g. 
dtype of return array) are done for the last\n case only.\n '''\n assert np.all(extract_array(np.arange(4), (3,), (-1, ), fill_value=-99) == np.array([-99, -99, 0]))\n assert np.all(extract_array(np.arange(4), (3,), (0, ), fill_value=-99) == np.array([-99, 0, 1]))\n for i in [1, 2]:\n assert np.all(extract_array(np.arange(4), (3,), (i, )) == np.array([i-1, i, i+1]))\n assert np.all(extract_array(np.arange(4), (3,), (3, ), fill_value=-99) == np.array([2, 3, -99]))\n arrayin = np.arange(4.)\n extracted = extract_array(arrayin, (3,), (4, ))\n assert extracted[0] == 3\n assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan\n assert extracted.dtype == arrayin.dtype\n\n\ndef test_extract_array_1d():\n \"\"\"In 1d, shape can be int instead of tuple\"\"\"\n assert np.all(extract_array(np.arange(4), 3, (-1, ), fill_value=-99) == np.array([-99, -99, 0]))\n assert np.all(extract_array(np.arange(4), 3, -1, fill_value=-99) == np.array([-99, -99, 0]))\n\n\ndef test_extract_Array_float():\n \"\"\"integer is at bin center\"\"\"\n for a in np.arange(2.51, 3.49, 0.1):\n assert np.all(extract_array(np.arange(5), 3, a) == np.array([2, 3, 4]))\n\n\ndef test_extract_array_1d_trim():\n '''Extract 1 d arrays.\n\n All dimensions are treated the same, so we can test in 1 dim.\n '''\n assert np.all(extract_array(np.arange(4), (2, ), (0, ), mode='trim') == np.array([0]))\n for i in [1, 2, 3]:\n assert np.all(extract_array(np.arange(4), (2, ), (i, ), mode='trim') == np.array([i - 1, i]))\n assert np.all(extract_array(np.arange(4.), (2, ), (4, ), mode='trim') == np.array([3]))\n\n\[email protected]('mode', ['partial', 'trim', 'strict'])\ndef test_extract_array_easy(mode):\n \"\"\"\n Test extract_array utility function.\n\n Test by extracting an array of ones out of an array of zeros.\n \"\"\"\n large_test_array = np.zeros((11, 11))\n small_test_array = np.ones((5, 5))\n large_test_array[3:8, 3:8] = small_test_array\n extracted_array = extract_array(large_test_array, (5, 5), (5, 5), mode=mode)\n assert np.all(extracted_array == small_test_array)\n\n\ndef test_extract_array_return_pos():\n '''Check that the return position is calculated correctly.\n\n The result will differ by mode. 
All test here are done in 1d because it's\n easier to construct correct test cases.\n '''\n large_test_array = np.arange(5)\n for i in np.arange(-1, 6):\n extracted, new_pos = extract_array(large_test_array, 3, i,\n mode='partial', return_position=True)\n assert new_pos == (1, )\n # Now check an array with an even number\n for i, expected in zip([1.49, 1.51, 3], [1.49, 0.51, 1]):\n extracted, new_pos = extract_array(large_test_array, (2,), (i,),\n mode='strict', return_position=True)\n assert new_pos == (expected, )\n # For mode='trim' the answer actually depends\n for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)):\n extracted, new_pos = extract_array(large_test_array, (3,), (i,),\n mode='trim', return_position=True)\n assert new_pos == (expected, )\n\n\ndef test_add_array_odd_shape():\n \"\"\"\n Test add_array utility function.\n\n Test by adding an array of ones out of an array of zeros.\n \"\"\"\n large_test_array = np.zeros((11, 11))\n small_test_array = np.ones((5, 5))\n large_test_array_ref = large_test_array.copy()\n large_test_array_ref[3:8, 3:8] += small_test_array\n\n added_array = add_array(large_test_array, small_test_array, (5, 5))\n assert np.all(added_array == large_test_array_ref)\n\n\ndef test_add_array_even_shape():\n \"\"\"\n Test add_array_2D utility function.\n\n Test by adding an array of ones out of an array of zeros.\n \"\"\"\n large_test_array = np.zeros((11, 11))\n small_test_array = np.ones((4, 4))\n large_test_array_ref = large_test_array.copy()\n large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4]\n\n added_array = add_array(large_test_array, small_test_array, (0, 0))\n assert np.all(added_array == large_test_array_ref)\n\n\[email protected](('position', 'subpixel_index'),\n zip(test_positions, test_position_indices))\ndef test_subpixel_indices(position, subpixel_index):\n \"\"\"\n Test subpixel_indices utility function.\n\n Test by asserting that the function returns correct results for\n given test values.\n \"\"\"\n assert np.all(subpixel_indices(position, subsampling) == subpixel_index)\n\n\[email protected]('not HAS_SKIMAGE')\nclass TestBlockReduce:\n def test_1d(self):\n \"\"\"Test 1D array.\"\"\"\n data = np.arange(4)\n expected = np.array([1, 5])\n result = block_reduce(data, 2)\n assert np.all(result == expected)\n\n def test_1d_mean(self):\n \"\"\"Test 1D array with func=np.mean.\"\"\"\n data = np.arange(4)\n block_size = 2.\n expected = block_reduce(data, block_size, func=np.sum) / block_size\n result_mean = block_reduce(data, block_size, func=np.mean)\n assert np.all(result_mean == expected)\n\n def test_2d(self):\n \"\"\"Test 2D array.\"\"\"\n data = np.arange(4).reshape(2, 2)\n expected = np.array([[6]])\n result = block_reduce(data, 2)\n assert np.all(result == expected)\n\n def test_2d_mean(self):\n \"\"\"Test 2D array with func=np.mean.\"\"\"\n data = np.arange(4).reshape(2, 2)\n block_size = 2.\n expected = (block_reduce(data, block_size, func=np.sum) /\n block_size**2)\n result = block_reduce(data, block_size, func=np.mean)\n assert np.all(result == expected)\n\n def test_2d_trim(self):\n \"\"\"\n Test trimming of 2D array when size is not perfectly divisible\n by block_size.\n \"\"\"\n\n data1 = np.arange(15).reshape(5, 3)\n result1 = block_reduce(data1, 2)\n data2 = data1[0:4, 0:2]\n result2 = block_reduce(data2, 2)\n assert np.all(result1 == result2)\n\n def test_block_size_broadcasting(self):\n \"\"\"Test scalar block_size broadcasting.\"\"\"\n data = np.arange(16).reshape(4, 4)\n result1 = block_reduce(data, 2)\n 
result2 = block_reduce(data, (2, 2))\n assert np.all(result1 == result2)\n\n def test_block_size_len(self):\n \"\"\"Test block_size length.\"\"\"\n data = np.ones((2, 2))\n with pytest.raises(ValueError):\n block_reduce(data, (2, 2, 2))\n\n\[email protected]('not HAS_SKIMAGE')\nclass TestBlockReplicate:\n def test_1d(self):\n \"\"\"Test 1D array.\"\"\"\n data = np.arange(2)\n expected = np.array([0, 0, 0.5, 0.5])\n result = block_replicate(data, 2)\n assert np.all(result == expected)\n\n def test_1d_conserve_sum(self):\n \"\"\"Test 1D array with conserve_sum=False.\"\"\"\n data = np.arange(2)\n block_size = 2.\n expected = block_replicate(data, block_size) * block_size\n result = block_replicate(data, block_size, conserve_sum=False)\n assert np.all(result == expected)\n\n def test_2d(self):\n \"\"\"Test 2D array.\"\"\"\n data = np.arange(2).reshape(2, 1)\n expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])\n result = block_replicate(data, 2)\n assert np.all(result == expected)\n\n def test_2d_conserve_sum(self):\n \"\"\"Test 2D array with conserve_sum=False.\"\"\"\n data = np.arange(6).reshape(2, 3)\n block_size = 2.\n expected = block_replicate(data, block_size) * block_size**2\n result = block_replicate(data, block_size, conserve_sum=False)\n assert np.all(result == expected)\n\n def test_block_size_broadcasting(self):\n \"\"\"Test scalar block_size broadcasting.\"\"\"\n data = np.arange(4).reshape(2, 2)\n result1 = block_replicate(data, 2)\n result2 = block_replicate(data, (2, 2))\n assert np.all(result1 == result2)\n\n def test_block_size_len(self):\n \"\"\"Test block_size length.\"\"\"\n data = np.arange(5)\n with pytest.raises(ValueError):\n block_replicate(data, (2, 2))\n\n\nclass TestCutout2D:\n def setup_class(self):\n self.data = np.arange(20.).reshape(5, 4)\n self.position = SkyCoord('13h11m29.96s -01d19m18.7s', frame='icrs')\n wcs = WCS(naxis=2)\n rho = np.pi / 3.\n scale = 0.05 / 3600.\n wcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)],\n [scale*np.sin(rho), scale*np.cos(rho)]]\n wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n wcs.wcs.crval = [self.position.ra.to_value(u.deg),\n self.position.dec.to_value(u.deg)]\n wcs.wcs.crpix = [3, 3]\n self.wcs = wcs\n\n # add SIP\n sipwcs = wcs.deepcopy()\n sipwcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']\n a = np.array(\n [[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],\n [0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],\n [-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],\n [-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],\n [ -2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]\n )\n b = np.array(\n [[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],\n [0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],\n [6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],\n [3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],\n [-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]\n )\n sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)\n sipwcs.wcs.set()\n self.sipwcs = sipwcs\n\n def test_cutout(self):\n sizes = [3, 3*u.pixel, (3, 3), (3*u.pixel, 3*u.pix), (3., 3*u.pixel),\n (2.9, 3.3)]\n for size in sizes:\n position = (2.1, 1.9)\n c = Cutout2D(self.data, position, size)\n assert c.data.shape == (3, 3)\n assert c.data[1, 1] == 10\n assert c.origin_original == (1, 1)\n assert c.origin_cutout == (0, 0)\n assert c.input_position_original == position\n assert_allclose(c.input_position_cutout, (1.1, 0.9))\n assert c.position_original == (2., 2.)\n assert c.position_cutout == (1., 1.)\n assert c.center_original == (2., 
2.)\n assert c.center_cutout == (1., 1.)\n assert c.bbox_original == ((1, 3), (1, 3))\n assert c.bbox_cutout == ((0, 2), (0, 2))\n assert c.slices_original == (slice(1, 4), slice(1, 4))\n assert c.slices_cutout == (slice(0, 3), slice(0, 3))\n\n def test_size_length(self):\n with pytest.raises(ValueError):\n Cutout2D(self.data, (2, 2), (1, 1, 1))\n\n def test_size_units(self):\n for size in [3 * u.cm, (3, 3 * u.K)]:\n with pytest.raises(ValueError):\n Cutout2D(self.data, (2, 2), size)\n\n def test_size_pixel(self):\n \"\"\"\n Check size in derived pixel units.\n \"\"\"\n size = 0.3*u.arcsec / (0.1*u.arcsec/u.pixel)\n c = Cutout2D(self.data, (2, 2), size)\n assert c.data.shape == (3, 3)\n assert c.data[0, 0] == 5\n assert c.slices_original == (slice(1, 4), slice(1, 4))\n assert c.slices_cutout == (slice(0, 3), slice(0, 3))\n\n def test_size_angle(self):\n c = Cutout2D(self.data, (2, 2), (0.1*u.arcsec), wcs=self.wcs)\n assert c.data.shape == (2, 2)\n assert c.data[0, 0] == 5\n assert c.slices_original == (slice(1, 3), slice(1, 3))\n assert c.slices_cutout == (slice(0, 2), slice(0, 2))\n\n def test_size_angle_without_wcs(self):\n with pytest.raises(ValueError):\n Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec))\n\n def test_cutout_trim_overlap(self):\n c = Cutout2D(self.data, (0, 0), (3, 3), mode='trim')\n assert c.data.shape == (2, 2)\n assert c.data[0, 0] == 0\n assert c.slices_original == (slice(0, 2), slice(0, 2))\n assert c.slices_cutout == (slice(0, 2), slice(0, 2))\n\n def test_cutout_partial_overlap(self):\n c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial')\n assert c.data.shape == (3, 3)\n assert c.data[1, 1] == 0\n assert c.slices_original == (slice(0, 2), slice(0, 2))\n assert c.slices_cutout == (slice(1, 3), slice(1, 3))\n\n def test_cutout_partial_overlap_fill_value(self):\n fill_value = -99\n c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial',\n fill_value=fill_value)\n assert c.data.shape == (3, 3)\n assert c.data[1, 1] == 0\n assert c.data[0, 0] == fill_value\n\n def test_copy(self):\n data = np.copy(self.data)\n c = Cutout2D(data, (2, 3), (3, 3))\n xy = (0, 0)\n value = 100.\n c.data[xy] = value\n xy_orig = c.to_original_position(xy)\n yx = xy_orig[::-1]\n assert data[yx] == value\n\n data = np.copy(self.data)\n c2 = Cutout2D(self.data, (2, 3), (3, 3), copy=True)\n c2.data[xy] = value\n assert data[yx] != value\n\n def test_to_from_large(self):\n position = (2, 2)\n c = Cutout2D(self.data, position, (3, 3))\n xy = (0, 0)\n result = c.to_cutout_position(c.to_original_position(xy))\n assert_allclose(result, xy)\n\n def test_skycoord_without_wcs(self):\n with pytest.raises(ValueError):\n Cutout2D(self.data, self.position, (3, 3))\n\n def test_skycoord(self):\n c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs)\n skycoord_original = self.position.from_pixel(c.center_original[1],\n c.center_original[0],\n self.wcs)\n skycoord_cutout = self.position.from_pixel(c.center_cutout[1],\n c.center_cutout[0], c.wcs)\n assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)\n assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)\n\n def test_skycoord_partial(self):\n c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs,\n mode='partial')\n skycoord_original = self.position.from_pixel(c.center_original[1],\n c.center_original[0],\n self.wcs)\n skycoord_cutout = self.position.from_pixel(c.center_cutout[1],\n c.center_cutout[0], c.wcs)\n assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)\n 
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)\n\n def test_naxis_update(self):\n xsize = 2\n ysize = 3\n c = Cutout2D(self.data, self.position, (ysize, xsize), wcs=self.wcs)\n assert c.wcs._naxis[0] == xsize\n assert c.wcs._naxis[1] == ysize\n\n def test_crpix_maps_to_crval(self):\n w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs,\n mode='partial').wcs\n pscale = np.sqrt(proj_plane_pixel_area(w))\n assert_allclose(\n w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval,\n rtol=0.0, atol=1e-6 * pscale\n )\n assert_allclose(\n w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval,\n rtol=0.0, atol=1e-6 * pscale\n )\n",
"import os\nimport gc\nimport pathlib\nimport warnings\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom ..column import _parse_tdisp_format, _fortran_to_python_format, \\\n python_to_tdisp\n\nfrom .. import HDUList, PrimaryHDU, BinTableHDU\n\nfrom ... import fits\n\nfrom .... import units as u\nfrom ....table import Table, QTable, NdarrayMixin, Column\nfrom ....table.table_helpers import simple_table\nfrom ....tests.helper import catch_warnings\nfrom ....units.format.fits import UnitScaleError\n\nfrom ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation\nfrom ....time import Time, TimeDelta\nfrom ....units import allclose as quantity_allclose\nfrom ....units.quantity import QuantityInfo\n\ntry:\n import yaml # pylint: disable=W0611\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\nDATA = os.path.join(os.path.dirname(__file__), 'data')\n\n\ndef equal_data(a, b):\n for name in a.dtype.names:\n if not np.all(a[name] == b[name]):\n return False\n return True\n\n\nclass TestSingleTable:\n\n def setup_class(self):\n self.data = np.array(list(zip([1, 2, 3, 4],\n ['a', 'b', 'c', 'd'],\n [2.3, 4.5, 6.7, 8.9])),\n dtype=[(str('a'), int), (str('b'), str('U1')), (str('c'), float)])\n\n def test_simple(self, tmpdir):\n filename = str(tmpdir.join('test_simple.fts'))\n t1 = Table(self.data)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n\n def test_simple_pathlib(self, tmpdir):\n filename = pathlib.Path(str(tmpdir.join('test_simple.fit')))\n t1 = Table(self.data)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n\n def test_simple_meta(self, tmpdir):\n filename = str(tmpdir.join('test_simple.fits'))\n t1 = Table(self.data)\n t1.meta['A'] = 1\n t1.meta['B'] = 2.3\n t1.meta['C'] = 'spam'\n t1.meta['comments'] = ['this', 'is', 'a', 'long', 'comment']\n t1.meta['HISTORY'] = ['first', 'second', 'third']\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n for key in t1.meta:\n if isinstance(t1.meta, list):\n for i in range(len(t1.meta[key])):\n assert t1.meta[key][i] == t2.meta[key][i]\n else:\n assert t1.meta[key] == t2.meta[key]\n\n def test_simple_meta_conflicting(self, tmpdir):\n filename = str(tmpdir.join('test_simple.fits'))\n t1 = Table(self.data)\n t1.meta['ttype1'] = 'spam'\n with catch_warnings() as l:\n t1.write(filename, overwrite=True)\n assert len(l) == 1\n assert str(l[0].message).startswith(\n 'Meta-data keyword ttype1 will be ignored since it conflicts with a FITS reserved keyword')\n\n def test_simple_noextension(self, tmpdir):\n \"\"\"\n Test that file type is recognized without extension\n \"\"\"\n filename = str(tmpdir.join('test_simple'))\n t1 = Table(self.data)\n t1.write(filename, overwrite=True, format='fits')\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n\n @pytest.mark.parametrize('table_type', (Table, QTable))\n def test_with_units(self, table_type, tmpdir):\n filename = str(tmpdir.join('test_with_units.fits'))\n t1 = table_type(self.data)\n t1['a'].unit = u.m\n t1['c'].unit = u.km / u.s\n t1.write(filename, overwrite=True)\n t2 = table_type.read(filename)\n assert equal_data(t1, t2)\n assert t2['a'].unit == u.m\n assert t2['c'].unit == u.km / u.s\n\n @pytest.mark.parametrize('table_type', (Table, QTable))\n def test_with_format(self, table_type, tmpdir):\n filename = str(tmpdir.join('test_with_format.fits'))\n t1 = table_type(self.data)\n t1['a'].format = '{:5d}'\n 
t1['b'].format = '{:>20}'\n t1['c'].format = '{:6.2f}'\n t1.write(filename, overwrite=True)\n t2 = table_type.read(filename)\n assert equal_data(t1, t2)\n assert t2['a'].format == '{:5d}'\n assert t2['b'].format == '{:>20}'\n assert t2['c'].format == '{:6.2f}'\n\n def test_masked(self, tmpdir):\n filename = str(tmpdir.join('test_masked.fits'))\n t1 = Table(self.data, masked=True)\n t1.mask['a'] = [1, 0, 1, 0]\n t1.mask['b'] = [1, 0, 0, 1]\n t1.mask['c'] = [0, 1, 1, 0]\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert t2.masked\n assert equal_data(t1, t2)\n assert np.all(t1['a'].mask == t2['a'].mask)\n # Disabled for now, as there is no obvious way to handle masking of\n # non-integer columns in FITS\n # TODO: Re-enable these tests if some workaround for this can be found\n # assert np.all(t1['b'].mask == t2['b'].mask)\n # assert np.all(t1['c'].mask == t2['c'].mask)\n\n def test_masked_nan(self, tmpdir):\n filename = str(tmpdir.join('test_masked_nan.fits'))\n data = np.array(list(zip([5.2, 8.4, 3.9, 6.3],\n [2.3, 4.5, 6.7, 8.9])),\n dtype=[(str('a'), np.float64), (str('b'), np.float32)])\n t1 = Table(data, masked=True)\n t1.mask['a'] = [1, 0, 1, 0]\n t1.mask['b'] = [1, 0, 0, 1]\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n np.testing.assert_array_almost_equal(t2['a'], [np.nan, 8.4, np.nan, 6.3])\n np.testing.assert_array_almost_equal(t2['b'], [np.nan, 4.5, 6.7, np.nan])\n # assert t2.masked\n # t2.masked = false currently, as the only way to determine whether a table is masked\n # while reading is to check whether col.null is present. For float columns, col.null\n # is not initialized\n\n def test_read_from_fileobj(self, tmpdir):\n filename = str(tmpdir.join('test_read_from_fileobj.fits'))\n hdu = BinTableHDU(self.data)\n hdu.writeto(filename, overwrite=True)\n with open(filename, 'rb') as f:\n t = Table.read(f)\n assert equal_data(t, self.data)\n\n def test_read_with_nonstandard_units(self):\n hdu = BinTableHDU(self.data)\n hdu.columns[0].unit = 'RADIANS'\n hdu.columns[1].unit = 'spam'\n hdu.columns[2].unit = 'millieggs'\n t = Table.read(hdu)\n assert equal_data(t, self.data)\n\n def test_memmap(self, tmpdir):\n filename = str(tmpdir.join('test_simple.fts'))\n t1 = Table(self.data)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename, memmap=False)\n t3 = Table.read(filename, memmap=True)\n assert equal_data(t2, t3)\n # To avoid issues with --open-files, we need to remove references to\n # data that uses memory mapping and force the garbage collection\n del t1, t2, t3\n gc.collect()\n\n @pytest.mark.parametrize('memmap', (False, True))\n def test_character_as_bytes(self, tmpdir, memmap):\n filename = str(tmpdir.join('test_simple.fts'))\n t1 = Table(self.data)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)\n t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)\n assert t2['b'].dtype.kind == 'U'\n assert t3['b'].dtype.kind == 'S'\n assert equal_data(t2, t3)\n # To avoid issues with --open-files, we need to remove references to\n # data that uses memory mapping and force the garbage collection\n del t1, t2, t3\n gc.collect()\n\n\nclass TestMultipleHDU:\n\n def setup_class(self):\n self.data1 = np.array(list(zip([1, 2, 3, 4],\n ['a', 'b', 'c', 'd'],\n [2.3, 4.5, 6.7, 8.9])),\n dtype=[(str('a'), int), (str('b'), str('U1')), (str('c'), float)])\n self.data2 = np.array(list(zip([1.4, 2.3, 3.2, 4.7],\n [2.3, 4.5, 6.7, 8.9])),\n dtype=[(str('p'), float), (str('q'), 
float)])\n hdu1 = PrimaryHDU()\n hdu2 = BinTableHDU(self.data1, name='first')\n hdu3 = BinTableHDU(self.data2, name='second')\n\n self.hdus = HDUList([hdu1, hdu2, hdu3])\n\n def teardown_class(self):\n del self.hdus\n\n def setup_method(self, method):\n warnings.filterwarnings('always')\n\n def test_read(self, tmpdir):\n filename = str(tmpdir.join('test_read.fits'))\n self.hdus.writeto(filename)\n with catch_warnings() as l:\n t = Table.read(filename)\n assert len(l) == 1\n assert str(l[0].message).startswith(\n 'hdu= was not specified but multiple tables are present, reading in first available table (hdu=1)')\n assert equal_data(t, self.data1)\n\n def test_read_with_hdu_0(self, tmpdir):\n filename = str(tmpdir.join('test_read_with_hdu_0.fits'))\n self.hdus.writeto(filename)\n with pytest.raises(ValueError) as exc:\n Table.read(filename, hdu=0)\n assert exc.value.args[0] == 'No table found in hdu=0'\n\n @pytest.mark.parametrize('hdu', [1, 'first'])\n def test_read_with_hdu_1(self, tmpdir, hdu):\n filename = str(tmpdir.join('test_read_with_hdu_1.fits'))\n self.hdus.writeto(filename)\n with catch_warnings() as l:\n t = Table.read(filename, hdu=hdu)\n assert len(l) == 0\n assert equal_data(t, self.data1)\n\n @pytest.mark.parametrize('hdu', [2, 'second'])\n def test_read_with_hdu_2(self, tmpdir, hdu):\n filename = str(tmpdir.join('test_read_with_hdu_2.fits'))\n self.hdus.writeto(filename)\n with catch_warnings() as l:\n t = Table.read(filename, hdu=hdu)\n assert len(l) == 0\n assert equal_data(t, self.data2)\n\n def test_read_from_hdulist(self):\n with catch_warnings() as l:\n t = Table.read(self.hdus)\n assert len(l) == 1\n assert str(l[0].message).startswith(\n 'hdu= was not specified but multiple tables are present, reading in first available table (hdu=1)')\n assert equal_data(t, self.data1)\n\n def test_read_from_hdulist_with_hdu_0(self, tmpdir):\n with pytest.raises(ValueError) as exc:\n Table.read(self.hdus, hdu=0)\n assert exc.value.args[0] == 'No table found in hdu=0'\n\n @pytest.mark.parametrize('hdu', [1, 'first'])\n def test_read_from_hdulist_with_hdu_1(self, tmpdir, hdu):\n with catch_warnings() as l:\n t = Table.read(self.hdus, hdu=hdu)\n assert len(l) == 0\n assert equal_data(t, self.data1)\n\n @pytest.mark.parametrize('hdu', [2, 'second'])\n def test_read_from_hdulist_with_hdu_2(self, tmpdir, hdu):\n with catch_warnings() as l:\n t = Table.read(self.hdus, hdu=hdu)\n assert len(l) == 0\n assert equal_data(t, self.data2)\n\n def test_read_from_single_hdu(self):\n with catch_warnings() as l:\n t = Table.read(self.hdus[1])\n assert len(l) == 0\n assert equal_data(t, self.data1)\n\n\ndef test_masking_regression_1795():\n \"\"\"\n Regression test for #1795 - this bug originally caused columns where TNULL\n was not defined to have their first element masked.\n \"\"\"\n t = Table.read(os.path.join(DATA, 'tb.fits'))\n assert np.all(t['c1'].mask == np.array([False, False]))\n assert np.all(t['c2'].mask == np.array([False, False]))\n assert np.all(t['c3'].mask == np.array([False, False]))\n assert np.all(t['c4'].mask == np.array([False, False]))\n assert np.all(t['c1'].data == np.array([1, 2]))\n assert np.all(t['c2'].data == np.array([b'abc', b'xy ']))\n assert_allclose(t['c3'].data, np.array([3.70000007153, 6.6999997139]))\n assert np.all(t['c4'].data == np.array([False, True]))\n\n\ndef test_scale_error():\n a = [1, 4, 5]\n b = [2.0, 5.0, 8.2]\n c = ['x', 'y', 'z']\n t = Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})\n t['a'].unit = '1.2'\n with 
pytest.raises(UnitScaleError) as exc:\n t.write('t.fits', format='fits', overwrite=True)\n assert exc.value.args[0] == \"The column 'a' could not be stored in FITS format because it has a scale '(1.2)' that is not recognized by the FITS standard. Either scale the data or change the units.\"\n\n\[email protected]('tdisp_str, format_return',\n [('EN10.5', ('EN', '10', '5', None)),\n ('F6.2', ('F', '6', '2', None)),\n ('B5.10', ('B', '5', '10', None)),\n ('E10.5E3', ('E', '10', '5', '3')),\n ('A21', ('A', '21', None, None))])\ndef test_parse_tdisp_format(tdisp_str, format_return):\n assert _parse_tdisp_format(tdisp_str) == format_return\n\n\[email protected]('tdisp_str, format_str_return',\n [('G15.4E2', '{:15.4g}'),\n ('Z5.10', '{:5x}'),\n ('I6.5', '{:6d}'),\n ('L8', '{:>8}'),\n ('E20.7', '{:20.7e}')])\ndef test_fortran_to_python_format(tdisp_str, format_str_return):\n assert _fortran_to_python_format(tdisp_str) == format_str_return\n\n\[email protected]('fmt_str, tdisp_str',\n [('{:3d}', 'I3'),\n ('3d', 'I3'),\n ('7.3f', 'F7.3'),\n ('{:>4}', 'A4'),\n ('{:7.4f}', 'F7.4'),\n ('%5.3g', 'G5.3'),\n ('%10s', 'A10'),\n ('%.4f', 'F13.4')])\ndef test_python_to_tdisp(fmt_str, tdisp_str):\n assert python_to_tdisp(fmt_str) == tdisp_str\n\n\ndef test_logical_python_to_tdisp():\n assert python_to_tdisp('{:>7}', logical_dtype=True) == 'L7'\n\n\ndef test_bool_column(tmpdir):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/1953\n\n Ensures that Table columns of bools are properly written to a FITS table.\n \"\"\"\n\n arr = np.ones(5, dtype=bool)\n arr[::2] == np.False_\n\n t = Table([arr])\n t.write(str(tmpdir.join('test.fits')), overwrite=True)\n\n with fits.open(str(tmpdir.join('test.fits'))) as hdul:\n assert hdul[1].data['col0'].dtype == np.dtype('bool')\n assert np.all(hdul[1].data['col0'] == arr)\n\n\ndef test_unicode_column(tmpdir):\n \"\"\"\n Test that a column of unicode strings is still written as one\n byte-per-character in the FITS table (so long as the column can be ASCII\n encoded).\n\n Regression test for one of the issues fixed in\n https://github.com/astropy/astropy/pull/4228\n \"\"\"\n\n t = Table([np.array([u'a', u'b', u'cd'])])\n t.write(str(tmpdir.join('test.fits')), overwrite=True)\n\n with fits.open(str(tmpdir.join('test.fits'))) as hdul:\n assert np.all(hdul[1].data['col0'] == ['a', 'b', 'cd'])\n assert hdul[1].header['TFORM1'] == '2A'\n\n t2 = Table([np.array([u'\\N{SNOWMAN}'])])\n\n with pytest.raises(UnicodeEncodeError):\n t2.write(str(tmpdir.join('test.fits')), overwrite=True)\n\n\ndef test_unit_warnings_read_write(tmpdir):\n filename = str(tmpdir.join('test_unit.fits'))\n t1 = Table([[1, 2], [3, 4]], names=['a', 'b'])\n t1['a'].unit = 'm/s'\n t1['b'].unit = 'not-a-unit'\n\n with catch_warnings() as l:\n t1.write(filename, overwrite=True)\n assert len(l) == 1\n assert str(l[0].message).startswith(\"'not-a-unit' did not parse as fits unit\")\n\n with catch_warnings() as l:\n Table.read(filename, hdu=1)\n assert len(l) == 0\n\n\ndef test_convert_comment_convention(tmpdir):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/6079\n \"\"\"\n filename = os.path.join(DATA, 'stddata.fits')\n t = Table.read(filename)\n\n assert t.meta['comments'] == [\n '',\n ' *** End of mandatory fields ***',\n '',\n '',\n ' *** Column names ***',\n '',\n '',\n ' *** Column formats ***',\n ''\n ]\n\n\ndef assert_objects_equal(obj1, obj2, attrs, compare_class=True):\n if compare_class:\n assert obj1.__class__ is obj2.__class__\n\n info_attrs = ['info.name', 
'info.format', 'info.unit', 'info.description', 'info.meta']\n for attr in attrs + info_attrs:\n a1 = obj1\n a2 = obj2\n for subattr in attr.split('.'):\n try:\n a1 = getattr(a1, subattr)\n a2 = getattr(a2, subattr)\n except AttributeError:\n a1 = a1[subattr]\n a2 = a2[subattr]\n\n # Mixin info.meta can None instead of empty OrderedDict(), #6720 would\n # fix this.\n if attr == 'info.meta':\n if a1 is None:\n a1 = {}\n if a2 is None:\n a2 = {}\n\n assert np.all(a1 == a2)\n\n# Testing FITS table read/write with mixins. This is mostly\n# copied from ECSV mixin testing.\n\nel = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)\nel2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)\nsc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',\n obstime='J1990.5')\nscc = sc.copy()\nscc.representation = 'cartesian'\ntm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el)\n\n\nmixin_cols = {\n 'tm': tm,\n 'dt': TimeDelta([1, 2] * u.day),\n 'sc': sc,\n 'scc': scc,\n 'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',\n obstime=['J1990.5', 'J1991.5']),\n 'q': [1, 2] * u.m,\n 'lat': Latitude([1, 2] * u.deg),\n 'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),\n 'ang': Angle([1, 2] * u.deg),\n 'el2': el2,\n}\n\ntime_attrs = ['value', 'shape', 'format', 'scale', 'location']\ncompare_attrs = {\n 'c1': ['data'],\n 'c2': ['data'],\n 'tm': time_attrs,\n 'dt': ['shape', 'value', 'format', 'scale'],\n 'sc': ['ra', 'dec', 'representation', 'frame.name'],\n 'scc': ['x', 'y', 'z', 'representation', 'frame.name'],\n 'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'],\n 'q': ['value', 'unit'],\n 'lon': ['value', 'unit', 'wrap_angle'],\n 'lat': ['value', 'unit'],\n 'ang': ['value', 'unit'],\n 'el2': ['x', 'y', 'z', 'ellipsoid'],\n 'nd': ['x', 'y', 'z'],\n}\n\n\[email protected]('not HAS_YAML')\ndef test_fits_mixins_qtable_to_table(tmpdir):\n \"\"\"Test writing as QTable and reading as Table. 
Ensure correct classes\n come out.\n \"\"\"\n filename = str(tmpdir.join('test_simple.fits'))\n\n names = sorted(mixin_cols)\n\n t = QTable([mixin_cols[name] for name in names], names=names)\n t.write(filename, format='fits')\n t2 = Table.read(filename, format='fits', astropy_native=True)\n\n assert t.colnames == t2.colnames\n\n for name, col in t.columns.items():\n col2 = t2[name]\n\n # Special-case Time, which does not yet support round-tripping\n # the format.\n if isinstance(col2, Time):\n col2.format = col.format\n\n attrs = compare_attrs[name]\n compare_class = True\n\n if isinstance(col.info, QuantityInfo):\n # Downgrade Quantity to Column + unit\n assert type(col2) is Column\n # Class-specific attributes like `value` or `wrap_angle` are lost.\n attrs = ['unit']\n compare_class = False\n # Compare data values here (assert_objects_equal doesn't know how in this case)\n assert np.all(col.value == col2)\n\n assert_objects_equal(col, col2, attrs, compare_class)\n\n\[email protected]('not HAS_YAML')\[email protected]('table_cls', (Table, QTable))\ndef test_fits_mixins_as_one(table_cls, tmpdir):\n \"\"\"Test write/read all cols at once and validate intermediate column names\"\"\"\n filename = str(tmpdir.join('test_simple.fits'))\n names = sorted(mixin_cols)\n\n serialized_names = ['ang',\n 'dt.jd1', 'dt.jd2',\n 'el2.x', 'el2.y', 'el2.z',\n 'lat',\n 'lon',\n 'q',\n 'sc.ra', 'sc.dec',\n 'scc.x', 'scc.y', 'scc.z',\n 'scd.ra', 'scd.dec', 'scd.distance',\n 'scd.obstime.jd1', 'scd.obstime.jd2',\n 'tm', # serialize_method is formatted_value\n ]\n\n t = table_cls([mixin_cols[name] for name in names], names=names)\n t.meta['C'] = 'spam'\n t.meta['comments'] = ['this', 'is', 'a', 'comment']\n t.meta['history'] = ['first', 'second', 'third']\n\n t.write(filename, format=\"fits\")\n\n t2 = table_cls.read(filename, format='fits', astropy_native=True)\n assert t2.meta['C'] == 'spam'\n assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']\n assert t2.meta['HISTORY'] == ['first', 'second', 'third']\n\n assert t.colnames == t2.colnames\n\n # Read directly via fits and confirm column names\n hdus = fits.open(filename)\n assert hdus[1].columns.names == serialized_names\n\n\[email protected]('not HAS_YAML')\[email protected]('name_col', list(mixin_cols.items()))\[email protected]('table_cls', (Table, QTable))\ndef test_fits_mixins_per_column(table_cls, name_col, tmpdir):\n \"\"\"Test write/read one col at a time and do detailed validation\"\"\"\n filename = str(tmpdir.join('test_simple.fits'))\n name, col = name_col\n\n c = [1.0, 2.0]\n t = table_cls([c, col, c], names=['c1', name, 'c2'])\n t[name].info.description = 'my \\n\\n\\n description'\n t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}\n\n if not t.has_mixin_columns:\n pytest.skip('column is not a mixin (e.g. 
Quantity subclass in Table)')\n\n if isinstance(t[name], NdarrayMixin):\n pytest.xfail('NdarrayMixin not supported')\n\n t.write(filename, format=\"fits\")\n t2 = table_cls.read(filename, format='fits', astropy_native=True)\n\n assert t.colnames == t2.colnames\n\n for colname in t.colnames:\n assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])\n\n # Special case to make sure Column type doesn't leak into Time class data\n if name.startswith('tm'):\n assert t2[name]._time.jd1.__class__ is np.ndarray\n assert t2[name]._time.jd2.__class__ is np.ndarray\n\n\[email protected]('HAS_YAML')\ndef test_warn_for_dropped_info_attributes(tmpdir):\n filename = str(tmpdir.join('test.fits'))\n t = Table([[1, 2]])\n t['col0'].info.description = 'hello'\n with catch_warnings() as warns:\n t.write(filename, overwrite=True)\n assert len(warns) == 1\n assert str(warns[0].message).startswith(\n \"table contains column(s) with defined 'format'\")\n\n\[email protected]('HAS_YAML')\ndef test_error_for_mixins_but_no_yaml(tmpdir):\n filename = str(tmpdir.join('test.fits'))\n t = Table([mixin_cols['sc']])\n with pytest.raises(TypeError) as err:\n t.write(filename)\n assert \"cannot write type SkyCoord column 'col0' to FITS without PyYAML\" in str(err)\n\n\[email protected]('not HAS_YAML')\ndef test_info_attributes_with_no_mixins(tmpdir):\n \"\"\"Even if there are no mixin columns, if there is metadata that would be lost it still\n gets serialized\n \"\"\"\n filename = str(tmpdir.join('test.fits'))\n t = Table([[1.0, 2.0]])\n t['col0'].description = 'hello' * 40\n t['col0'].format = '{:8.4f}'\n t['col0'].meta['a'] = {'b': 'c'}\n t.write(filename, overwrite=True)\n\n t2 = Table.read(filename)\n assert t2['col0'].description == 'hello' * 40\n assert t2['col0'].format == '{:8.4f}'\n assert t2['col0'].meta['a'] == {'b': 'c'}\n\n\[email protected]('not HAS_YAML')\[email protected]('method', ['set_cols', 'names', 'class'])\ndef test_round_trip_masked_table_serialize_mask(tmpdir, method):\n \"\"\"\n Same as previous test but set the serialize_method to 'data_mask' so mask is\n written out and the behavior is all correct.\n \"\"\"\n filename = str(tmpdir.join('test.fits'))\n\n t = simple_table(masked=True) # int, float, and str cols with one masked element\n\n if method == 'set_cols':\n for col in t.itercols():\n col.info.serialize_method['fits'] = 'data_mask'\n t.write(filename)\n elif method == 'names':\n t.write(filename, serialize_method={'a': 'data_mask', 'b': 'data_mask',\n 'c': 'data_mask'})\n elif method == 'class':\n t.write(filename, serialize_method='data_mask')\n\n t2 = Table.read(filename)\n assert t2.masked is True\n assert t2.colnames == t.colnames\n for name in t2.colnames:\n assert np.all(t2[name].mask == t[name].mask)\n assert np.all(t2[name] == t[name])\n\n # Data under the mask round-trips also (unmask data to show this).\n t[name].mask = False\n t2[name].mask = False\n assert np.all(t2[name] == t[name])\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\nimport numpy as np\nimport numpy.ma as ma\n\nfrom ..convolve import convolve, convolve_fft\n\nfrom numpy.testing import assert_array_almost_equal_nulp, assert_array_almost_equal\n\nimport itertools\n\nVALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')\nVALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))\n\nBOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']\nNANHANDLING_OPTIONS = ['interpolate', 'fill']\nNORMALIZE_OPTIONS = [True, False]\nPRESERVE_NAN_OPTIONS = [True, False]\n\nBOUNDARIES_AND_CONVOLUTIONS = (list(zip(itertools.cycle((convolve,)),\n BOUNDARY_OPTIONS)) + [(convolve_fft,\n 'wrap'),\n (convolve_fft,\n 'fill')])\nHAS_SCIPY = True\ntry:\n import scipy\nexcept ImportError:\n HAS_SCIPY = False\n\nHAS_PANDAS = True\ntry:\n import pandas\nexcept ImportError:\n HAS_PANDAS = False\n\n\nclass TestConvolve1D:\n def test_list(self):\n \"\"\"\n Test that convolve works correctly when inputs are lists\n \"\"\"\n\n x = [1, 4, 5, 6, 5, 7, 8]\n y = [0.2, 0.6, 0.2]\n z = convolve(x, y, boundary=None)\n assert_array_almost_equal_nulp(z,\n np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)\n\n def test_tuple(self):\n \"\"\"\n Test that convolve works correctly when inputs are tuples\n \"\"\"\n\n x = (1, 4, 5, 6, 5, 7, 8)\n y = (0.2, 0.6, 0.2)\n z = convolve(x, y, boundary=None)\n assert_array_almost_equal_nulp(z,\n np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)\n\n @pytest.mark.parametrize(('boundary', 'nan_treatment',\n 'normalize_kernel', 'preserve_nan', 'dtype'),\n itertools.product(BOUNDARY_OPTIONS,\n NANHANDLING_OPTIONS,\n NORMALIZE_OPTIONS,\n PRESERVE_NAN_OPTIONS,\n VALID_DTYPES))\n def test_input_unmodified(self, boundary, nan_treatment,\n normalize_kernel, preserve_nan, dtype):\n \"\"\"\n Test that convolve works correctly when inputs are lists\n \"\"\"\n\n array = [1., 4., 5., 6., 5., 7., 8.]\n kernel = [0.2, 0.6, 0.2]\n x = np.array(array, dtype=dtype)\n y = np.array(kernel, dtype=dtype)\n\n # Make pseudoimmutable\n x.flags.writeable = False\n y.flags.writeable = False\n\n z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,\n normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)\n\n assert np.all(np.array(array, dtype=dtype) == x)\n assert np.all(np.array(kernel, dtype=dtype) == y)\n\n @pytest.mark.parametrize(('boundary', 'nan_treatment',\n 'normalize_kernel', 'preserve_nan', 'dtype'),\n itertools.product(BOUNDARY_OPTIONS,\n NANHANDLING_OPTIONS,\n NORMALIZE_OPTIONS,\n PRESERVE_NAN_OPTIONS,\n VALID_DTYPES))\n def test_input_unmodified_with_nan(self, boundary, nan_treatment,\n normalize_kernel, preserve_nan, dtype):\n \"\"\"\n Test that convolve doesn't modify the input data\n \"\"\"\n\n array = [1., 4., 5., np.nan, 5., 7., 8.]\n kernel = [0.2, 0.6, 0.2]\n x = np.array(array, dtype=dtype)\n y = np.array(kernel, dtype=dtype)\n\n # Make pseudoimmutable\n x.flags.writeable = False\n y.flags.writeable = False\n\n # make copies for post call comparison\n x_copy = x.copy()\n y_copy = y.copy()\n\n z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,\n normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)\n\n # ( NaN == NaN ) = False\n # Only compare non NaN values for canonical equivilance\n # and then check NaN explicitly with np.isnan()\n array_is_nan = np.isnan(array)\n kernel_is_nan = np.isnan(kernel)\n array_not_nan = ~array_is_nan\n kernel_not_nan = ~kernel_is_nan\n assert np.all(x_copy[array_not_nan] == x[array_not_nan])\n assert 
np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])\n assert np.all(np.isnan(x[array_is_nan]))\n assert np.all(np.isnan(y[kernel_is_nan]))\n\n @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)\n def test_dtype(self, dtype_array, dtype_kernel):\n '''\n Test that 32- and 64-bit floats are correctly handled\n '''\n\n x = np.array([1., 2., 3.], dtype=dtype_array)\n\n y = np.array([0., 1., 0.], dtype=dtype_kernel)\n\n z = convolve(x, y)\n\n assert x.dtype == z.dtype\n\n @pytest.mark.parametrize(('convfunc', 'boundary',), BOUNDARIES_AND_CONVOLUTIONS)\n def test_unity_1_none(self, boundary, convfunc):\n '''\n Test that a unit kernel with a single element returns the same array\n '''\n\n x = np.array([1., 2., 3.], dtype='>f8')\n\n y = np.array([1.], dtype='>f8')\n\n z = convfunc(x, y, boundary=boundary)\n\n np.testing.assert_allclose(z, x)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_unity_3(self, boundary):\n '''\n Test that a unit kernel with three elements returns the same array\n (except when boundary is None).\n '''\n\n x = np.array([1., 2., 3.], dtype='>f8')\n\n y = np.array([0., 1., 0.], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary)\n\n if boundary is None:\n assert np.all(z == np.array([0., 2., 0.], dtype='>f8'))\n else:\n assert np.all(z == x)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_uniform_3(self, boundary):\n '''\n Test that the different modes are producing the correct results using\n a uniform kernel with three elements\n '''\n\n x = np.array([1., 0., 3.], dtype='>f8')\n\n y = np.array([1., 1., 1.], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, normalize_kernel=False)\n\n if boundary is None:\n assert np.all(z == np.array([0., 4., 0.], dtype='>f8'))\n elif boundary == 'fill':\n assert np.all(z == np.array([1., 4., 3.], dtype='>f8'))\n elif boundary == 'wrap':\n assert np.all(z == np.array([4., 4., 4.], dtype='>f8'))\n else:\n assert np.all(z == np.array([2., 4., 6.], dtype='>f8'))\n\n @pytest.mark.parametrize(('boundary', 'nan_treatment',\n 'normalize_kernel', 'preserve_nan'),\n itertools.product(BOUNDARY_OPTIONS,\n NANHANDLING_OPTIONS,\n NORMALIZE_OPTIONS,\n PRESERVE_NAN_OPTIONS))\n def test_unity_3_withnan(self, boundary, nan_treatment,\n normalize_kernel, preserve_nan):\n '''\n Test that a unit kernel with three elements returns the same array\n (except when boundary is None). This version includes a NaN value in\n the original array.\n '''\n\n x = np.array([1., np.nan, 3.], dtype='>f8')\n\n y = np.array([0., 1., 0.], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,\n normalize_kernel=normalize_kernel,\n preserve_nan=preserve_nan)\n\n if preserve_nan:\n assert np.isnan(z[1])\n\n x = np.nan_to_num(z)\n z = np.nan_to_num(z)\n\n if boundary is None:\n assert np.all(z == np.array([0., 0., 0.], dtype='>f8'))\n else:\n assert np.all(z == x)\n\n @pytest.mark.parametrize(('boundary', 'nan_treatment',\n 'normalize_kernel', 'preserve_nan'),\n itertools.product(BOUNDARY_OPTIONS,\n NANHANDLING_OPTIONS,\n NORMALIZE_OPTIONS,\n PRESERVE_NAN_OPTIONS))\n def test_uniform_3_withnan(self, boundary, nan_treatment, normalize_kernel,\n preserve_nan):\n '''\n Test that the different modes are producing the correct results using\n a uniform kernel with three elements. 
This version includes a NaN\n value in the original array.\n '''\n\n x = np.array([1., np.nan, 3.], dtype='>f8')\n\n y = np.array([1., 1., 1.], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,\n normalize_kernel=normalize_kernel,\n preserve_nan=preserve_nan)\n\n if preserve_nan:\n assert np.isnan(z[1])\n\n z = np.nan_to_num(z)\n\n # boundary, nan_treatment, normalize_kernel\n rslt = {\n (None, 'interpolate', True): [0, 2, 0],\n (None, 'interpolate', False): [0, 6, 0],\n (None, 'fill', True): [0, 4/3., 0],\n (None, 'fill', False): [0, 4, 0],\n ('fill', 'interpolate', True): [1/2., 2, 3/2.],\n ('fill', 'interpolate', False): [3/2., 6, 9/2.],\n ('fill', 'fill', True): [1/3., 4/3., 3/3.],\n ('fill', 'fill', False): [1, 4, 3],\n ('wrap', 'interpolate', True): [2, 2, 2],\n ('wrap', 'interpolate', False): [6, 6, 6],\n ('wrap', 'fill', True): [4/3., 4/3., 4/3.],\n ('wrap', 'fill', False): [4, 4, 4],\n ('extend', 'interpolate', True): [1, 2, 3],\n ('extend', 'interpolate', False): [3, 6, 9],\n ('extend', 'fill', True): [2/3., 4/3., 6/3.],\n ('extend', 'fill', False): [2, 4, 6],\n }[boundary, nan_treatment, normalize_kernel]\n if preserve_nan:\n rslt[1] = 0\n\n assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)\n\n @pytest.mark.parametrize(('boundary', 'normalize_kernel'),\n itertools.product(BOUNDARY_OPTIONS,\n NORMALIZE_OPTIONS))\n def test_zero_sum_kernel(self, boundary, normalize_kernel):\n \"\"\"\n Test that convolve works correctly with zero sum kernels.\n \"\"\"\n\n if normalize_kernel:\n pytest.xfail(\"You can't normalize by a zero sum kernel\")\n\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n y = [-1, -1, -1, -1, 8, -1, -1, -1, -1]\n assert(np.isclose(sum(y), 0, atol=1e-8))\n\n z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)\n\n # boundary, normalize_kernel == False\n rslt = {\n (None): [0., 0., 0., 0., 0., 0., 0., 0., 0.],\n ('fill'): [-6., -3., -1., 0., 0., 10., 21., 33., 46.],\n ('wrap'): [-36., -27., -18., -9., 0., 9., 18., 27., 36.],\n ('extend'): [-10., -6., -3., -1., 0., 1., 3., 6., 10.]\n }[boundary]\n\n assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)\n\n @pytest.mark.parametrize(('boundary', 'normalize_kernel'),\n itertools.product(BOUNDARY_OPTIONS,\n NORMALIZE_OPTIONS))\n def test_int_masked_kernel(self, boundary, normalize_kernel):\n \"\"\"\n Test that convolve works correctly with integer masked kernels.\n \"\"\"\n\n if normalize_kernel:\n pytest.xfail(\"You can't normalize by a zero sum kernel\")\n\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n y = ma.array([-1, -1, -1, -1, 8, -1, -1, -1, -1], mask=[1, 0, 0, 0, 0, 0, 0, 0, 0], fill_value=0.)\n\n z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)\n\n # boundary, normalize_kernel == False\n rslt = {\n (None): [0., 0., 0., 0., 9., 0., 0., 0., 0.],\n ('fill'): [-1., 3., 6., 8., 9., 10., 21., 33., 46.],\n ('wrap'): [-31., -21., -11., -1., 9., 10., 20., 30., 40.],\n ('extend'): [-5., 0., 4., 7., 9., 10., 12., 15., 19.]\n }[boundary]\n\n assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)\n\n @pytest.mark.parametrize('preserve_nan', PRESERVE_NAN_OPTIONS)\n def test_int_masked_array(self, preserve_nan):\n \"\"\"\n Test that convolve works correctly with integer masked arrays.\n \"\"\"\n\n x = ma.array([3, 5, 7, 11, 13], mask=[0, 0, 1, 0, 0], fill_value=0.)\n y = np.array([1., 1., 1.], dtype='>f8')\n\n z = convolve(x, y, preserve_nan=preserve_nan)\n\n if preserve_nan:\n assert np.isnan(z[2])\n z[2] = 8\n\n 
assert_array_almost_equal_nulp(z, (8/3., 4, 8, 12, 8), 10)\n\nclass TestConvolve2D:\n def test_list(self):\n \"\"\"\n Test that convolve works correctly when inputs are lists\n \"\"\"\n x = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n\n z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=True)\n assert_array_almost_equal_nulp(z, x, 10)\n z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)\n assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)\n\n @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)\n def test_dtype(self, dtype_array, dtype_kernel):\n '''\n Test that 32- and 64-bit floats are correctly handled\n '''\n\n x = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]], dtype=dtype_array)\n\n y = np.array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 0., 0.]], dtype=dtype_kernel)\n\n z = convolve(x, y)\n\n assert x.dtype == z.dtype\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_unity_1x1_none(self, boundary):\n '''\n Test that a 1x1 unit kernel returns the same array\n '''\n\n x = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]], dtype='>f8')\n\n y = np.array([[1.]], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary)\n\n assert np.all(z == x)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_unity_3x3(self, boundary):\n '''\n Test that a 3x3 unit kernel returns the same array (except when\n boundary is None).\n '''\n\n x = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]], dtype='>f8')\n\n y = np.array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 0., 0.]], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary)\n\n if boundary is None:\n assert np.all(z == np.array([[0., 0., 0.],\n [0., 5., 0.],\n [0., 0., 0.]], dtype='>f8'))\n else:\n assert np.all(z == x)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_uniform_3x3(self, boundary):\n '''\n Test that the different modes are producing the correct results using\n a 3x3 uniform kernel.\n '''\n\n x = np.array([[0., 0., 3.],\n [1., 0., 0.],\n [0., 2., 0.]], dtype='>f8')\n\n y = np.array([[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, normalize_kernel=False)\n\n if boundary is None:\n assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],\n [0., 6., 0.],\n [0., 0., 0.]], dtype='>f8'), 10)\n elif boundary == 'fill':\n assert_array_almost_equal_nulp(z, np.array([[1., 4., 3.],\n [3., 6., 5.],\n [3., 3., 2.]], dtype='>f8'), 10)\n elif boundary == 'wrap':\n assert_array_almost_equal_nulp(z, np.array([[6., 6., 6.],\n [6., 6., 6.],\n [6., 6., 6.]], dtype='>f8'), 10)\n else:\n assert_array_almost_equal_nulp(z, np.array([[2., 7., 12.],\n [4., 6., 8.],\n [6., 5., 4.]], dtype='>f8'), 10)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_unity_3x3_withnan(self, boundary):\n '''\n Test that a 3x3 unit kernel returns the same array (except when\n boundary is None). 
This version includes a NaN value in the original\n array.\n '''\n\n x = np.array([[1., 2., 3.],\n [4., np.nan, 6.],\n [7., 8., 9.]], dtype='>f8')\n\n y = np.array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 0., 0.]], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, nan_treatment='fill',\n preserve_nan=True)\n\n assert np.isnan(z[1, 1])\n x = np.nan_to_num(z)\n z = np.nan_to_num(z)\n\n if boundary is None:\n assert np.all(z == np.array([[0., 0., 0.],\n [0., 0., 0.],\n [0., 0., 0.]], dtype='>f8'))\n else:\n assert np.all(z == x)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_uniform_3x3_withnanfilled(self, boundary):\n '''\n Test that the different modes are producing the correct results using\n a 3x3 uniform kernel. This version includes a NaN value in the\n original array.\n '''\n\n x = np.array([[0., 0., 4.],\n [1., np.nan, 0.],\n [0., 3., 0.]], dtype='>f8')\n\n y = np.array([[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, nan_treatment='fill',\n normalize_kernel=False)\n\n if boundary is None:\n assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],\n [0., 8., 0.],\n [0., 0., 0.]], dtype='>f8'), 10)\n elif boundary == 'fill':\n assert_array_almost_equal_nulp(z, np.array([[1., 5., 4.],\n [4., 8., 7.],\n [4., 4., 3.]], dtype='>f8'), 10)\n elif boundary == 'wrap':\n assert_array_almost_equal_nulp(z, np.array([[8., 8., 8.],\n [8., 8., 8.],\n [8., 8., 8.]], dtype='>f8'), 10)\n elif boundary == 'extend':\n assert_array_almost_equal_nulp(z, np.array([[2., 9., 16.],\n [5., 8., 11.],\n [8., 7., 6.]], dtype='>f8'), 10)\n else:\n raise ValueError(\"Invalid boundary specification\")\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_uniform_3x3_withnaninterped(self, boundary):\n '''\n Test that the different modes are producing the correct results using\n a 3x3 uniform kernel. 
This version includes a NaN value in the\n original array.\n '''\n\n x = np.array([[0., 0., 4.],\n [1., np.nan, 0.],\n [0., 3., 0.]], dtype='>f8')\n\n y = np.array([[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',\n normalize_kernel=True)\n\n if boundary is None:\n assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 0., 0.]], dtype='>f8'), 10)\n elif boundary == 'fill':\n assert_array_almost_equal_nulp(z, np.array([[1./8, 5./8, 4./8],\n [4./8, 8./8, 7./8],\n [4./8, 4./8, 3./8]], dtype='>f8'), 10)\n elif boundary == 'wrap':\n assert_array_almost_equal_nulp(z, np.array([[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]], dtype='>f8'), 10)\n elif boundary == 'extend':\n assert_array_almost_equal_nulp(z, np.array([[2./8, 9./8, 16./8],\n [5./8, 8./8, 11./8],\n [8./8, 7./8, 6./8]], dtype='>f8'), 10)\n else:\n raise ValueError(\"Invalid boundary specification\")\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_non_normalized_kernel_2D(self, boundary):\n\n x = np.array([[0., 0., 4.],\n [1., 2., 0.],\n [0., 3., 0.]], dtype='float')\n\n y = np.array([[1., -1., 1.],\n [-1., 0., -1.],\n [1., -1., 1.]], dtype='float')\n\n z = convolve(x, y, boundary=boundary, nan_treatment='fill',\n normalize_kernel=False)\n\n if boundary is None:\n assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],\n [0., 0., 0.],\n [0., 0., 0.]], dtype='float'), 10)\n elif boundary == 'fill':\n assert_array_almost_equal_nulp(z, np.array([[1., -5., 2.],\n [1., 0., -3.],\n [-2., -1., -1.]], dtype='float'), 10)\n elif boundary == 'wrap':\n assert_array_almost_equal_nulp(z, np.array([[0., -8., 6.],\n [5., 0., -4.],\n [2., 3., -4.]], dtype='float'), 10)\n elif boundary == 'extend':\n assert_array_almost_equal_nulp(z, np.array([[2., -1., -2.],\n [0., 0., 1.],\n [2., -4., 2.]], dtype='float'), 10)\n else:\n raise ValueError(\"Invalid boundary specification\")\n\n\nclass TestConvolve3D:\n def test_list(self):\n \"\"\"\n Test that convolve works correctly when inputs are lists\n \"\"\"\n x = [[[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]],\n [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]],\n [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]]\n\n z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)\n assert_array_almost_equal_nulp(z / 27, x, 10)\n\n @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)\n def test_dtype(self, dtype_array, dtype_kernel):\n '''\n Test that 32- and 64-bit floats are correctly handled\n '''\n\n x = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]], dtype=dtype_array)\n\n y = np.array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 0., 0.]], dtype=dtype_kernel)\n\n z = convolve(x, y)\n\n assert x.dtype == z.dtype\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_unity_1x1x1_none(self, boundary):\n '''\n Test that a 1x1x1 unit kernel returns the same array\n '''\n\n x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],\n [[4., 3., 1.], [5., 0., 2.], [6., 1., 1.]],\n [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')\n\n y = np.array([[[1.]]], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary)\n\n assert np.all(z == x)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_unity_3x3x3(self, boundary):\n '''\n Test that a 3x3x3 unit kernel returns the same array (except when\n boundary is None).\n '''\n\n x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],\n [[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],\n [[7., 0., 
2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')\n\n y = np.zeros((3, 3, 3), dtype='>f8')\n y[1, 1, 1] = 1.\n\n z = convolve(x, y, boundary=boundary)\n\n if boundary is None:\n assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 3., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))\n else:\n assert np.all(z == x)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_uniform_3x3x3(self, boundary):\n '''\n Test that the different modes are producing the correct results using\n a 3x3 uniform kernel.\n '''\n\n x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],\n [[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],\n [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')\n\n y = np.ones((3, 3, 3), dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, normalize_kernel=False)\n\n if boundary is None:\n assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 81., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)\n elif boundary == 'fill':\n assert_array_almost_equal_nulp(z, np.array([[[23., 28., 16.], [35., 46., 25.], [25., 34., 18.]],\n [[40., 50., 23.], [63., 81., 36.], [46., 60., 27.]],\n [[32., 40., 16.], [50., 61., 22.], [36., 44., 16.]]], dtype='>f8'), 10)\n elif boundary == 'wrap':\n assert_array_almost_equal_nulp(z, np.array([[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],\n [[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],\n [[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]]], dtype='>f8'), 10)\n else:\n assert_array_almost_equal_nulp(z, np.array([[[65., 54., 43.], [75., 66., 57.], [85., 78., 71.]],\n [[96., 71., 46.], [108., 81., 54.], [120., 91., 62.]],\n [[127., 88., 49.], [141., 96., 51.], [155., 104., 53.]]], dtype='>f8'), 10)\n\n @pytest.mark.parametrize(('boundary', 'nan_treatment'),\n itertools.product(BOUNDARY_OPTIONS,\n NANHANDLING_OPTIONS))\n def test_unity_3x3x3_withnan(self, boundary, nan_treatment):\n '''\n Test that a 3x3x3 unit kernel returns the same array (except when\n boundary is None). This version includes a NaN value in the original\n array.\n '''\n\n x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],\n [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],\n [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')\n\n y = np.zeros((3, 3, 3), dtype='>f8')\n y[1, 1, 1] = 1.\n\n z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,\n preserve_nan=True)\n\n assert np.isnan(z[1, 1, 1])\n x = np.nan_to_num(z)\n z = np.nan_to_num(z)\n\n if boundary is None:\n assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))\n else:\n assert np.all(z == x)\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_uniform_3x3x3_withnan_filled(self, boundary):\n '''\n Test that the different modes are producing the correct results using\n a 3x3 uniform kernel. 
This version includes a NaN value in the\n original array.\n '''\n\n x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],\n [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],\n [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')\n\n y = np.ones((3, 3, 3), dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, nan_treatment='fill',\n normalize_kernel=False)\n\n if boundary is None:\n assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)\n elif boundary == 'fill':\n assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],\n [32., 43., 22.],\n [22., 31., 15.]],\n [[37., 47., 20.],\n [60., 78., 33.],\n [43., 57., 24.]],\n [[29., 37., 13.],\n [47., 58., 19.],\n [33., 41., 13.]]], dtype='>f8'), 10)\n elif boundary == 'wrap':\n assert_array_almost_equal_nulp(z, np.array([[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],\n [[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],\n [[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]]], dtype='>f8'), 10)\n elif boundary == 'extend':\n assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],\n [72., 63., 54.],\n [82., 75., 68.]],\n [[93., 68., 43.],\n [105., 78., 51.],\n [117., 88., 59.]],\n [[124., 85., 46.],\n [138., 93., 48.],\n [152., 101., 50.]]],\n dtype='>f8'), 10)\n else:\n raise ValueError(\"Invalid Boundary Option\")\n\n @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)\n def test_uniform_3x3x3_withnan_interped(self, boundary):\n '''\n Test that the different modes are producing the correct results using\n a 3x3 uniform kernel. This version includes a NaN value in the\n original array.\n '''\n\n x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],\n [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],\n [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')\n\n y = np.ones((3, 3, 3), dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',\n normalize_kernel=True)\n\n kernsum = y.sum() - 1 # one nan is missing\n mid = x[np.isfinite(x)].sum() / kernsum\n\n if boundary is None:\n assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],\n [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],\n dtype='>f8')/kernsum, 10)\n elif boundary == 'fill':\n assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],\n [32., 43., 22.],\n [22., 31., 15.]],\n [[37., 47., 20.],\n [60., 78., 33.],\n [43., 57., 24.]],\n [[29., 37., 13.],\n [47., 58., 19.],\n [33., 41., 13.]]],\n dtype='>f8')/kernsum, 10)\n elif boundary == 'wrap':\n assert_array_almost_equal_nulp(z, np.tile(mid.astype('>f8'), [3, 3, 3]), 10)\n elif boundary == 'extend':\n assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],\n [72., 63., 54.],\n [82., 75., 68.]],\n [[93., 68., 43.],\n [105., 78., 51.],\n [117., 88., 59.]],\n [[124., 85., 46.],\n [138., 93., 48.],\n [152., 101., 50.]]],\n dtype='>f8')/kernsum, 10)\n else:\n raise ValueError(\"Invalid Boundary Option\")\n\n\[email protected](('boundary'), BOUNDARY_OPTIONS)\ndef test_asymmetric_kernel(boundary):\n '''\n Regression test for #6264: make sure that asymmetric convolution\n functions go the right direction\n '''\n\n x = np.array([3., 0., 1.], dtype='>f8')\n\n y = np.array([1, 2, 3], dtype='>f8')\n\n z = convolve(x, y, boundary=boundary, normalize_kernel=False)\n\n if boundary == 'fill':\n assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 
10)\n elif boundary is None:\n assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10)\n elif boundary == 'extend':\n assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10)\n elif boundary == 'wrap':\n assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)\n\n\[email protected]('ndims', (1, 2, 3))\ndef test_convolution_consistency(ndims):\n\n np.random.seed(0)\n array = np.random.randn(*([3]*ndims))\n np.random.seed(0)\n kernel = np.random.rand(*([3]*ndims))\n\n conv_f = convolve_fft(array, kernel, boundary='fill')\n conv_d = convolve(array, kernel, boundary='fill')\n\n assert_array_almost_equal_nulp(conv_f, conv_d, 30)\n\n\ndef test_astropy_convolution_against_numpy():\n x = np.array([1, 2, 3])\n y = np.array([5, 4, 3, 2, 1])\n\n assert_array_almost_equal(np.convolve(y, x, 'same'),\n convolve(y, x, normalize_kernel=False))\n assert_array_almost_equal(np.convolve(y, x, 'same'),\n convolve_fft(y, x, normalize_kernel=False))\n\n\[email protected]('not HAS_SCIPY')\ndef test_astropy_convolution_against_scipy():\n from scipy.signal import fftconvolve\n x = np.array([1, 2, 3])\n y = np.array([5, 4, 3, 2, 1])\n\n assert_array_almost_equal(fftconvolve(y, x, 'same'),\n convolve(y, x, normalize_kernel=False))\n assert_array_almost_equal(fftconvolve(y, x, 'same'),\n convolve_fft(y, x, normalize_kernel=False))\n\[email protected]('not HAS_PANDAS')\ndef test_regression_6099():\n wave = np.array((np.linspace(5000, 5100, 10)))\n boxcar = 3\n nonseries_result = convolve(wave, np.ones((boxcar,))/boxcar)\n\n wave_series = pandas.Series(wave)\n series_result = convolve(wave_series, np.ones((boxcar,))/boxcar)\n\n assert_array_almost_equal(nonseries_result, series_result)\n\ndef test_invalid_array_convolve():\n kernel = np.ones(3)/3.\n\n with pytest.raises(TypeError):\n convolve('glork', kernel)\n",
"# Licensed under a 3-clause BSD style license - see PYFITS.rst\n\n\nimport gzip\nimport itertools\nimport io\nimport mmap\nimport operator\nimport os\nimport platform\nimport signal\nimport sys\nimport tempfile\nimport textwrap\nimport threading\nimport warnings\nimport weakref\nfrom contextlib import contextmanager, suppress\nfrom ...utils import data\n\nfrom distutils.version import LooseVersion\n\nimport numpy as np\n\nfrom ...utils import wraps\nfrom ...utils.exceptions import AstropyUserWarning\n\ncmp = lambda a, b: (a > b) - (a < b)\n\nall_integer_types = (int, np.integer)\n\n\nclass NotifierMixin:\n \"\"\"\n Mixin class that provides services by which objects can register\n listeners to changes on that object.\n\n All methods provided by this class are underscored, since this is intended\n for internal use to communicate between classes in a generic way, and is\n not machinery that should be exposed to users of the classes involved.\n\n Use the ``_add_listener`` method to register a listener on an instance of\n the notifier. This registers the listener with a weak reference, so if\n no other references to the listener exist it is automatically dropped from\n the list and does not need to be manually removed.\n\n Call the ``_notify`` method on the notifier to update all listeners\n upon changes. ``_notify('change_type', *args, **kwargs)`` results\n in calling ``listener._update_change_type(*args, **kwargs)`` on all\n listeners subscribed to that notifier.\n\n If a particular listener does not have the appropriate update method\n it is ignored.\n\n Examples\n --------\n\n >>> class Widget(NotifierMixin):\n ... state = 1\n ... def __init__(self, name):\n ... self.name = name\n ... def update_state(self):\n ... self.state += 1\n ... self._notify('widget_state_changed', self)\n ...\n >>> class WidgetListener:\n ... def _update_widget_state_changed(self, widget):\n ... print('Widget {0} changed state to {1}'.format(\n ... widget.name, widget.state))\n ...\n >>> widget = Widget('fred')\n >>> listener = WidgetListener()\n >>> widget._add_listener(listener)\n >>> widget.update_state()\n Widget fred changed state to 2\n \"\"\"\n\n _listeners = None\n\n def _add_listener(self, listener):\n \"\"\"\n Add an object to the list of listeners to notify of changes to this\n object. This adds a weakref to the list of listeners that is\n removed from the listeners list when the listener has no other\n references to it.\n \"\"\"\n\n if self._listeners is None:\n self._listeners = weakref.WeakValueDictionary()\n\n self._listeners[id(listener)] = listener\n\n def _remove_listener(self, listener):\n \"\"\"\n Removes the specified listener from the listeners list. This relies\n on object identity (i.e. 
the ``is`` operator).\n \"\"\"\n\n if self._listeners is None:\n return\n\n with suppress(KeyError):\n del self._listeners[id(listener)]\n\n def _notify(self, notification, *args, **kwargs):\n \"\"\"\n Notify all listeners of some particular state change by calling their\n ``_update_<notification>`` method with the given ``*args`` and\n ``**kwargs``.\n\n The notification does not by default include the object that actually\n changed (``self``), but it certainly may if required.\n \"\"\"\n\n if self._listeners is None:\n return\n\n method_name = '_update_{0}'.format(notification)\n for listener in self._listeners.valuerefs():\n # Use valuerefs instead of itervaluerefs; see\n # https://github.com/astropy/astropy/issues/4015\n listener = listener() # dereference weakref\n if listener is None:\n continue\n\n if hasattr(listener, method_name):\n method = getattr(listener, method_name)\n if callable(method):\n method(*args, **kwargs)\n\n def __getstate__(self):\n \"\"\"\n Exclude listeners when saving the listener's state, since they may be\n ephemeral.\n \"\"\"\n\n # TODO: This hasn't come up often, but if anyone needs to pickle HDU\n # objects it will be necessary when HDU objects' states are restored to\n # re-register themselves as listeners on their new column instances.\n try:\n state = super().__getstate__()\n except AttributeError:\n # Chances are the super object doesn't have a getstate\n state = self.__dict__.copy()\n\n state['_listeners'] = None\n return state\n\n\ndef first(iterable):\n \"\"\"\n Returns the first item returned by iterating over an iterable object.\n\n Example:\n\n >>> a = [1, 2, 3]\n >>> first(a)\n 1\n \"\"\"\n\n return next(iter(iterable))\n\n\ndef itersubclasses(cls, _seen=None):\n \"\"\"\n Generator over all subclasses of a given class, in depth first order.\n\n >>> class A: pass\n >>> class B(A): pass\n >>> class C(A): pass\n >>> class D(B,C): pass\n >>> class E(D): pass\n >>>\n >>> for cls in itersubclasses(A):\n ... 
print(cls.__name__)\n B\n D\n E\n C\n >>> # get ALL classes currently defined\n >>> [cls.__name__ for cls in itersubclasses(object)]\n [...'tuple', ...'type', ...]\n\n From http://code.activestate.com/recipes/576949/\n \"\"\"\n\n if _seen is None:\n _seen = set()\n try:\n subs = cls.__subclasses__()\n except TypeError: # fails only when cls is type\n subs = cls.__subclasses__(cls)\n for sub in sorted(subs, key=operator.attrgetter('__name__')):\n if sub not in _seen:\n _seen.add(sub)\n yield sub\n for sub in itersubclasses(sub, _seen):\n yield sub\n\n\ndef ignore_sigint(func):\n \"\"\"\n This decorator registers a custom SIGINT handler to catch and ignore SIGINT\n until the wrapped function is completed.\n \"\"\"\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n # Get the name of the current thread and determine if this is a single\n # threaded application\n curr_thread = threading.currentThread()\n single_thread = (threading.activeCount() == 1 and\n curr_thread.getName() == 'MainThread')\n\n class SigintHandler:\n def __init__(self):\n self.sigint_received = False\n\n def __call__(self, signum, frame):\n warnings.warn('KeyboardInterrupt ignored until {} is '\n 'complete!'.format(func.__name__),\n AstropyUserWarning)\n self.sigint_received = True\n\n sigint_handler = SigintHandler()\n\n # Define new signal interput handler\n if single_thread:\n # Install new handler\n old_handler = signal.signal(signal.SIGINT, sigint_handler)\n\n try:\n func(*args, **kwargs)\n finally:\n if single_thread:\n if old_handler is not None:\n signal.signal(signal.SIGINT, old_handler)\n else:\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n if sigint_handler.sigint_received:\n raise KeyboardInterrupt\n\n return wrapped\n\n\ndef pairwise(iterable):\n \"\"\"Return the items of an iterable paired with its next item.\n\n Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....\n \"\"\"\n\n a, b = itertools.tee(iterable)\n for _ in b:\n # Just a little trick to advance b without having to catch\n # StopIter if b happens to be empty\n break\n return zip(a, b)\n\n\ndef encode_ascii(s):\n if isinstance(s, str):\n return s.encode('ascii')\n elif (isinstance(s, np.ndarray) and\n issubclass(s.dtype.type, np.str_)):\n ns = np.char.encode(s, 'ascii').view(type(s))\n if ns.dtype.itemsize != s.dtype.itemsize / 4:\n ns = ns.astype((np.bytes_, s.dtype.itemsize / 4))\n return ns\n elif (isinstance(s, np.ndarray) and\n not issubclass(s.dtype.type, np.bytes_)):\n raise TypeError('string operation on non-string array')\n return s\n\n\ndef decode_ascii(s):\n if isinstance(s, bytes):\n try:\n return s.decode('ascii')\n except UnicodeDecodeError:\n warnings.warn('non-ASCII characters are present in the FITS '\n 'file header and have been replaced by \"?\" '\n 'characters', AstropyUserWarning)\n s = s.decode('ascii', errors='replace')\n return s.replace(u'\\ufffd', '?')\n elif (isinstance(s, np.ndarray) and\n issubclass(s.dtype.type, np.bytes_)):\n # np.char.encode/decode annoyingly don't preserve the type of the\n # array, hence the view() call\n # It also doesn't necessarily preserve widths of the strings,\n # hence the astype()\n if s.size == 0:\n # Numpy apparently also has a bug that if a string array is\n # empty calling np.char.decode on it returns an empty float64\n # array wth\n dt = s.dtype.str.replace('S', 'U')\n ns = np.array([], dtype=dt).view(type(s))\n else:\n ns = np.char.decode(s, 'ascii').view(type(s))\n if ns.dtype.itemsize / 4 != s.dtype.itemsize:\n ns = ns.astype((np.str_, s.dtype.itemsize))\n return ns\n elif (isinstance(s, 
np.ndarray) and\n not issubclass(s.dtype.type, np.str_)):\n # Don't silently pass through on non-string arrays; we don't want\n # to hide errors where things that are not stringy are attempting\n # to be decoded\n raise TypeError('string operation on non-string array')\n return s\n\n\ndef isreadable(f):\n \"\"\"\n Returns True if the file-like object can be read from. This is a common-\n sense approximation of io.IOBase.readable.\n \"\"\"\n\n if hasattr(f, 'readable'):\n return f.readable()\n\n if hasattr(f, 'closed') and f.closed:\n # This mimics the behavior of io.IOBase.readable\n raise ValueError('I/O operation on closed file')\n\n if not hasattr(f, 'read'):\n return False\n\n if hasattr(f, 'mode') and not any(c in f.mode for c in 'r+'):\n return False\n\n # Not closed, has a 'read()' method, and either has no known mode or a\n # readable mode--should be good enough to assume 'readable'\n return True\n\n\ndef iswritable(f):\n \"\"\"\n Returns True if the file-like object can be written to. This is a common-\n sense approximation of io.IOBase.writable.\n \"\"\"\n\n if hasattr(f, 'writable'):\n return f.writable()\n\n if hasattr(f, 'closed') and f.closed:\n # This mimics the behavior of io.IOBase.writable\n raise ValueError('I/O operation on closed file')\n\n if not hasattr(f, 'write'):\n return False\n\n if hasattr(f, 'mode') and not any(c in f.mode for c in 'wa+'):\n return False\n\n # Note closed, has a 'write()' method, and either has no known mode or a\n # mode that supports writing--should be good enough to assume 'writable'\n return True\n\n\ndef isfile(f):\n \"\"\"\n Returns True if the given object represents an OS-level file (that is,\n ``isinstance(f, file)``).\n\n On Python 3 this also returns True if the given object is higher level\n wrapper on top of a FileIO object, such as a TextIOWrapper.\n \"\"\"\n\n if isinstance(f, io.FileIO):\n return True\n elif hasattr(f, 'buffer'):\n return isfile(f.buffer)\n elif hasattr(f, 'raw'):\n return isfile(f.raw)\n return False\n\n\ndef fileobj_open(filename, mode):\n \"\"\"\n A wrapper around the `open()` builtin.\n\n This exists because `open()` returns an `io.BufferedReader` by default.\n This is bad, because `io.BufferedReader` doesn't support random access,\n which we need in some cases. We must call open with buffering=0 to get\n a raw random-access file reader.\n \"\"\"\n\n return open(filename, mode, buffering=0)\n\n\ndef fileobj_name(f):\n \"\"\"\n Returns the 'name' of file-like object f, if it has anything that could be\n called its name. Otherwise f's class or type is returned. 
If f is a\n string f itself is returned.\n \"\"\"\n\n if isinstance(f, str):\n return f\n elif isinstance(f, gzip.GzipFile):\n # The .name attribute on GzipFiles does not always represent the name\n # of the file being read/written--it can also represent the original\n # name of the file being compressed\n # See the documentation at\n # https://docs.python.org/3/library/gzip.html#gzip.GzipFile\n # As such, for gzip files only return the name of the underlying\n # fileobj, if it exists\n return fileobj_name(f.fileobj)\n elif hasattr(f, 'name'):\n return f.name\n elif hasattr(f, 'filename'):\n return f.filename\n elif hasattr(f, '__class__'):\n return str(f.__class__)\n else:\n return str(type(f))\n\n\ndef fileobj_closed(f):\n \"\"\"\n Returns True if the given file-like object is closed or if f is a string\n (and assumed to be a pathname).\n\n Returns False for all other types of objects, under the assumption that\n they are file-like objects with no sense of a 'closed' state.\n \"\"\"\n\n if isinstance(f, str):\n return True\n\n if hasattr(f, 'closed'):\n return f.closed\n elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'):\n return f.fileobj.closed\n elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'):\n return f.fp.closed\n else:\n return False\n\n\ndef fileobj_mode(f):\n \"\"\"\n Returns the 'mode' string of a file-like object if such a thing exists.\n Otherwise returns None.\n \"\"\"\n\n # Go from most to least specific--for example gzip objects have a 'mode'\n # attribute, but it's not analogous to the file.mode attribute\n\n # gzip.GzipFile -like\n if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'):\n fileobj = f.fileobj\n\n # astropy.io.fits._File -like, doesn't need additional checks because it's\n # already validated\n elif hasattr(f, 'fileobj_mode'):\n return f.fileobj_mode\n\n # PIL-Image -like investigate the fp (filebuffer)\n elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'):\n fileobj = f.fp\n\n # FILEIO -like (normal open(...)), keep as is.\n elif hasattr(f, 'mode'):\n fileobj = f\n\n # Doesn't look like a file-like object, for example strings, urls or paths.\n else:\n return None\n\n return _fileobj_normalize_mode(fileobj)\n\n\ndef _fileobj_normalize_mode(f):\n \"\"\"Takes care of some corner cases in Python where the mode string\n is either oddly formatted or does not truly represent the file mode.\n \"\"\"\n mode = f.mode\n\n # Special case: Gzip modes:\n if isinstance(f, gzip.GzipFile):\n # GzipFiles can be either readonly or writeonly\n if mode == gzip.READ:\n return 'rb'\n elif mode == gzip.WRITE:\n return 'wb'\n else:\n return None # This shouldn't happen?\n\n # Sometimes Python can produce modes like 'r+b' which will be normalized\n # here to 'rb+'\n if '+' in mode:\n mode = mode.replace('+', '')\n mode += '+'\n\n return mode\n\n\ndef fileobj_is_binary(f):\n \"\"\"\n Returns True if the give file or file-like object has a file open in binary\n mode. 
When in doubt, returns True by default.\n \"\"\"\n\n # This is kind of a hack for this to work correctly with _File objects,\n # which, for the time being, are *always* binary\n if hasattr(f, 'binary'):\n return f.binary\n\n if isinstance(f, io.TextIOBase):\n return False\n\n mode = fileobj_mode(f)\n if mode:\n return 'b' in mode\n else:\n return True\n\n\ndef translate(s, table, deletechars):\n if deletechars:\n table = table.copy()\n for c in deletechars:\n table[ord(c)] = None\n return s.translate(table)\n\n\ndef fill(text, width, **kwargs):\n \"\"\"\n Like :func:`textwrap.wrap` but preserves existing paragraphs which\n :func:`textwrap.wrap` does not otherwise handle well. Also handles section\n headers.\n \"\"\"\n\n paragraphs = text.split('\\n\\n')\n\n def maybe_fill(t):\n if all(len(l) < width for l in t.splitlines()):\n return t\n else:\n return textwrap.fill(t, width, **kwargs)\n\n return '\\n\\n'.join(maybe_fill(p) for p in paragraphs)\n\n\n# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to\n# fail when reading over 2Gb of data. If we detect these versions of MacOS X,\n# we can instead read the data in chunks. To avoid performance penalties at\n# import time, we defer the setting of this global variable until the first\n# time it is needed.\nCHUNKED_FROMFILE = None\n\n\ndef _array_from_file(infile, dtype, count):\n \"\"\"Create a numpy array from a file or a file-like object.\"\"\"\n\n if isfile(infile):\n\n global CHUNKED_FROMFILE\n if CHUNKED_FROMFILE is None:\n if (sys.platform == 'darwin' and\n LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')):\n CHUNKED_FROMFILE = True\n else:\n CHUNKED_FROMFILE = False\n\n if CHUNKED_FROMFILE:\n chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe\n if count < chunk_size:\n return np.fromfile(infile, dtype=dtype, count=count)\n else:\n array = np.empty(count, dtype=dtype)\n for beg in range(0, count, chunk_size):\n end = min(count, beg + chunk_size)\n array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)\n return array\n else:\n return np.fromfile(infile, dtype=dtype, count=count)\n else:\n # treat as file-like object with \"read\" method; this includes gzip file\n # objects, because numpy.fromfile just reads the compressed bytes from\n # their underlying file object, instead of the decompressed bytes\n read_size = np.dtype(dtype).itemsize * count\n s = infile.read(read_size)\n array = np.frombuffer(s, dtype=dtype, count=count)\n # copy is needed because np.frombuffer returns a read-only view of the\n # underlying buffer\n array = array.copy()\n return array\n\n\n_OSX_WRITE_LIMIT = (2 ** 32) - 1\n_WIN_WRITE_LIMIT = (2 ** 31) - 1\n\n\ndef _array_to_file(arr, outfile):\n \"\"\"\n Write a numpy array to a file or a file-like object.\n\n Parameters\n ----------\n arr : `~numpy.ndarray`\n The Numpy array to write.\n outfile : file-like\n A file-like object such as a Python file object, an `io.BytesIO`, or\n anything else with a ``write`` method. The file object must support\n the buffer interface in its ``write``.\n\n If writing directly to an on-disk file this delegates directly to\n `ndarray.tofile`. 
Otherwise a slower Python implementation is used.\n \"\"\"\n\n if isfile(outfile) and not isinstance(outfile, io.BufferedIOBase):\n write = lambda a, f: a.tofile(f)\n else:\n write = _array_to_file_like\n\n # Implements a workaround for a bug deep in OSX's stdlib file writing\n # functions; on 64-bit OSX it is not possible to correctly write a number\n # of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--\n # whatever the default blocksize for the filesystem is).\n # This issue should have a workaround in Numpy too, but hasn't been\n # implemented there yet: https://github.com/astropy/astropy/issues/839\n #\n # Apparently Windows has its own fwrite bug:\n # https://github.com/numpy/numpy/issues/2256\n\n if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and\n arr.nbytes % 4096 == 0):\n # chunksize is a count of elements in the array, not bytes\n chunksize = _OSX_WRITE_LIMIT // arr.itemsize\n elif sys.platform.startswith('win'):\n chunksize = _WIN_WRITE_LIMIT // arr.itemsize\n else:\n # Just pass the whole array to the write routine\n return write(arr, outfile)\n\n # Write one chunk at a time for systems whose fwrite chokes on large\n # writes.\n idx = 0\n arr = arr.view(np.ndarray).flatten()\n while idx < arr.nbytes:\n write(arr[idx:idx + chunksize], outfile)\n idx += chunksize\n\n\ndef _array_to_file_like(arr, fileobj):\n \"\"\"\n Write a `~numpy.ndarray` to a file-like object (which is not supported by\n `numpy.ndarray.tofile`).\n \"\"\"\n\n # If the array is empty, we can simply take a shortcut and return since\n # there is nothing to write.\n if len(arr) == 0:\n return\n\n if arr.flags.contiguous:\n\n # It suffices to just pass the underlying buffer directly to the\n # fileobj's write (assuming it supports the buffer interface). If\n # it does not have the buffer interface, a TypeError should be returned\n # in which case we can fall back to the other methods.\n\n try:\n fileobj.write(arr.data)\n except TypeError:\n pass\n else:\n return\n\n if hasattr(np, 'nditer'):\n # nditer version for non-contiguous arrays\n for item in np.nditer(arr):\n fileobj.write(item.tostring())\n else:\n # Slower version for Numpy versions without nditer;\n # The problem with flatiter is it doesn't preserve the original\n # byteorder\n byteorder = arr.dtype.byteorder\n if ((sys.byteorder == 'little' and byteorder == '>')\n or (sys.byteorder == 'big' and byteorder == '<')):\n for item in arr.flat:\n fileobj.write(item.byteswap().tostring())\n else:\n for item in arr.flat:\n fileobj.write(item.tostring())\n\n\ndef _write_string(f, s):\n \"\"\"\n Write a string to a file, encoding to ASCII if the file is open in binary\n mode, or decoding if the file is open in text mode.\n \"\"\"\n\n # Assume if the file object doesn't have a specific mode, that the mode is\n # binary\n binmode = fileobj_is_binary(f)\n\n if binmode and isinstance(s, str):\n s = encode_ascii(s)\n elif not binmode and not isinstance(f, str):\n s = decode_ascii(s)\n\n f.write(s)\n\n\ndef _convert_array(array, dtype):\n \"\"\"\n Converts an array to a new dtype--if the itemsize of the new dtype is\n the same as the old dtype and both types are not numeric, a view is\n returned. 
Otherwise a new array must be created.\n \"\"\"\n\n if array.dtype == dtype:\n return array\n elif (array.dtype.itemsize == dtype.itemsize and not\n (np.issubdtype(array.dtype, np.number) and\n np.issubdtype(dtype, np.number))):\n # Includes a special case when both dtypes are at least numeric to\n # account for ticket #218: https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218\n return array.view(dtype)\n else:\n return array.astype(dtype)\n\n\ndef _unsigned_zero(dtype):\n \"\"\"\n Given a numpy dtype, finds its \"zero\" point, which is exactly in the\n middle of its range.\n \"\"\"\n\n assert dtype.kind == 'u'\n return 1 << (dtype.itemsize * 8 - 1)\n\n\ndef _is_pseudo_unsigned(dtype):\n return dtype.kind == 'u' and dtype.itemsize >= 2\n\n\ndef _is_int(val):\n return isinstance(val, all_integer_types)\n\n\ndef _str_to_num(val):\n \"\"\"Converts a given string to either an int or a float if necessary.\"\"\"\n\n try:\n num = int(val)\n except ValueError:\n # If this fails then an exception should be raised anyways\n num = float(val)\n return num\n\n\ndef _words_group(input, strlen):\n \"\"\"\n Split a long string into parts where each part is no longer\n than ``strlen`` and no word is cut into two pieces. But if\n there is one single word which is longer than ``strlen``, then\n it will be split in the middle of the word.\n \"\"\"\n\n words = []\n nblanks = input.count(' ')\n nmax = max(nblanks, len(input) // strlen + 1)\n arr = np.frombuffer((input + ' ').encode('utf8'), dtype=(bytes, 1))\n\n # locations of the blanks\n blank_loc = np.nonzero(arr == b' ')[0]\n offset = 0\n xoffset = 0\n for idx in range(nmax):\n try:\n loc = np.nonzero(blank_loc >= strlen + offset)[0][0]\n offset = blank_loc[loc - 1] + 1\n if loc == 0:\n offset = -1\n except Exception:\n offset = len(input)\n\n # check for one word longer than strlen, break in the middle\n if offset <= xoffset:\n offset = xoffset + strlen\n\n # collect the pieces in a list\n words.append(input[xoffset:offset])\n if len(input) == offset:\n break\n xoffset = offset\n\n return words\n\n\ndef _tmp_name(input):\n \"\"\"\n Create a temporary file name which should not already exist. Use the\n directory of the input file as the base name of the mkstemp() output.\n \"\"\"\n\n if input is not None:\n input = os.path.dirname(input)\n f, fn = tempfile.mkstemp(dir=input)\n os.close(f)\n return fn\n\n\ndef _get_array_mmap(array):\n \"\"\"\n If the array has an mmap.mmap at base of its base chain, return the mmap\n object; otherwise return None.\n \"\"\"\n\n if isinstance(array, mmap.mmap):\n return array\n\n base = array\n while hasattr(base, 'base') and base.base is not None:\n if isinstance(base.base, mmap.mmap):\n return base.base\n base = base.base\n\n\n@contextmanager\ndef _free_space_check(hdulist, dirname=None):\n try:\n yield\n except OSError as exc:\n error_message = ''\n if not isinstance(hdulist, list):\n hdulist = [hdulist, ]\n if dirname is None:\n dirname = os.path.dirname(hdulist._file.name)\n if os.path.isdir(dirname):\n free_space = data.get_free_space_in_dir(dirname)\n hdulist_size = sum(hdu.size for hdu in hdulist)\n if free_space < hdulist_size:\n error_message = (\"Not enough space on disk: requested {}, \"\n \"available {}. \".format(hdulist_size, free_space))\n\n for hdu in hdulist:\n hdu._close()\n\n raise OSError(error_message + str(exc))\n\n\ndef _extract_number(value, default):\n \"\"\"\n Attempts to extract an integer number from the given value. 
If the\n extraction fails, the value of the 'default' argument is returned.\n \"\"\"\n\n try:\n # The _str_to_num method converts the value to string/float\n # so we need to perform one additional conversion to int on top\n return int(_str_to_num(value))\n except (TypeError, ValueError):\n return default\n\n\ndef get_testdata_filepath(filename):\n \"\"\"\n Return a string representing the path to the file requested from the\n io.fits test data set.\n\n .. versionadded:: 2.0.3\n\n Parameters\n ----------\n filename : str\n The filename of the test data file.\n\n Returns\n -------\n filepath : str\n The path to the requested file.\n \"\"\"\n return data.get_pkg_data_filename(\n 'io/fits/tests/data/{}'.format(filename), 'astropy')\n\n\ndef _rstrip_inplace(array):\n \"\"\"\n Performs an in-place rstrip operation on string arrays. This is necessary\n since the built-in `np.char.rstrip` in Numpy does not perform an in-place\n calculation.\n \"\"\"\n\n # The following implementation convert the string to unsigned integers of\n # the right length. Trailing spaces (which are represented as 32) are then\n # converted to null characters (represented as zeros). To avoid creating\n # large temporary mask arrays, we loop over chunks (attempting to do that\n # on a 1-D version of the array; large memory may still be needed in the\n # unlikely case that a string array has small first dimension and cannot\n # be represented as a contiguous 1-D array in memory).\n\n dt = array.dtype\n\n if dt.kind not in 'SU':\n raise TypeError(\"This function can only be used on string arrays\")\n # View the array as appropriate integers. The last dimension will\n # equal the number of characters in each string.\n bpc = 1 if dt.kind == 'S' else 4\n dt_int = \"{0}{1}u{2}\".format(dt.itemsize // bpc, dt.byteorder, bpc)\n b = array.view(dt_int, np.ndarray)\n # For optimal speed, work in chunks of the internal ufunc buffer size.\n bufsize = np.getbufsize()\n # Attempt to have the strings as a 1-D array to give the chunk known size.\n # Note: the code will work if this fails; the chunks will just be larger.\n if b.ndim > 2:\n try:\n b.shape = -1, b.shape[-1]\n except AttributeError: # can occur for non-contiguous arrays\n pass\n for j in range(0, b.shape[0], bufsize):\n c = b[j:j + bufsize]\n # Mask which will tell whether we're in a sequence of trailing spaces.\n mask = np.ones(c.shape[:-1], dtype=bool)\n # Loop over the characters in the strings, in reverse order. We process\n # the i-th character of all strings in the chunk at the same time. If\n # the character is 32, this corresponds to a space, and we then change\n # this to 0. We then construct a new mask to find rows where the\n # i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.\n for i in range(-1, -c.shape[-1], -1):\n mask &= c[..., i] == 32\n c[..., i][mask] = 0\n mask = c[..., i] == 0\n\n return array\n"
] | [
[
"numpy.isnan",
"numpy.arange",
"numpy.cos",
"numpy.ones",
"numpy.all",
"numpy.sin",
"numpy.copy",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros"
],
[
"numpy.dtype",
"numpy.ones",
"numpy.all",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.convolve",
"pandas.Series",
"numpy.random.seed",
"scipy.signal.fftconvolve",
"numpy.testing.assert_array_almost_equal_nulp",
"numpy.isnan",
"numpy.linspace",
"numpy.isfinite",
"numpy.nan_to_num",
"numpy.ones",
"numpy.all",
"numpy.random.randn",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.ma.array",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.char.encode",
"numpy.fromfile",
"numpy.nditer",
"numpy.nonzero",
"numpy.char.decode",
"numpy.issubdtype",
"numpy.dtype",
"numpy.ones",
"numpy.getbufsize",
"numpy.frombuffer",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mateussangalli/SE2DINNets | [
"c4d9b6d2577a5044c243d0eb80ebe5879a7673c9"
] | [
"train_SE2DINNet.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras import layers, regularizers\nfrom tensorflow.keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nimport os\nimport argparse\n\nfrom SE2DIN import *\n\n\nfrom load_data import load_data\n\n\nmnist_rot_dir = 'mnist_rot'\nmnist_12k_dir = 'mnist12k'\nmodel_dir = 'models'\n\n\n\n\nparser = argparse.ArgumentParser(description='trains a SE2DINNet with the specified parameters(some parts of the architecture are fixed)')\nparser.add_argument('-o', '--order', type=int, default=2, help='order of the differential invariants')\nparser.add_argument('-d', '--dropout', type=int, default=30, help='dropout rate in percentage between 1 x 1 convolutions')\nparser.add_argument('-w', '--weight_decay', type=float, default=1e-4, help='weight decay')\nparser.add_argument('-f', '--data_dir', type=str, default='./', help='directory containing both MNIST-Rot and MNIST12K dataset in separate folders')\nparser.add_argument('--train_on_mnist12k', action='store_true', help='whether to train on MNIST12K or MNIST-Rot(False)')\nparser.add_argument('--lr', type=float, default=1e-2, help='initial learning rate')\nparser.add_argument('--batch_size', type=int, default=256, help='batch size')\nparser.add_argument('--epochs', type=int, default=2000, help='maximum number of epochs')\nparser.add_argument('--n_filters', type=int, default=20, help='number of filters in the middle layers')\n\nargs = parser.parse_args()\n\nweight_decay = args.weight_decay\ndropout = args.dropout / 100\nn_filters = args.n_filters\nlr = args.lr\nbatch_size = args.batch_size\nepochs = args.epochs\norder = args.order\ndata_dir = args.data_dir\n\nif args.train_on_mnist12k:\n _, _, (x_test, y_test) = load_data(os.path.join(data_dir, mnist_rot_dir))\n (x_train, y_train), (x_val, y_val), _ = load_data(os.path.join(data_dir, mnist_12k_dir))\nelse:\n (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_data(os.path.join(data_dir, mnist_rot_dir))\n\ndef se2din_block(n_in, n_out, sigma, width, order=2, dropout=0, weight_decay=0):\n block = tf.keras.models.Sequential()\n block.add(layers.Input((None, None, n_in)))\n block.add(SE2DIN(sigma, width, order=order))\n block.add(layers.BatchNormalization(beta_regularizer=regularizers.l2(weight_decay),\n gamma_regularizer=regularizers.l2(weight_decay)))\n\n block.add(layers.Conv2D(n_out,1,\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay)))\n block.add(layers.BatchNormalization(beta_regularizer=regularizers.l2(weight_decay),\n gamma_regularizer=regularizers.l2(weight_decay)))\n block.add(layers.ReLU())\n if dropout > 0:\n block.add(layers.Dropout(dropout))\n\n block.add(layers.Conv2D(n_out,1,\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay)))\n block.add(layers.BatchNormalization(beta_regularizer=regularizers.l2(weight_decay),\n gamma_regularizer=regularizers.l2(weight_decay)))\n \n #block.add(layers.ReLU())\n return block\n\ndef get_model(n_filters, weight_decay, dropout, lr, order=2):\n input_layer = layers.Input((None,None,1))\n \n \n x = se2din_block(1,n_filters,1.,4,2,dropout,weight_decay)(input_layer)\n features0 = tf.keras.models.Model(input_layer, x)\n x += se2din_block(n_filters,n_filters,1.,4,order,dropout,weight_decay)(x)\n x += se2din_block(n_filters,n_filters,2.,8,order,dropout,weight_decay)(x)\n x += se2din_block(n_filters,n_filters,2.,8,order,dropout,weight_decay)(x)\n x += 
se2din_block(n_filters,n_filters,2.,8,order,dropout,weight_decay)(x)\n \n x = se2din_block(n_filters,10,2.,8,2,0,weight_decay)(x)\n \n features1 = tf.keras.models.Model(input_layer, x)\n \n x = layers.GlobalMaxPooling2D()(x)\n \n \n model = tf.keras.models.Model(input_layer, x)\n model.summary()\n model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(lr), metrics=['accuracy'])\n return model\n\n \n\n\nmodel = get_model(n_filters, weight_decay, dropout, lr, order)\n\n# reduces learning rate when validation loss stagnates\ncb_lr = tf.keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss', factor=0.1, patience=100, verbose=0,\n mode='auto', min_delta=0.0001, min_lr=1e-5\n )\n\n# stops training if validation loss remains unchanged for too long\ncb_es = tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', min_delta=0, patience=300, verbose=0,\n mode='auto', restore_best_weights=True\n )\n \nmodel.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=epochs, batch_size=batch_size, callbacks=[cb_lr, cb_es], verbose=2)\nmodel.evaluate(x_test, y_test)\n\n\nmodel.save(os.path.join(model_dir, f'SE2DINNetOrd{order}'))\n"
] | [
[
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.models.Model",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.2"
]
}
] |
Ayon134/code_for_Kids | [
"d90698bb38efe5e26c31f02bd129bfdadea158e2"
] | [
"lst.py"
] | [
"import cv2\nimport io\nfrom PIL import Image, ImageEnhance\nimport pytesseract\nfrom wand.image import Image as wi\nimport re\nimport pandas as pd\nfrom PyPDF2 import PdfFileWriter, PdfFileReader\nfrom pdf2image import convert_from_path\n\n\nclaim = '15232353'\nfile = \"a.pdf\"\npages_to_keep = [0]\ninfile = PdfFileReader(file, 'rb')\noutput = PdfFileWriter()\n\nfor i in pages_to_keep:\n p = infile.getPage(i)\n output.addPage(p)\n\n\nwith open('A1.pdf', 'wb') as f:\n output.write(f)\n\n\npages = convert_from_path('A1.pdf', 500)\n\nfor page in pages:\n page.save('A1.jpg', 'JPEG')\n\n#img = Image.open('B1.jpg').convert('LA')\n#img.save('B1.png')\n\n\n\n# Grayscale, Gaussian blur, Otsu's threshold\nimage = cv2.imread('A1.jpg')\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nblur = cv2.GaussianBlur(gray, (3,3), 0)\nthresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n\n# Morph open to remove noise and invert image\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))\nopening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)\ninvert = 255 - opening\n\n\n#read the image\n#im = Image.open(\"B1.jpg\")\n\n#enhancer = ImageEnhance.Contrast(im)\n#img = enhancer.enhance(2.70)\n\n#read the image\n#im = Image.open(\"B1.png\")\n\n#image brightness enhancer\n#enhancer = ImageEnhance.Contrast(im)\n\n#factor = 2 #increase contrast\n#im_output = enhancer.enhance(factor)\n#im_output.save('BF.png')\n\n\ntext = pytesseract.image_to_string(invert, lang='eng', config='--psm 6')\n#text = pytesseract.image_to_string(Image.open('BF.png'))\nprint(text)\n\nx1 = text.find(\"Bill No\")\nprint(x1)\ny1 = text.find(\"Regn No\")\nprint(y1)\nz1 = text[x1:y1]\nprint(z1)\nz1 = z1.split()\nprint(z1)\nbillnum = z1[2]\nbillnum = billnum.split('-')\nif billnum[0] == '1' or billnum[0] == 'l':\n print(\"change hobe\")\n \n billnum[0] = 'I'\n billno = '-'\n billno = billno.join(billnum)\n print(billno)\nelse:\n print(\"ager\")\n billno = '-'\n billno = billno.join(billnum)\n print(billno)\n\n\nx1 = text.find(\"Bed No\")\nprint(x1)\ny1 = text.find(\"Discharge\")\nprint(y1)\nz1 = text[x1:y1]\nprint(z1)\nz1 = z1.split()\nprint(z1)\nroomno=z1[2]\nroomno = roomno.split('/')\nprint(roomno)\nroomno = roomno[0]\nprint(roomno)\n\nx1 = text.find(\"Discharge Date\")\nprint(x1)\ny1 = text.find(\"Service Group\")\nprint(y1)\nz1 = text[x1:y1]\nprint(z1)\nz1 = z1.split()\nprint(z1)\nddate = z1[2]\nprint(ddate)\ndtime = z1[3]\ndtime = dtime.split(':')\ndhr = int(dtime[0])\ndmin = int(dtime[1])\nprint(dhr)\nprint(dmin)\n\nx1 = text.find(\"Consultant:\")\nprint(x1)\ny1 = text.find(\"Adm. 
Date:\")\nprint(y1)\nz1 = text[x1:y1]\nprint(z1)\nz1 = z1.split()\nprint(z1)\nlength = len(z1)\nx = []\nfname = z1[1]\nsname = z1[2]\nif fname == 'OR.':\n fname = 'DR.'\nx.append(fname)\nx.append(sname)\nprint(x)\n\n\ndoc = ' '\ndoc = doc.join(x)\nprint(doc)\n\nx2 = text.find(\"Net Bill Amount :\")\nprint(x2)\ny2 = text.find(\"Net Corporate Payable :\")\nprint(y2)\nz2 = text[x2:y2]\nprint(z2)\nz2 = z2.split()\nprint(z2)\nnetbill = z2[4]\nprint(netbill)\n\nx2 = text.find(\"Discharge Type:\")\nprint(x2)\ny2 = text.find(\"Service Group\")\nprint(y2)\nz2 = text[x2:y2]\nprint(z2)\nz2 = z2.split()\nprint(z2)\nif z2[3] == 'but' or z2[3] == 'Admitted':\n dtype = 'Normal Discharge'\n \nelif z2[3] == 'against' or z2[3] == 'on':\n dtype = 'LAMA'\n\nelif z2[3] == 'Dead':\n dtype = 'NA'\n\nelif z2[3] == 'Birth' or z2[3] == 'Death' or z2[3] == 'Infant':\n dtype = 'Death Discharge'\n\nelse:\n \n dtype = z2[2]\nprint(dtype)\n\n\nx2 = text.find(\"Bill Date:\")\nprint(x2)\ny2 = text.find(\"Consultant:\")\nprint(y2)\nz2 = text[x2:y2]\nprint(z2)\nz2 = z2.split()\nprint(z2)\nbilldate = z2[2]\nprint(billdate)\nbilldate=billdate.split('-')\nprint(billdate)\nbilldin = int(billdate[0])\nbillmn = int(billdate[1])\nbillyr = int(billdate[2])\nprint(billdin)\nprint(billmn)\nprint(billyr)\n\n\ndtype = 'Stable'\nfun = pd.DataFrame(\n [[claim, ddate, dhr, dmin, dtype, roomno, doc, billno, billdin, billmn, billyr, netbill]],\n columns=['Claim_Id', 'Discharge_Date', 'Discharge_Hour', 'Discharge_Minute',\n 'Discharge_Type', 'Room_No', 'Consultant_Name', 'Bill_No', 'Bill_Day', 'Bill_Month', 'Bill_Year',\n 'Net_Bill_Amount'])\n\nfun.to_csv('reader.csv', index=False, header=False, mode='a')\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
giangtranml/framgia-training | [
"c7fb343bd43b1bceb241b447ff956febb99c94a8"
] | [
"decision_tree/decision_tree.py"
] | [
"\"\"\"\nAuthor: Giang Tran.\n\"\"\"\n\nimport numpy as np\nfrom math import log2\n\n\nclass NodeDT:\n \"\"\"\n Class Node represents in Decision Tree\n \"\"\"\n\n def __init__(self, X, y, feature_name):\n self.feature_name = feature_name\n self.X = X\n self.y = y\n self.is_leaf = False\n self.label = None\n self.used = []\n\n def entropy(self):\n \"\"\"\n Compute entropy at a given node.\n E(X) = - sum_v(p(X_v) * log_2(p(X_v))) with X_v is a subset of X = (X_1, X_2, ..., X_n)\n :return: entropy coefficient.\n \"\"\"\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_\n\n def classification_error(self):\n pass\n\n\nclass DecisionTree:\n \"\"\"\n Metrics: either entropy/information gain or classification error.\n \"\"\"\n\n _metrics = {'ce': '_classification_error', 'ig': '_information_gain'}\n\n def __init__(self, max_depth=None, criterion='ig'):\n \"\"\"\n :param max_depth: define what depth of the tree should be.\n :param criterion: either 'ce' or 'ig'.\n \"\"\"\n self.max_depth = max_depth\n self.criterion = criterion\n if self.criterion not in self._metrics.keys():\n self.criterion = 'ig'\n self.num_class = 0\n self.tree = None\n self.thresholds = {}\n\n def _is_numerical(self, feature):\n return len(np.unique(feature)) >= 100\n\n def _find_threshold(self, feature, y_train, num_class):\n \"\"\"\n The main point is find a good threshold that is the optimal split label.\n A good threshold is the threshold that minimize mis-classification error.\n\n The algorithm:\n - If there are `n` data points in feature data => there are `n-1` available thresholds.\n - For each available threshold, split feature data to 2 partitions.\n - For each partition, we check and compute mis-classification error for each label.\n\n :param feature: numerical value of `feature`.\n :param y_train: label.\n :param num_class: number of class\n :return: categorical value of `feature`.\n \"\"\"\n assert len(num_class) == 2, \"This function only assumes work with binary classification.\"\n best_threshold = 0.0\n max_exact_classification = 0.0\n is_positive_negative = False\n sorted_feature = sorted(np.unique(feature))\n for i in range(len(sorted_feature)-1):\n # assume the value less than threshold is negative (0), greater than threshold is positive (1)\n threshold = (sorted_feature[i] + sorted_feature[i+1]) / 2\n left_partition = y_train[feature < threshold]\n right_partition = y_train[feature > threshold]\n negative_positive = ((len(left_partition[left_partition == 0]) + len(right_partition[right_partition == 1]))\n / len(feature))\n # assume the value less than threshold is positive (1), greater than threshold is negative. 
(0)\n positive_negative = ((len(left_partition[left_partition == 1]) + len(right_partition[right_partition == 0]))\n / len(feature))\n # make decision here\n is_positive_negative = positive_negative > negative_positive\n choose = positive_negative if is_positive_negative else negative_positive\n if max_exact_classification < choose:\n max_exact_classification = choose\n best_threshold = threshold\n return best_threshold, is_positive_negative\n\n def _entropy(self, feature, node):\n \"\"\"\n Compute entropy each partition of specific feature in a given node.\n :param feature: specific feature in dataset of `node`.\n :param node: a node we're checking on.\n :return: an entropy scalar that measure the uncertainty of a feature in data.\n \"\"\"\n entropy = 0\n categories = np.unique(feature)\n num_point = len(feature)\n for category in categories:\n # for each category in that feature\n num_category = len(feature[feature == category])\n for c in self.num_class:\n # count the number of each class\n num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])\n if num_category_class == 0:\n continue\n # compute entropy/information gain or classification error\n entropy += num_category / num_point * (\n -num_category_class / num_category * log2(num_category_class / num_category))\n return entropy\n\n def _information_gain(self, feature, node):\n \"\"\"\n Compute information gain between a node with that feature.\n :param feature:\n :param node:\n :return: information gain coefficient.\n \"\"\"\n return node.entropy() - self._entropy(feature, node)\n\n def _classification_error(self, feature, node):\n pass\n\n def _stop(self, node):\n \"\"\"\n Stop condition:\n - Reach max depth or already reach all features.\n - If entropy of that node is 0\n :return: True if the node meets stop condition. False otherwise.\n \"\"\"\n return len(node.used) == node.X.shape[1] or len(node.used) == self.max_depth or node.entropy() == 0\n\n def _build_dt(self, root, column_name):\n \"\"\"\n Algorithm:\n - Start from the root. 
Find the best feature that has optimum entropy/information gain or classification error.\n - From that best feature, loop through all categories to build subtree.\n ...\n - If entropy/classification erorr is 0, or reach all features then that node is leaf or reach the max depth,\n then stop and move to other subtrees\n :param root: root node at current level\n :return:\n \"\"\"\n N, D = root.X.shape\n best_coef = 0.0\n best_feature = 0\n for d in range(D):\n if column_name[d] in root.used:\n continue\n feature = root.X[:, d]\n coef = getattr(self, self._metrics[self.criterion])(feature, root)\n if best_coef < coef:\n best_coef = coef\n best_feature = d\n # after choose the best feature to split.\n # loop through all its categories to build subtree\n feature = root.X[:, best_feature]\n categories = np.unique(feature)\n for category in categories:\n node = NodeDT(root.X[feature == category], root.y[feature == category], column_name[best_feature])\n node.used = root.used + [column_name[best_feature]]\n setattr(root, 'feature_' + str(category), node)\n setattr(root, 'feature_split', best_feature)\n if not self._stop(node):\n self._build_dt(node, column_name)\n else:\n node.is_leaf = True\n node.label = 1 if len(node.y[node.y == 1]) >= len(node.y[node.y == 0]) else 0\n\n def _train(self, X_train, y_train, column_name):\n self.tree = NodeDT(X_train, y_train, 'root')\n self._build_dt(self.tree, column_name)\n\n def train(self, X_train, y_train, column_name):\n self.num_class = np.unique(y_train)\n _, D = X_train.shape\n for d in range(D):\n feature = X_train[:, d]\n if self._is_numerical(feature):\n threshold, is_positive_negative = self._find_threshold(feature, y_train, self.num_class)\n feature[feature < threshold] = int(is_positive_negative)\n feature[feature > threshold] = int(not is_positive_negative)\n X_train[:, d] = feature\n self.thresholds[d] = (threshold, is_positive_negative)\n self._train(X_train, y_train, column_name)\n\n def _predict(self, X_new, node):\n if not node.is_leaf:\n node = getattr(node, 'feature_' + str(X_new[node.feature_split]))\n return self._predict(X_new, node)\n return node.label\n\n def predict(self, X_new):\n # First convert numerical feature to categorical feature.\n for key, (threshold, is_positive_negative) in self.thresholds.items():\n X_new[key] = int(is_positive_negative) if X_new[key] < threshold else int(not is_positive_negative)\n tree = self.tree\n label = self._predict(X_new, tree)\n return label\n\n def representation(self):\n print(self.tree)\n \n\nif __name__ == '__main__':\n import pandas as pd\n from sklearn.tree import DecisionTreeClassifier\n\n df = pd.read_csv('data/titanic_train.csv')\n X = df.loc[:, :].drop(['Survived', 'PassengerId'], axis=1).values\n y = df.loc[:, 'Survived'].values\n\n dt = DecisionTree(criterion='ig', max_depth=5)\n dt.train(X, y, df.columns.drop(['Survived', 'PassengerId']))\n\n df_test = pd.read_csv('data/titanic_test.csv')\n X_test = df_test.loc[:, :].drop(['Survived', 'PassengerId'], axis=1).values\n y_test = df_test.loc[:, 'Survived'].values\n predicts = []\n for x in X_test:\n predicts.append(dt.predict(x))\n predicts = np.asarray(predicts)\n print(\"Accuracy:\", len(predicts[predicts == y_test])/len(predicts))\n\n dt_sk = DecisionTreeClassifier(max_depth=5)\n X[X[:, 7] == 'male', 7] = 1\n X[X[:, 7] == 'female', 7] = 0\n\n X_test[X_test[:, 7] == 'male', 7] = 1\n X_test[X_test[:, 7] == 'female', 7] = 0\n dt_sk.fit(X, y)\n y_pred = dt_sk.predict(X_test)\n print(\"Accuracy of Sk-learn:\", len(y_pred[y_pred == y_test]) 
/ len(y_pred))\n\n\n\n"
] | [
[
"pandas.read_csv",
"numpy.unique",
"numpy.asarray",
"sklearn.tree.DecisionTreeClassifier",
"numpy.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mnemocron/TelegramChatStats | [
"10b9ebb97bfb28f835fd05050f03dcb10525f7a3"
] | [
"telegram-statistics.py"
] | [
"#! /usr/bin/python3\n\n#_*_ coding: utf-8 _*_\n\n'''\n@file \t\ttelegram-statistics.py\n@author \tSimon Burkhardt - github.com/mnemocron\n@date \t\t2018.10.01\n\nPost about this code:\nhttps://www.reddit.com/r/LongDistance/comments/9mgcol/oc_chat_statistics_from_telegram_using_python/\n\nInspiration:\nhttps://www.reddit.com/r/LongDistance/comments/9jud8j/analysis_of_texts_from_a_long_distance/\n'''\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport optparse\nimport re\nimport json\nimport codecs\nimport numpy as np # install with pip3\nimport pandas as pd # install with pip3\nimport bokeh # install with pip3\nfrom pprint import pprint\nfrom collections import Counter\nfrom datetime import datetime\nfrom datetime import timedelta\n\nfrom _message_numerics import _message_numerics\nfrom _message_graphs import _message_graphs\n\nparser = optparse.OptionParser('telegram-stats')\nparser.add_option('-i', '--input-file', \tdest='indir', \ttype='string', \thelp='chat history file')\nparser.add_option('-n', '--name', \t\t\tdest='name', \ttype='string', \thelp='name of the person')\nparser.add_option('-c', '--id', \t\t\tdest='id', \t\ttype='string', \thelp='chat id of the person')\nparser.add_option('-d', '--date-max', \t\tdest='date', \ttype='string', \thelp='only count messages after date [YYYY-MM-DD]')\nparser.add_option('-w', '--word-list', \t\tdest='words', \ttype='string', \thelp='count occurrences of words -w \"John;Vacation\"')\n(opts, args) = parser.parse_args()\n\n# Writes a dict in json format to a file\ndef dump_to_json_file(filename, data):\n\twith open(filename, 'w', encoding='utf-8') as fh:\n\t\tjson.dump(data, fh, indent=4, sort_keys=True)\n\n# writes data utf-8 encoded to a file\n# important for emojis\ndef dump_to_unicode_file(filename, data):\n\tfh = codecs.open(filename, 'w', 'utf-8')\n\tfh.write(data)\n\tfh.close()\n\n# writes a dict to a csv format\n\ndef dump_dict_to_csv_file(filename, dict):\n\t(pd.DataFrame.from_dict(data=dict, orient='index')\n\t\t.to_csv(filename, header=False, sep=';'))\n\ndef load_file_to_raw(path):\n\ttry:\n\t\twith open(path, encoding='utf-8-sig') as fh:\n\t\t\tdata = json.load(fh)\n\t\treturn data\n\texcept IOError:\n\t\tprint('Error: could not open the file')\n\t\texit(-1)\n\ndef select_chat_from_name(data, name):\n\ttry:\n\t\tfound = False\n\t\tfor chat in data['chats']['list']:\n\t\t\tif('name' in chat):\n\t\t\t\tif(name == chat['name']):\n\t\t\t\t\tif(found == True):\n\t\t\t\t\t\tprint('Error: The name \"' + str(name) + '\" is ambiguous. 
Use the chat ID instead.')\n\t\t\t\t\t\tprint('Use <telegram-stats -i [result.json]> to list the available chats.')\n\t\t\t\t\t\texit(-1)\n\t\t\t\t\tfound = True\n\t\t\t\t\tdata = chat\n\t\tif(found == False):\n\t\t\tprint('Error: invalid chat name: ' + name)\n\t\t\texit(-1)\n\t\treturn data\n\texcept KeyError:\n\t\tprint('Error: wrong file format (name not found)')\n\ndef select_chat_from_id(data, id):\n\tid = str(id)\n\ttry:\n\t\tfound = False\n\t\tfor chat in data['chats']['list']:\n\t\t\tif('id' in chat):\n\t\t\t\tif(id == str(chat['id'])):\n\t\t\t\t\tfound = True\n\t\t\t\t\tdata = chat\n\t\tif(found == False):\n\t\t\tprint('Error: invalid chat ID: ' + str(id))\n\t\t\texit(-1)\n\t\treturn data\n\texcept KeyError:\n\t\tprint('Error: wrong file format (keys not found)')\n\ndef calculate_metrics(chat_data, date_filter):\n\tmetrics = _message_numerics(chat_data, date_filter)\n\tdump_to_json_file('raw_metrics.json', metrics)\n\tustr = u'' + metrics['A']['name'] + '\\n'\n\tfor e in metrics['A']['emojilist']:\n\t\tustr += str(e[0]) + u' : ' + str(e[1]) + u'\\n'\n\tustr += metrics['B']['name'] + '\\n'\n\tfor e in metrics['B']['emojilist']:\n\t\tustr += str(e[0]) + u' : ' + str(e[1]) + u'\\n'\n\tdump_to_unicode_file('emojis.txt', ustr)\n\ndef calculate_graphs(chat_data, date_filter, wordlist):\n\treturn _message_graphs(chat_data, date_filter, wordlist)\n\n# https://stackoverflow.com/questions/16870663/how-do-i-validate-a-date-string-format-in-python\ndef validate_date(date_text):\n\ttry:\n\t\tdatetime.strptime(date_text, '%Y-%m-%d')\n\texcept ValueError:\n\t\tprint('Incorrect date format, should be YYYY-MM-DD')\n\t\texit(-1)\n\ndef print_available_names(raw_data):\n\tprint('')\n\tprint('available chat names:')\n\tfor chat in raw_data['chats']['list']:\n\t\tif ('name' in chat):\n\t\t\tname = chat['name']\n\t\t\tif(len(name) > 13):\n\t\t\t\tname = name[:11] + '...'\n\t\t\tif(len(name) < 7):\n\t\t\t\tname = name + '\\t'\n\t\t\tprint(name + ' \\t' + str(chat['id']) + ' \\t(' + chat['type'] + ')')\n\n### MAIN\ndef main():\n\tif (opts.indir is None):\n\t\tparser.print_help() \n\t\texit(0)\n\n\tdate_filter = '1970-01-01'\n\tif ( opts.date is not None):\n\t\tvalidate_date(opts.date)\n\t\tdate_filter = opts.date\n\n\tprint('importing raw data...')\n\traw_data = load_file_to_raw(opts.indir)\n\n\tif('chats' in raw_data):\n\t\tprint('input data is full chat export')\n\t\tif (opts.id is None and opts.name is None):\n\t\t\tprint('Error: argument <name> not specified.')\n\t\t\tprint('I do now know which chat to analyze.')\n\t\t\tprint('Available chats are:')\n\t\t\tprint_available_names(raw_data)\n\t\t\texit(0)\n\t\tif (opts.id is not None):\n\t\t\tchat_data = select_chat_from_id(raw_data, opts.id)\n\t\telif (opts.name is not None):\n\t\t\tchat_data = select_chat_from_name(raw_data, opts.name)\n\telse:\n\t\tprint('input data is a single chat export')\n\t\tchat_data = raw_data\n\n\twordlist = ''\n\tif(opts.words is not None):\n\t\twordlist = opts.words.lower().split(';')\n\t\n\tprint('calculating metrics...')\n\tcalculate_metrics(chat_data, date_filter)\n\tprint('generating graphs...')\n\traw = calculate_graphs(chat_data, date_filter, wordlist)\n\tdump_dict_to_csv_file('raw_weekdays_person_' + raw['A']['name'] + '.csv', raw['A']['hourofday'])\n\tdump_dict_to_csv_file('raw_weekdays_person_' + raw['B']['name'] + '.csv', raw['B']['hourofday'])\n\tdump_dict_to_csv_file('raw_months_person_' + raw['A']['name'] + '.csv', raw['A']['months'])\n\tdump_dict_to_csv_file('raw_months_person_' + raw['B']['name'] + '.csv', 
raw['B']['months'])\n\tdump_dict_to_csv_file('raw_months_chars_person_' + raw['A']['name'] + '.csv', raw['A']['months_chars'])\n\tdump_dict_to_csv_file('raw_months_chars_person_' + raw['B']['name'] + '.csv', raw['B']['months_chars'])\n\tdump_dict_to_csv_file('raw_monthly_pictures_person_' + raw['A']['name'] + '.csv', raw['A']['monthly_pictures'])\n\tdump_dict_to_csv_file('raw_monthly_pictures_person_' + raw['B']['name'] + '.csv', raw['B']['monthly_pictures'])\n\tdump_dict_to_csv_file('raw_monthly_calls_person_' + raw['A']['name'] + '.csv', raw['A']['monthly_calls'])\n\tdump_dict_to_csv_file('raw_monthly_calls_person_' + raw['B']['name'] + '.csv', raw['B']['monthly_calls'])\n\tdump_dict_to_csv_file('raw_monthly_call_duration_person_' + raw['A']['name'] + '.csv', raw['A']['monthly_call_duration'])\n\tdump_dict_to_csv_file('raw_monthly_call_duration_person_' + raw['B']['name'] + '.csv', raw['B']['monthly_call_duration'])\n\tdump_dict_to_csv_file('raw_monthly_time_to_reply_person_' + raw['A']['name'] + '.csv', raw['A']['monthly_time_to_reply'])\n\tdump_dict_to_csv_file('raw_monthly_time_to_reply_person_' + raw['B']['name'] + '.csv', raw['B']['monthly_time_to_reply'])\n\tprint('done')\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt as e:\n\t\tprint('Aborted by KeyboardInterrupt')\n\t\texit(0)\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
bentzinir/ray | [
"39b84166f88e271b279bd0b3ce56f81d24a1852c"
] | [
"rllib/agents/sac/sac_ensemble_tf_model_unstack.py"
] | [
"from gym.spaces import MultiDiscrete\nimport numpy as np\n\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\nfrom ray.rllib.utils.framework import try_import_tf\n\ntf = try_import_tf()\n\n\nclass SACEnsembleTFModel(TFModelV2):\n \"\"\"Extension of standard TFModel for SAC.\n\n Data flow:\n obs -> forward() -> model_out\n model_out -> get_policy_output() -> pi(s)\n model_out, actions -> get_q_values() -> Q(s, a)\n model_out, actions -> get_twin_q_values() -> Q_twin(s, a)\n\n Note that this class by itself is not a valid model unless you\n implement forward() in a subclass.\"\"\"\n\n def __init__(self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n actor_hidden_activation=\"relu\",\n actor_hiddens=(256, 256),\n critic_hidden_activation=\"relu\",\n critic_hiddens=(256, 256),\n twin_q=False,\n initial_alpha=1.0,\n target_entropy=None,\n ensemble_size=1,\n shared_actor_body=False,\n shared_critic_body=False,\n constant_alpha=False,\n shared_entropy=False,\n ):\n \"\"\"Initialize variables of this model.\n\n Extra model kwargs:\n actor_hidden_activation (str): activation for actor network\n actor_hiddens (list): hidden layers sizes for actor network\n critic_hidden_activation (str): activation for critic network\n critic_hiddens (list): hidden layers sizes for critic network\n twin_q (bool): build twin Q networks.\n initial_alpha (float): The initial value for the to-be-optimized\n alpha parameter (default: 1.0).\n\n Note that the core layers for forward() are not defined here, this\n only defines the layers for the output heads. Those layers for\n forward() should be defined in subclasses of SACModel.\n \"\"\"\n super(SACEnsembleTFModel, self).__init__(obs_space, action_space, num_outputs,\n model_config, name)\n if isinstance(action_space, MultiDiscrete):\n ensemble_action_dims = action_space.nvec\n assert all(x == ensemble_action_dims[0] for x in ensemble_action_dims)\n self.action_dim = ensemble_action_dims[0]\n self.discrete = True\n action_outs = q_outs = self.action_dim\n else:\n self.action_dim = np.product(action_space.shape[1:])\n self.discrete = False\n action_outs = 2 * self.action_dim\n q_outs = 1\n\n self.model_out = tf.keras.layers.Input(\n shape=(self.num_outputs,), name=\"model_out\")\n\n self.twin_q = twin_q\n self.ensemble_size = ensemble_size\n self.shared_actor_body = shared_actor_body\n self.constant_alpha = constant_alpha\n self.action_model = [None for _ in range(ensemble_size)]\n self.shift_and_log_scale_diag = [None for _ in range(ensemble_size)]\n\n if self.shared_actor_body:\n print(f\"=============SHARED ACTOR BODY=============\")\n x = None\n for i, hidden in enumerate(actor_hiddens):\n if x is None:\n x = self.model_out\n x = tf.keras.layers.Dense(\n units=hidden,\n activation=getattr(tf.nn, actor_hidden_activation, None),\n name=\"action_{}\".format(i + 1))(x)\n\n for eidx in range(ensemble_size):\n a_out = tf.keras.layers.Dense(units=action_outs, activation=None, name=\"action_out_{}\".format(eidx))(x)\n self.action_model[eidx] = tf.keras.Model(self.model_out, a_out)\n\n self.shift_and_log_scale_diag[eidx] = self.action_model[eidx](self.model_out)\n\n self.register_variables(self.action_model[eidx].variables)\n else:\n for eidx in range(ensemble_size):\n self.action_model[eidx] = tf.keras.Sequential([\n tf.keras.layers.Dense(\n units=hidden,\n activation=getattr(tf.nn, actor_hidden_activation, None),\n name=\"action_{}_{}\".format(eidx, i + 1))\n for i, hidden in enumerate(actor_hiddens)\n ] + [\n tf.keras.layers.Dense(\n 
units=action_outs, activation=None, name=\"action_out_{}\".format(eidx))\n ])\n self.shift_and_log_scale_diag[eidx] = self.action_model[eidx](self.model_out)\n\n self.register_variables(self.action_model[eidx].variables)\n\n self.actions_input = None\n if not self.discrete:\n self.actions_input = tf.keras.layers.Input(\n shape=(self.action_dim, ), name=\"actions\")\n\n def build_q_net(name, observations, actions, eidx):\n # For continuous actions: Feed obs and actions (concatenated)\n # through the NN. For discrete actions, only obs.\n q_net = tf.keras.Sequential(([\n tf.keras.layers.Concatenate(axis=1),\n ] if not self.discrete else []) + [\n tf.keras.layers.Dense(\n units=units,\n activation=getattr(tf.nn, critic_hidden_activation, None),\n name=\"{}_hidden_{}_{}\".format(name, i, eidx))\n for i, units in enumerate(critic_hiddens)\n ] + [\n tf.keras.layers.Dense(\n units=q_outs, activation=None, name=\"{}_out\".format(name))\n ])\n\n # TODO(hartikainen): Remove the unnecessary Model calls here\n if self.discrete:\n q_net = tf.keras.Model(observations, q_net(observations))\n else:\n q_net = tf.keras.Model([observations, actions],\n q_net([observations, actions]))\n return q_net\n\n self.q_net = [None for _ in range(ensemble_size)]\n self.twin_q_net = [None for _ in range(ensemble_size)]\n\n for eidx in range(ensemble_size):\n self.q_net[eidx] = build_q_net(\"q\", self.model_out, self.actions_input, eidx)\n self.register_variables(self.q_net[eidx].variables)\n\n if twin_q:\n self.twin_q_net[eidx] = build_q_net(\"twin_q\", self.model_out,\n self.actions_input, eidx)\n self.register_variables(self.twin_q_net[eidx].variables)\n\n # Auto-calculate the target entropy.\n if target_entropy is None or target_entropy == \"auto\":\n # See hyperparams in [2] (README.md).\n if self.discrete:\n target_entropy = 0.98 * np.array(\n -np.log(1.0 / self.action_dim), dtype=np.float32)\n # TODO: find the correct entropy value for the ensemble\n # See [1] (README.md).\n else:\n # TODO: find the correct entropy value for the ensemble\n target_entropy = -np.prod(action_space.shape[1:])\n self.target_entropy = target_entropy\n\n # TODO: find correct alpha value\n if constant_alpha:\n initial_alpha = 0.1\n print(\"=================CONSTANT ALPHA====================\")\n\n print(f\"target ent: {self.target_entropy}, initial alpha: {initial_alpha}, shared ent: {shared_entropy}\")\n\n if shared_entropy:\n self.log_alpha = tf.Variable(\n np.log(initial_alpha), dtype=tf.float32, name=\"log_alpha\")\n else:\n log_alpha_vec = [np.log(initial_alpha) for _ in range(ensemble_size)]\n log_alpha_vec = np.expand_dims(log_alpha_vec, axis=1)\n self.log_alpha = tf.Variable(log_alpha_vec, dtype=tf.float32, name=\"log_alpha\")\n self.alpha = tf.exp(self.log_alpha)\n if not constant_alpha:\n self.register_variables([self.log_alpha])\n\n def get_q_values(self, model_out, actions=None, midx=None):\n \"\"\"Return the Q estimates for the most recent forward pass.\n\n This implements Q(s, a).\n\n Arguments:\n model_out (Tensor): obs embeddings from the model layers, of shape\n [BATCH_SIZE, num_outputs].\n actions (Optional[Tensor]): Actions to return the Q-values for.\n Shape: [BATCH_SIZE, action_dim]. 
If None (discrete action\n case), return Q-values for all actions.\n\n Returns:\n tensor of shape [BATCH_SIZE].\n \"\"\"\n # TODO: consider remove casting after debug\n model_out = tf.cast(model_out, tf.float32)\n if actions is not None:\n actions = tf.unstack(actions, axis=1)\n q_value_list = [qnet([model_out, act]) for qnet, act in zip(self.q_net, actions)]\n else:\n q_value_list = [qnet(model_out) for qnet in self.q_net]\n\n if midx is not None:\n return q_value_list[midx]\n else:\n return tf.stack(q_value_list, axis=1)\n\n def get_twin_q_values(self, model_out, actions=None, midx=None):\n \"\"\"Same as get_q_values but using the twin Q net.\n\n This implements the twin Q(s, a).\n\n Arguments:\n model_out (Tensor): obs embeddings from the model layers, of shape\n [BATCH_SIZE, num_outputs].\n actions (Optional[Tensor]): Actions to return the Q-values for.\n Shape: [BATCH_SIZE, action_dim]. If None (discrete action\n case), return Q-values for all actions.\n\n Returns:\n tensor of shape [BATCH_SIZE].\n \"\"\"\n # TODO: consider remove casting after debug\n model_out = tf.cast(model_out, tf.float32)\n\n if actions is not None:\n actions = tf.unstack(actions, axis=1)\n twin_q_value_list = [twin_qnet([model_out, act]) for twin_qnet, act in zip(self.twin_q_net, actions)]\n else:\n twin_q_value_list = [twin_qnet(model_out) for twin_qnet in self.twin_q_net]\n if midx is not None:\n return twin_q_value_list[midx]\n else:\n return tf.stack(twin_q_value_list, axis=1)\n\n def get_policy_output(self, model_out, midx=None):\n \"\"\"Return the action output for the most recent forward pass.\n\n This outputs the support for pi(s). For continuous action spaces, this\n is the action directly. For discrete, is is the mean / std dev.\n\n Arguments:\n model_out (Tensor): obs embeddings from the model layers, of shape\n [BATCH_SIZE, num_outputs].\n\n Returns:\n tensor of shape [BATCH_SIZE, action_out_size]\n \"\"\"\n if midx is not None:\n return self.action_model[midx](model_out)\n else:\n policy_output_list = [self.action_model[eidx](model_out) for eidx in range(self.ensemble_size)]\n return tf.stack(policy_output_list, axis=1)\n\n def policy_variables(self, midx=None):\n \"\"\"Return the list of variables for the policy net.\"\"\"\n\n if midx is not None:\n return self.action_model[midx].variables\n else:\n vars = []\n for eidx in range(self.ensemble_size):\n vars += self.action_model[eidx].variables\n return vars\n\n def q_variables(self, midx=None):\n \"\"\"Return the list of variables for Q / twin Q nets.\"\"\"\n\n if midx is not None:\n return self.q_net[midx].variables + (self.twin_q_net[midx].variables if self.twin_q_net else [])\n else:\n vars = []\n # We assume that the list is ordered as [Q vars, twin Q vars]\n # 1. First list Q variables\n for eidx in range(self.ensemble_size):\n vars += self.q_net[eidx].variables\n # 2. Second list twin Q variables\n if self.twin_q:\n for eidx in range(self.ensemble_size):\n vars += self.twin_q_net[eidx].variables\n return vars\n"
] | [
[
"numpy.log",
"numpy.product",
"numpy.expand_dims",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TamasFlorin/YOLO3-4-Py | [
"d7cc4d67c7eb9168a30ce9716ed64024fc1e1f8f"
] | [
"setup.py"
] | [
"import tempfile\nfrom distutils.command.build import build\nfrom distutils.command.clean import clean\nimport sys\nimport numpy as np # TODO: Need a mechanism to ensure numpy is already installed\nimport shutil\n\n# Compile using .cpp files if cython is not present\ntry:\n from Cython.Distutils import build_ext\nexcept ImportError:\n from distutils.command.build_ext import build_ext\n use_cython = False\nelse:\n use_cython = True\n\nfrom setuptools import setup, Extension\nfrom util import build_darknet, clean_darknet, get_cflags, get_libs, find_site_packages, get_readme, find_dist_packages\nimport logging\nimport os\n\nlogging.basicConfig(level=logging.INFO)\n\n# Default configuration\nUSE_GPU = False\nUSE_CV = False\n\nif \"GPU\" in os.environ:\n if \"DARKNET_HOME\" in os.environ:\n logging.warning(\"GPU environment variable is skipped since DARKNET_HOME is specified\")\n if int(os.environ[\"GPU\"]) == 1:\n USE_GPU = True\n else:\n USE_GPU = False\n else:\n if int(os.environ[\"GPU\"]) == 1:\n logging.info(\"Darknet will be compiled with GPU support\")\n USE_GPU = True\n else:\n logging.info(\"Darknet will be compiled without GPU support\")\n USE_GPU = False\n\n\nif \"OPENCV\" in os.environ and int(os.environ[\"OPENCV\"]) == 0:\n logging.info(\"Compiling wrapper without OpenCV\")\n USE_CV = False\nelif \"OPENCV\" in os.environ and int(os.environ[\"OPENCV\"]) == 1:\n logging.info(\"Compiling wrapper with OpenCV\")\n USE_CV = True\n\nif USE_CV & (get_libs(\"opencv\") == '' or get_cflags(\"opencv\") == ''):\n logging.warning(\"OpenCV is not configured. Compiling wrapper without OpenCV!\")\n USE_CV = False\n\n\nif USE_GPU:\n if USE_CV:\n build_branch_name = \"master\"\n else:\n build_branch_name = \"master\"\nelse:\n build_branch_name = \"master\"\n if \"DARKNET_HOME\" not in os.environ:\n if USE_CV:\n logging.warning(\"Non GPU darknet branch is used. Compiling wrapper without OpenCV!\")\n USE_CV = False # OpenCV requires yolo34py-intergration branch which has OpenCV enabled\n\nif \"DARKNET_HOME\" not in os.environ:\n logging.info(\"Selected Darknet Branch: \" + build_branch_name+ \" from Darknet Fork 'https://github.com/madhawav/darknet/'\")\n\n\ntemp_dir = os.path.join(tempfile.gettempdir(), \"darknet\") # Temp directory to build darknet\n\n# Check whether user has specified DARKNET_HOME directory. 
If so, we would use the darknet installation at this location.\nif not \"DARKNET_HOME\" in os.environ:\n darknet_dir = os.path.join(temp_dir, \"darknet-\" + build_branch_name)\nelse:\n logging.info(\"DARKNET_HOME is set: \" + os.environ[\"DARKNET_HOME\"])\n darknet_dir = os.environ[\"DARKNET_HOME\"]\n\ninclude_paths = [np.get_include(), os.path.join(darknet_dir,\"include\"), os.path.join(darknet_dir,\"src\")]\nlibraries = [\"darknet\",\"m\", \"pthread\"]\nlibrary_paths = [\".\", \"./__libdarknet\"]\n\nextra_compiler_flags = [ get_cflags(\"python3\")]\nextra_linker_flags = [get_libs(\"python3\")]\n\ncython_compile_directives = {}\nmacros = []\n\nif USE_GPU:\n if \"CUDA_HOME\" in os.environ:\n include_paths.append(os.path.join(os.environ[\"CUDA_HOME\"],\"include\"))\n else:\n raise Exception(\"Environment variable CUDA_HOME not set\")\n cython_compile_directives[\"USE_GPU\"] = 1\n macros.append((\"USE_GPU\", 1))\nelse:\n cython_compile_directives[\"USE_GPU\"] = 0\n macros.append((\"USE_GPU\", 0))\n\nif USE_CV:\n extra_compiler_flags.append(get_cflags(\"opencv\"))\n extra_linker_flags.append(get_libs(\"opencv\"))\n cython_compile_directives[\"USE_CV\"] = 1\n macros.append((\"USE_CV\", 1))\nelse:\n cython_compile_directives[\"USE_CV\"] = 0\n macros.append((\"USE_CV\", 0))\n\n\n# Add linker flag to search in site_packages/__libdarknet. libdarknet.so is located at this location.\nfor site_package in find_site_packages():\n extra_linker_flags.append(\"-Wl,-rpath,\" + os.path.join(site_package,\"__libdarknet\"))\n\nfor dist_package in find_dist_packages():\n extra_linker_flags.append(\"-Wl,-rpath,\" + os.path.join(dist_package,\"__libdarknet\"))\n\nif \"--inplace\" in sys.argv:\n extra_linker_flags.append(\"-Wl,-rpath,.\") # Added to make test code work\n\nif use_cython:\n pydarknet_extension = Extension(\"pydarknet\", [\"pydarknet.pyx\", \"pydarknet.pxd\", \"bridge.cpp\"], include_dirs=include_paths, language=\"c++\",\n libraries=libraries, library_dirs=library_paths, extra_link_args=extra_linker_flags,\n extra_compile_args=extra_compiler_flags, define_macros = macros)\n\n # Pass macros to Cython\n pydarknet_extension.cython_compile_time_env = cython_compile_directives\nelse:\n pydarknet_extension = Extension(\"pydarknet\", [\"pydarknet.cpp\", \"bridge.cpp\"],\n include_dirs=include_paths, language=\"c++\",\n libraries=libraries, library_dirs=library_paths, extra_link_args=extra_linker_flags,\n extra_compile_args=extra_compiler_flags, define_macros=macros)\n\n # NOTE: It is assumed that pydarknet.cpp is already generated using pydarknet.py. 
It is also assumed that USE_CV\n # flag is unchanged between cythonize and current compilation.\n\next_modules=[\n pydarknet_extension\n]\n\ndarknet_setup_done = False\n\ndef setup_darknet():\n '''\n Configures darknet on which the wrapper works\n :return:\n '''\n global darknet_setup_done\n if darknet_setup_done:\n return\n\n target_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"__libdarknet\", \"libdarknet.so\")\n\n if \"--inplace\" in sys.argv:\n logging.info(\"For inplace compilations, target location is set to root\")\n target_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"libdarknet.so\")\n\n if \"DARKNET_HOME\" not in os.environ:\n # If user has not specified DARKNET_HOME, we will download and build darknet.\n build_darknet(temp_dir, build_branch_name, target_location)\n else:\n logging.info(\"Copying libdarknet.so from \" + os.environ[\"DARKNET_HOME\"])\n # If user has set DARKNET_HOME, it is assumed that he has built darknet. We will copy libdarknet.so from users location to site-pacakges/__libdarknet\n shutil.copyfile(os.path.join(os.environ[\"DARKNET_HOME\"], \"libdarknet.so\"),\n target_location)\n\n darknet_setup_done = True\n\nclass CustomBuild(build):\n def run(self):\n # This is triggered when src distribution is made. Not triggered for build_ext.\n setup_darknet()\n build.run(self)\n\nclass CustomBuildExt(build_ext):\n def run(self):\n setup_darknet()\n build_ext.run(self)\n\n if not \"DARKNET_HOME\" in os.environ:\n clean_darknet(temp_dir)\n\nclass CustomClean(clean):\n def run(self):\n if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"__libdarknet\",\"libdarknet.so\")):\n logging.info(\"removing __libdarknet/libdarknet.so\")\n os.remove(os.path.join(os.path.dirname(__file__),\"__libdarknet\",\"libdarknet.so\"))\n\n if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"libdarknet.so\")):\n logging.info(\"removing libdarknet.so\")\n os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"libdarknet.so\"))\n\n if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"pydarknet.cpp\")):\n logging.info(\"removing pydarknet.cpp\")\n os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"pydarknet.cpp\"))\n\n for f in os.listdir(os.path.dirname(os.path.abspath(__file__))):\n if f.startswith(\"pydarknet.\") and f.endswith(\".so\"):\n logging.info(\"removing \" + f)\n os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),f))\n\n clean.run(self)\n\n\nif USE_GPU:\n name = \"yolo34py-gpu\"\nelse:\n name = \"yolo34py\"\n\ncmd_class = {'clean': CustomClean, \"build\": CustomBuild, \"build_ext\": CustomBuildExt}\n\n\nsetup(\n name = name,\n description=\"Python wrapper on YOLO 3.0 implementation by 'pjreddie': (https://pjreddie.com/yolo)\",\n long_description=get_readme(),\n long_description_content_type=\"text/markdown\",\n cmdclass= cmd_class,\n version='0.1.rc13',\n ext_modules = ext_modules,\n platforms=[\"linux-x86_64\"],\n setup_requires=[\n 'cython>=0.27',\n 'requests',\n 'numpy'\n ],\n install_requires=[\n 'cython>=0.27',\n 'requests',\n 'numpy'\n ],\n python_requires='>=3.5',\n author='Madhawa Vidanapathirana',\n author_email='[email protected]',\n url=\"https://github.com/madhawav/YOLO3-4-Py\",\n package_dir={\"__libdarknet\": \"__libdarknet\"},\n packages=[\"__libdarknet\"],\n include_package_data=True,\n license=\"YOLO34Py wrapper is under Apache 2.0. 
Darknet is Public Domain.\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Text Processing :: Linguistic',\n 'Operating System :: POSIX :: Linux',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence'\n ],\n keywords=\"yolo darknet object detection vision\",\n\n)\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sheepolata/GraphEngine | [
"853447e42dcd09154cdc5ac0b8e00c493445a389"
] | [
"ggraph.py"
] | [
"# import the pygame module, so you can use it\nimport pygame\nfrom pygame.locals import *\nimport warnings\nimport random\nimport numpy as np\nimport math\nfrom scipy.spatial import Delaunay\n\nimport delaunaytriangulation as dt\nimport graphmodel as gm\nimport drawer\nimport utils\n\nclass gNode(gm.Node):\n\n def draw(self, surface, color):\n self.drawNode(surface, color)\n self.drawEdges(surface, color)\n\n def drawNode(self, surface, color, outline_color=(255, 255, 255), outline_width=2):\n try:\n self.info[\"pos\"]\n except KeyError:\n warnings.warn(\"{} does not possess a position information (info[\\\"pos\\\"])\".format(self.id))\n return\n\n try:\n radius = self.info[\"radius\"]\n except KeyError:\n radius = 8\n\n pygame.draw.circle(surface, color, self.info[\"pos\"], radius)\n pygame.draw.circle(surface, outline_color, self.info[\"pos\"], radius+outline_width, width=outline_width)\n\n def drawEdges(self, surface, color, width=1):\n try:\n self.info[\"pos\"]\n except KeyError:\n warnings.warn(\"{} does not possess a position information (info[\\\"pos\\\"])\".format(self.id), stacklevel=2)\n return\n\n try:\n radius = self.info[\"radius\"]\n except KeyError:\n radius = 8\n\n for e in self.edges:\n if e.end == self:\n pygame.draw.circle(surface, color, (self.info[\"pos\"][0], self.info[\"pos\"][1]-radius), radius*1.2, width=width)\n else:\n try:\n if self.parent.oriented:\n v = (e.end.info[\"pos\"][0] - self.info[\"pos\"][0], e.end.info[\"pos\"][1] - self.info[\"pos\"][1])\n mag_v = np.linalg.norm(np.array(v))\n u = (v[0] / mag_v, v[1] / mag_v)\n ep = (e.end.info[\"pos\"][0] - radius*1.5*u[0], e.end.info[\"pos\"][1]- radius*1.5*u[1])\n drawer.arrow(surface, color, self.info[\"pos\"], ep)\n else:\n pygame.draw.line(surface, color, self.info[\"pos\"], e.end.info[\"pos\"], width=width)\n except KeyError:\n warnings.warn(\"{} does not possess a position information (info[\\\"pos\\\"])\".format(e.end.id), stacklevel=2)\n\n def move(self, direction, speed, limits=(1280, 720), collision=True):\n try:\n self.info[\"pos\"]\n except KeyError:\n self.info[\"pos\"] = [0, 0]\n\n new_pos = [0, 0]\n new_pos[0] = (self.info[\"pos\"][0] + math.cos(direction) * speed) % limits[0]\n new_pos[1] = (self.info[\"pos\"][1] + math.sin(direction) * speed) % limits[1]\n\n if collision:\n for other in self.parent.nodes:\n if not self.equal(other):\n _count = 0\n while self.collide(other):\n new_pos[0] = (self.info[\"pos\"][0] + math.cos(utils.angle_from_points(self.info[\"pos\"], other.info[\"pos\"])) * 1) % limits[0]\n new_pos[1] = (self.info[\"pos\"][1] + math.sin(utils.angle_from_points(self.info[\"pos\"], other.info[\"pos\"])) * 1) % limits[1]\n _count += 1\n if _count > 50:\n break\n\n self.info[\"pos\"][0] = new_pos[0]\n self.info[\"pos\"][1] = new_pos[1]\n\n\n\n def collide(self, other):\n # print(\"{} <= {}\".format(utils.distance2p(self.info[\"pos\"], other.info[\"pos\"]), max(self.info[\"radius\"], other.info[\"radius\"])))\n return utils.distance2p(self.info[\"pos\"], other.info[\"pos\"]) <= max(self.info[\"radius\"], other.info[\"radius\"])*2\n\n def collide_point(self, point):\n return utils.distance2p(self.info[\"pos\"], point) <= self.info[\"radius\"]\n\n def applyForces(self, speed=1, spring_rest_distance=75, collision=True):\n edges_force_vectors = []\n del_force_vectors = []\n\n attraction_factor = 0.8\n repulsion_factor = 1.4\n\n _t = 0.1\n t_up = 1.00 + _t\n t_down = 1.00 - _t\n\n for e in self.edges:\n if self.equal(e.end):\n continue\n try:\n dist = utils.distance2p(e.end.info[\"pos\"], 
self.info[\"pos\"])\n if dist >= spring_rest_distance*t_up:\n f = {\"force\":[e.end.info[\"pos\"][0] - self.info[\"pos\"][0], e.end.info[\"pos\"][1] - self.info[\"pos\"][1]]}\n f[\"f_dist\"] = utils.normalise(dist, mini=spring_rest_distance, maxi=spring_rest_distance*3) * attraction_factor\n edges_force_vectors.append(f)\n elif dist <= spring_rest_distance*t_down:\n # f = {\"force\":[self.info[\"pos\"][0] - e.end.info[\"pos\"][0], self.info[\"pos\"][1] - e.end.info[\"pos\"][1]]} # OLD WAY\n opposite_angle = (utils.angle_from_points(self.info[\"pos\"], e.end.info[\"pos\"]) + math.pi) % (2*math.pi)\n op_e = [self.info[\"pos\"][0] + dist*math.cos(opposite_angle), self.info[\"pos\"][1] + dist*math.sin(opposite_angle)]\n f = {\"force\":[self.info[\"pos\"][0] - op_e[0], self.info[\"pos\"][1] - op_e[1]]}\n f[\"f_dist\"] = utils.normalise(dist, mini=0, maxi=spring_rest_distance) * repulsion_factor\n edges_force_vectors.append(f)\n except KeyError:\n warnings.warn(\"{} or {} does not possess a position information (info[\\\"pos\\\"])\".format(self.id, e.end.id), stacklevel=2)\n\n # for other in self.parent.nodes:\n # if self.equal(other):\n # continue\n # if gm.Edge(self, other) in self.edges:\n # continue\n # try:\n # dist = utils.distance2p(other.info[\"pos\"], self.info[\"pos\"])\n # # if dist >= spring_rest_distance*t_up and dist <= spring_rest_distance*3:\n # # f = {\"force\":[other.info[\"pos\"][0] - self.info[\"pos\"][0], other.info[\"pos\"][1] - self.info[\"pos\"][1]]}\n # # f[\"f_dist\"] = utils.normalise(dist, mini=spring_rest_distance*1.5, maxi=spring_rest_distance*3) * attraction_factor \n # # edges_force_vectors.append(f)\n # if dist < spring_rest_distance*t_down:\n # opposite_angle = (utils.angle_from_points(self.info[\"pos\"], other.info[\"pos\"]) + math.pi) % (2*math.pi)\n # op_e = [self.info[\"pos\"][0] + dist*math.cos(opposite_angle), self.info[\"pos\"][1] + dist*math.sin(opposite_angle)]\n # f = {\"force\":[self.info[\"pos\"][0] - op_e[0], self.info[\"pos\"][1] - op_e[1]]}\n # f[\"f_dist\"] = utils.normalise(dist, mini=0, maxi=spring_rest_distance) * repulsion_factor * 1.2\n # edges_force_vectors.append(f)\n # except KeyError:\n # warnings.warn(\"{} or {} does not possess a position information (info[\\\"pos\\\"])\".format(self.id, other.id), stacklevel=2)\n\n for n_id in self.parent.triangulation.get_neighbours_of(self.id):\n neigh = self.parent.getNodeByID(n_id)\n\n if self.equal(neigh):\n continue\n if gm.Edge(self, neigh) in self.edges:\n continue\n\n dist = utils.distance2p(neigh.info[\"pos\"], self.info[\"pos\"])\n\n if dist >= spring_rest_distance*t_up and dist <= spring_rest_distance*3:\n f = {\"force\":[neigh.info[\"pos\"][0] - self.info[\"pos\"][0], neigh.info[\"pos\"][1] - self.info[\"pos\"][1]]}\n f[\"f_dist\"] = utils.normalise(dist, mini=spring_rest_distance*1.5, maxi=spring_rest_distance*3) * attraction_factor * 0.6\n del_force_vectors.append(f)\n if dist < spring_rest_distance*t_down:\n opposite_angle = (utils.angle_from_points(self.info[\"pos\"], neigh.info[\"pos\"]) + math.pi) % (2*math.pi)\n op_e = [self.info[\"pos\"][0] + dist*math.cos(opposite_angle), self.info[\"pos\"][1] + dist*math.sin(opposite_angle)]\n f = {\"force\":[self.info[\"pos\"][0] - op_e[0], self.info[\"pos\"][1] - op_e[1]]}\n f[\"f_dist\"] = utils.normalise(dist, mini=0, maxi=spring_rest_distance) * repulsion_factor * 1.4\n del_force_vectors.append(f)\n\n\n final_force = [0, 0]\n\n if edges_force_vectors != []:\n for f in edges_force_vectors:\n final_force[0] += f[\"force\"][0] * 
f[\"f_dist\"]\n final_force[1] += f[\"force\"][1] * f[\"f_dist\"]\n\n final_force[0] /= len(edges_force_vectors)\n final_force[1] /= len(edges_force_vectors)\n elif del_force_vectors != []:\n for f in del_force_vectors:\n final_force[0] += f[\"force\"][0] * f[\"f_dist\"]\n final_force[1] += f[\"force\"][1] * f[\"f_dist\"]\n\n final_force[0] /= len(del_force_vectors)\n final_force[1] /= len(del_force_vectors)\n\n final_force_mag = np.linalg.norm(np.array(final_force))\n\n spd_factor = 1\n if final_force_mag > spring_rest_distance:\n spd_factor = utils.normalise(final_force_mag, mini=spring_rest_distance*t_up, maxi=spring_rest_distance*3)\n else:\n spd_factor = 1 - utils.normalise(final_force_mag, mini=0, maxi=spring_rest_distance*t_down)\n\n if spd_factor > -0.005 and spd_factor < 0.005:\n spd_factor = 0\n # else:\n # utils.clamp(spd_factor, -1, 1)\n\n # if final_force_mag < 0.5:\n # spd_factor = 0\n\n self.move(math.atan2(final_force[1], final_force[0]), speed*spd_factor, collision=collision)\n\nclass gGraph(gm.Graph):\n\n def __init__(self, node_type=None, oriented=True):\n super().__init__(node_type=node_type, oriented=oriented)\n self.delaunay_points = None\n self.delaunay = None\n self._draw_delaunay = True\n\n def setDelaunay(self, dcl=-1):\n dict_pos = {}\n for n in self.nodes:\n dict_pos[n.id] = n.info[\"pos\"]\n\n self.triangulation = dt.Delaunay_Triangulation(dict_pos)\n self.triangulation.delaunay_cut_links = dcl\n self.triangulation.update()\n\n def computeDelaunay(self):\n # self.delaunay_points = np.array([p.info[\"pos\"] for p in self.nodes])\n # self.delaunay = Delaunay(self.delaunay_points)\n dict_pos = {}\n for n in self.nodes:\n dict_pos[n.id] = n.info[\"pos\"]\n\n self.triangulation.update(new_positions=dict_pos)\n\n def drawDelaunay(self, surface, color):\n for n in self.nodes:\n dneigh = self.triangulation.get_neighbours_of(n.id)\n for dn in dneigh:\n pygame.draw.line(surface, color, n.info[\"pos\"], self.getNodeByID(dn).info[\"pos\"])\n\n def draw(self, surface):\n for n in self.nodes:\n try:\n color = n.info[\"color\"]\n except KeyError:\n color = (255, 255, 255)\n n.drawEdges(surface, color)\n for n in self.nodes:\n try:\n color = n.info[\"color\"]\n except KeyError:\n color = (255, 255, 255)\n try:\n out_color = n.info[\"outline_color\"]\n n.drawNode(surface, color, outline_color=out_color)\n except KeyError:\n n.drawNode(surface, color)\n \n if self._draw_delaunay:\n self.drawDelaunay(surface, (0, 0, 255))"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
inesnolas/Rank-based-loss_ICASSP22 | [
"3ebe7345dc26b8fa74543725a51b43b7170c58cc",
"3ebe7345dc26b8fa74543725a51b43b7170c58cc"
] | [
"run_example.py",
"models/SingleLayer_net.py"
] | [
"import models.SingleLayer_net as single_layer\nimport loss_functions.rank_based_loss as rbl\n# import wandb\nimport torch\nimport utils.data_functions as df\nimport os\nimport json\nimport pandas as pd\nimport csv\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n# wandb.init(project='example')\n\nexp_name = 'example'\n# wandb.run.name = exp_name\nstandardized_data = True\nsave_training_embeddings_to_plot = True\nshuffle = False \ndrop_last = False \n\nexperiments_folder =\"./example_data\"\n\ninitial_embeddings_path = os.path.join(experiments_folder, 'Normalized_VGGish_embeddings_based_on_Training_Set')\ntrain_initial_embeddings_path = os.path.join(initial_embeddings_path, 'train')\nval_initial_embeddings_path = os.path.join(initial_embeddings_path, 'val')\ntest_initial_embeddings_path = os.path.join(initial_embeddings_path, 'test')\n\nresults_folder = os.path.join(experiments_folder, \"results_\"+exp_name)\ncheckpoints_folder = os.path.join(results_folder, \"checkpoints\")\nif not os.path.exists(checkpoints_folder):\n os.makedirs(checkpoints_folder)\n\nif save_training_embeddings_to_plot:\n if not os.path.exists(os.path.join(checkpoints_folder, \"Embeddings_plot\")):\n os.mkdir(os.path.join(checkpoints_folder, \"Embeddings_plot\"))\n \ntrain_df = pd.read_csv(os.path.join(experiments_folder, 'train.csv'), dtype = str)\nval_df = pd.read_csv(os.path.join(experiments_folder, 'val.csv'), dtype = str)\ntest_df = pd.read_csv(os.path.join(experiments_folder, 'test.csv'), dtype = str)\n\nconfigs = {\"EMBEDDINGS_SIZE\" : 128,\n\"output_EMBEDDINGS_SIZE\" :3, \n\"EARLY_STOPPING_PTC\" : 20,\n\"LR\" : 1e-5,\n\"BATCH_SIZE\" : 12,\n\"n_epochs\" : 100, \n}\nparams = {'batch_size': configs[\"BATCH_SIZE\"],'shuffle': shuffle, 'drop_last': drop_last}\n\ntraining_set = df.RankBasedLossHierarchicalLabelsEmbeddings(train_df, train_initial_embeddings_path, target_labels='hierarchical_labels')#,'species','taxon'])\ntraining_generator = torch.utils.data.DataLoader(training_set, **params)\nlen_train = len(training_set)\n\n\nvalidation_set = df.RankBasedLossHierarchicalLabelsEmbeddings(val_df , val_initial_embeddings_path, target_labels='hierarchical_labels')#,'species','taxon'])\nparams_val = {'batch_size': configs[\"BATCH_SIZE\"],'shuffle': False, 'drop_last': False}\nvalidation_generator = torch.utils.data.DataLoader(validation_set, **params_val)\nlen_val = len(validation_set)\n\nmodel =single_layer.SingleLayerHypersphereConstraint(configs)\n\n# wandb.watch(model)\n# wandb.config = configs\n# wandb.config[\"architecture\"] = \"LinLayer_cosinedist\"\n# wandb.config[\"dataset\"] = \"TuT\"\nwith open(os.path.join(results_folder, 'configs_dict'), \"w\") as c:\n json.dump(configs, c)\n\ncheckpoint_name = rbl.train_RbL(model, training_generator, validation_generator,\n checkpoints_folder, configs['EARLY_STOPPING_PTC'], save_training_embeddings_to_plot, \n configs['n_epochs'], configs, distance='cosine',\n number_of_ranks = 4)\n\n\n\nprint( \"\\nFinished training, will now use the checkpoint to generate embeddings for the test set:\")\n# Predict with checkpoint:\n\n# if save_embeddings_to_plot:\nif not os.path.exists(os.path.join(results_folder, \"test_Embeddings_plot\")):\n os.mkdir(os.path.join(results_folder, \"test_Embeddings_plot\"))\n\ntest_set = df.RankBasedLossHierarchicalLabelsEmbeddings(test_df, test_initial_embeddings_path, target_labels = 'hierarchical_labels')\ntest_generator = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)\nlen_test = len(test_set)\n\n# load the checkpoint, configs and 
model\nwith open(os.path.join(results_folder, \"configs_dict\") )as c:\n configs = json.load(c)\n\nmodel=single_layer.SingleLayerHypersphereConstraint(configs)\nmodel.load_state_dict(torch.load(checkpoint_name)[\"net_dict\"])\n\nsil_id, sil_species =rbl.predict(model, test_generator, configs, results_folder)\nprint(\"sil_fine level\", sil_id)\nprint('sil_coarse level', sil_species)\nwith open(os.path.join(results_folder, 'silhouettes_on_test_set.csv'), 'w') as fout:\n writer = csv.writer(fout)\n writer.writerow(['sil_fine_level', str(sil_id)])\n writer.writerow(['sil_coarse_level', str(sil_species)])",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\nclass SingleLayerHypersphereConstraint(nn.Module):\n \n def __init__(self, configs ):\n super(SingleLayerHypersphereConstraint, self).__init__()\n self.linear = nn.Linear(configs[\"EMBEDDINGS_SIZE\"], configs[\"output_EMBEDDINGS_SIZE\"])\n self.optimizer = optim.SGD(self.parameters(), lr=configs[\"LR\"])\n\n def forward(self, examples): \n x = self.linear( examples) \n x = torch.nn.functional.normalize(x, p=2)\n return x\n\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.load"
],
[
"torch.nn.functional.normalize",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |